# Hugging Face Space: medical text summarization demo.
# (The deployed Space page reported "Runtime error" — see fixes below.)
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
def generate_summary(input_text):
    """Summarize medical text with a fine-tuned seq2seq model.

    Args:
        input_text: Free-form medical text (e.g. a patient question) to summarize.

    Returns:
        The generated summary as a plain string (Gradio's "text" output
        component expects a string, not the pipeline's result dict).
    """
    # Gradio may pass an empty box; the pipeline chokes on empty input.
    if not input_text:
        return ""

    # Load the model/tokenizer once and cache the pipeline on the function
    # object — reloading on every request is very slow and memory-hungry.
    if not hasattr(generate_summary, "_pipeline"):
        tokenizer = AutoTokenizer.from_pretrained("Falconsai/medical_summarization")
        model = AutoModelForSeq2SeqLM.from_pretrained(
            "Shariar00/medical_summarization_finetune_medical_qa"
        )
        generate_summary._pipeline = pipeline(
            "summarization", model=model, tokenizer=tokenizer
        )

    output = generate_summary._pipeline(
        input_text, max_length=512, num_return_sequences=1
    )
    # The summarization pipeline returns [{"summary_text": "..."}]; the
    # original code returned the whole dict, which broke the text output.
    return output[0]["summary_text"]
# Wire the summarizer into a simple Gradio UI: one text box in, one out.
iface = gr.Interface(
    fn=generate_summary,
    inputs="text",
    outputs="text",
)

# Start the web server (Spaces runs this module directly).
iface.launch()