yvesJR-237 committed on
Commit 1c9af46 · verified · 1 Parent(s): 7b784c7

Update app.py

Files changed (1)
  1. app.py +17 -5
app.py CHANGED
@@ -83,7 +83,7 @@ def load_summarizer():
     return summarizer
 
 # generator, tokenizer = load_generator()
-generator = pipeline("text-generation", model="microsoft/BioGPT")
+generator = pipeline("text-generation", model="mistralai/Mistral-7B-v0.1")
 summarizer = load_summarizer()
 
 image_file = st.sidebar.file_uploader("Upload an Image (.jpg, .jpeg, .png)", type=["jpg", "jpeg", "png"])
@@ -167,14 +167,26 @@ with tab2:
     # # Extract the generated part of the text
     # generated_response = generated_text[prompt_length:]
     # st.session_state.chat_response = generated_response
-
 
     # medical_term = "myocardial infarction"
-    medical_term = prompt
-    description = generator(f"Explain the medical term: {medical_term}")
+    # medical_term = prompt
+    # description = generator(f"Explain the medical term: {medical_term}")
+
+    user_input = st.text_area("Enter your prompt:", prompt)
+    # Slider for controlling the length of the generated text
+    max_length = st.slider("Max length of generated text", 50, 500, 100)
+    # Button to generate text
+    if st.button("Generate Text"):
+        with st.spinner("Generating text..."):
+            # Generate text using the model
+            output = generator(user_input, max_length=max_length, num_return_sequences=1)
+            generated_text = output[0]['generated_text']
+            # Display the generated text
+            st.subheader("Generated Text:")
+            st.write(generated_text)
 
     # st.session_state.chat_response = outputs
-    st.session_state.chat_response = description
+    st.session_state.chat_response = generated_text
     # Display the generated description
     st.subheader("Generated Description:")
     st.write(prompt, '--')
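
Note: the generator call added in this commit follows the standard transformers text-generation pipeline. Below is a minimal standalone sketch of that call pattern; distilgpt2 is substituted here purely as a lightweight stand-in, since mistralai/Mistral-7B-v0.1 needs substantially more memory (and may require accepting the model's terms on the Hub) to load.

# Minimal sketch of the text-generation pipeline call used above.
# Assumption: distilgpt2 stands in for Mistral-7B-v0.1 so the example
# runs on modest hardware; the call pattern is otherwise identical.
from transformers import pipeline

generator = pipeline("text-generation", model="distilgpt2")
output = generator(
    "Explain the medical term: myocardial infarction",
    max_length=100,
    num_return_sequences=1,
)
print(output[0]["generated_text"])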