Tim Luka Horstmann committed
Commit · 43799fd
Parent(s): 16a3faa

Gemini updated
app.py CHANGED
@@ -151,7 +151,7 @@ async def stream_response_gemini(query, history):
     start_time = time.time()
     first_token_logged = False
 
-    # Build
+    # 1) Build your system prompt once
     current_date = datetime.now().strftime("%Y-%m-%d")
     system_prompt = (
         "You are Tim Luka Horstmann, a Computer Scientist. "
@@ -162,31 +162,33 @@ async def stream_response_gemini(query, history):
         f"Today's date is {current_date}. CV: {full_cv_text}"
     )
 
-    # Build only
-    contents = [
-
+    # 2) Build only user/model history as `contents`
+    contents = []
+    for msg in history:
+        contents.append(
             types.Content(
-                role=msg["role"],
-                parts=[types.Part.from_text(msg["content"])]
+                role=msg["role"],
+                parts=[ types.Part.from_text(msg["content"]) ]
             )
-        for msg in history
-    ] + [
-
+        )
+    # finally append the new user question
+    contents.append(
         types.Content(
             role="user",
-            parts=[types.Part.from_text(query)]
+            parts=[ types.Part.from_text(query) ]
         )
-    ]
+    )
 
+    # 3) Call Gemini with `system_instruction`
     try:
         response = gemini_client.models.generate_content_stream(
             model=gemini_model,
             contents=contents,
             config=types.GenerateContentConfig(
                 system_instruction=system_prompt,
-
-
-
+                temperature=0.3,
+                top_p=0.7,
+                max_output_tokens=512,
                 response_mime_type="text/plain",
             )
         )
@@ -204,6 +206,7 @@ async def stream_response_gemini(query, history):
             yield f"data: Sorry, I encountered an error with Gemini API: {str(e)}\n\n"
             yield "data: [DONE]\n\n"
 
+
 async def stream_response_local(query, history):
     """Stream response using local model"""
     logger.info(f"Processing query with local model: {query}")