UPDATE
app.py
CHANGED
@@ -104,17 +104,44 @@ async def generate_text(request: RequestModel):
         # max_tokens=1024,
         stream=True
     )
-
-
-
-
-
-
-
-
-
-    #
-
+
+    generated_text = ""
+    for chunk in completion:
+        if chunk.choices[0].delta.content is not None:
+            generated_text += chunk.choices[0].delta.content
+
+    return {"summary_text_2": generated_text}
+@app.post("/generate2/")
+async def generate_text(request: RequestModel):
+    # Create the request for the NVIDIA API
+    completion = client.chat.completions.create(
+        model="meta/llama-3.1-8b-instruct",
+        messages=[{"role": "user", "content": default_prompt + request.text}],
+        temperature=0.2,
+        top_p=0.9,
+        # max_tokens=1024,
+        stream=True
+    )
+
+    generated_text = ""
+    for chunk in completion:
+        if chunk.choices[0].delta.content is not None:
+            generated_text += chunk.choices[0].delta.content
+
+    return {"summary_text_2": generated_text}
+
+@app.post("/generate3/")
+async def generate_text(request: RequestModel):
+    # Create the request for the NVIDIA API
+    completion = client.chat.completions.create(
+        model="meta/llama-3.1-8b-instruct",
+        messages=[{"role": "user", "content": instructions_par_defaut + request.text}],
+        temperature=0.2,
+        top_p=0.9,
+        # max_tokens=1024,
+        stream=True
+    )
+
     generated_text = ""
     for chunk in completion:
         if chunk.choices[0].delta.content is not None:
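For context, here is a minimal sketch of the scaffolding these endpoints rely on. The names client, RequestModel, default_prompt, and instructions_par_defaut come from the diff itself; the base URL, environment-variable name, and prompt wording are assumptions, not part of the commit.

import os

from fastapi import FastAPI
from openai import OpenAI
from pydantic import BaseModel

app = FastAPI()

# NVIDIA's hosted models expose an OpenAI-compatible API, so the standard
# client works once pointed at their endpoint (URL and key name assumed).
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ["NVIDIA_API_KEY"],  # hypothetical variable name
)

# Prompt prefixes referenced by the two endpoints; placeholder wording.
default_prompt = "Summarize the following text:\n\n"
instructions_par_defaut = "Summarize the following text:\n\n"

class RequestModel(BaseModel):
    text: str

One note on the diff as written: all three handlers reuse the function name generate_text. FastAPI registers each route at decoration time, so routing still works, but distinct names (e.g. generate_text2, generate_text3) would avoid silently shadowing the earlier definitions.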
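Once the app is running (e.g. via uvicorn app:app), the new route can be exercised with a short client script. This is a hypothetical smoke test; the port and input text are illustrative, while the /generate2/ path and summary_text_2 key match the diff.

import requests

# Call the /generate2/ endpoint added in this commit and print the result.
resp = requests.post(
    "http://127.0.0.1:8000/generate2/",
    json={"text": "Text to summarize goes here."},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["summary_text_2"])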