Update app.py
app.py CHANGED
@@ -94,15 +94,18 @@ def generate_response(model, tokenizer, instruction, max_new_tokens=2048):
 @app.post("/generate")
 async def generate_text(input: ModelInput):
     try:
+        print(f"Received prompt: {input.prompt}")  # Log the prompt received
         response = generate_response(
             model=model,
             tokenizer=tokenizer,
             instruction=input.prompt,
             max_new_tokens=input.max_new_tokens
         )
+        print(f"Generated response: {response}")  # Log the generated response
         return {"generated_text": response}
 
     except Exception as e:
+        print(f"Error: {str(e)}")  # Log the error
         raise HTTPException(status_code=500, detail=str(e))
 
 @app.get("/")
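
For quick testing of the updated endpoint, a minimal client call could look like the sketch below. The base URL and port are placeholders (the actual Space host is not shown in this commit), and the prompt text and token count are illustrative; only the field names prompt and max_new_tokens and the generated_text response key come from the handler above. The added print statements should then show the prompt, the response, or any error in the Space logs.

import requests

# Hypothetical base URL; replace with the actual Space/host address.
BASE_URL = "http://localhost:7860"

payload = {
    "prompt": "Explain what a FastAPI endpoint is.",   # illustrative prompt
    "max_new_tokens": 256,                              # field used by the handler above
}

resp = requests.post(f"{BASE_URL}/generate", json=payload, timeout=120)
resp.raise_for_status()

# On success the handler returns {"generated_text": ...}; on failure it raises a 500.
print(resp.json()["generated_text"])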