Update app.py
app.py CHANGED
@@ -57,14 +57,14 @@ def generate_synthetic_math_problems(num_problems):
     return problems
 
 @spaces.GPU(duration=60)
-def solve_problem(problem):
+def solve_problem(problem, max_length):
     print(f"Solving problem: {problem}")
     with torch.no_grad():
         # Encode the problem
         inputs = tokenizer(problem, return_tensors="pt").to(device)
 
         # Generate a response from the model
-        outputs = model.generate(inputs["input_ids"], max_length=
+        outputs = model.generate(inputs["input_ids"], max_length=max_length, num_return_sequences=1, do_sample=True)
 
         # Decode the response
         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
@@ -79,12 +79,12 @@ def solve_problem(problem):
     return answer
 
 @spaces.GPU(duration=120)
-def generate_and_solve_problems(num_problems):
+def generate_and_solve_problems(num_problems, max_length):
     problems = generate_synthetic_math_problems(num_problems)
     solved_problems = []
 
     for problem in problems:
-        answer = solve_problem(problem)
+        answer = solve_problem(problem, max_length)
         solved_problems.append({
             "problem": problem,
             "answer": answer
@@ -92,15 +92,18 @@ def generate_and_solve_problems(num_problems):
 
     return solved_problems
 
-def gradio_interface(num_problems):
-    print(f"Generating and solving {num_problems} problems...")
-    solved_problems = generate_and_solve_problems(num_problems)
+def gradio_interface(num_problems, max_length):
+    print(f"Generating and solving {num_problems} problems with max length {max_length}...")
+    solved_problems = generate_and_solve_problems(num_problems, max_length)
     return json.dumps(solved_problems, indent=4)
 
 # Create a Gradio interface
 iface = gr.Interface(
     fn=gradio_interface,
-    inputs=
+    inputs=[
+        gr.Number(label="Number of Problems", value=10, precision=0),
+        gr.Slider(label="Max Output Length", minimum=10, maximum=200, value=50)
+    ],
     outputs=gr.Textbox(label="Generated and Solved Problems"),
     title="Synthetic Math Problem Generator and Solver",
     description="Generate and solve synthetic math problems using a HuggingFace model."
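Taken together, the change threads a user-selectable max_length from the Gradio UI through gradio_interface, generate_and_solve_problems, and solve_problem down to model.generate, and replaces the single inputs= component with a list holding a gr.Number and a gr.Slider. The diff does not show the rest of app.py (imports, model and tokenizer loading, generate_synthetic_math_problems, or the launch call); the sketch below is plausible scaffolding so the hunks can be read in context. The checkpoint name and the problem template are assumptions for illustration, not part of this commit.

# Plausible scaffolding around the diffed functions; the checkpoint and the
# problem generator below are assumptions, not taken from this commit.
import json
import random

import gradio as gr
import spaces  # required for the @spaces.GPU decorators used above
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

model_name = "gpt2"  # hypothetical checkpoint for illustration
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

def generate_synthetic_math_problems(num_problems):
    # Build simple arithmetic prompts; the real Space may generate richer problems.
    problems = []
    for _ in range(num_problems):
        a, b = random.randint(1, 100), random.randint(1, 100)
        problems.append(f"What is {a} + {b}?")
    return problems

After these definitions, the functions and the iface = gr.Interface(...) block from the new version of the diff follow unchanged, and the file would typically end with iface.launch(). Passing max_length explicitly through the call chain keeps the GPU-decorated functions stateless, so the Slider value reaches model.generate without relying on module-level globals.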