import gradio as gr


def process_input(user_input):
    """Process user input through the model and return the result."""
    # `model` and `tokenizer` are assumed to have been loaded in an earlier notebook cell.
    messages = [{"role": "user", "content": user_input}]

    # Build the chat-formatted prompt and generate a completion.
    input_tensor = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(
        input_tensor, max_new_tokens=300, pad_token_id=tokenizer.eos_token_id
    )

    # Decode only the newly generated tokens, skipping the prompt.
    result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)

    return result
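
# Illustrative sanity check of process_input before wiring up the UI. The emoji equation is a
# made-up stand-in (not a value from this notebook), and `model`/`tokenizer` must already be
# loaded from the earlier cells for this call to run.
print(process_input("🍎 ÷ (🍐 - 🍊) = 2, 🍐 = 7, 🍊 = 3"))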

demo = gr.Interface(
    fn=process_input,
    inputs=gr.Textbox(placeholder="Enter your equation (e.g. 🍎 ÷ (🍐 - 🍊) = 2, 🍐 = 7, 🍊 = 3)"),
    outputs=gr.Textbox(label="Model Output"),
    title="Emoji Math Solver",
    description="Enter a math equation with emojis, and the model will solve it.",
)

demo.launch(share=True)

# Exported form of the `%pip install peft` notebook magic.
get_ipython().run_line_magic('pip', 'install peft')

from peft import PeftModel
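
# PeftModel is not used directly in this cell. As a minimal, hedged sketch of how a fine-tuned
# adapter could be attached to its base model for inference (the repo ids below are placeholders,
# not values from this notebook); defined here but not called:
def load_finetuned_model(base_id="<base-model-id>", adapter_id="<username>/<adapter-repo>"):
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained(base_id)
    base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
    # Wrap the base model with the trained adapter weights.
    return PeftModel.from_pretrained(base, adapter_id), tok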

import os
from getpass import getpass
from huggingface_hub import HfApi, Repository
import re


hf_token = getpass("Enter your Hugging Face token: ")
api = HfApi(token=hf_token)

space_name = input("Enter your Hugging Face Space name (username/space-name): ")

# Pull the source of every previously executed cell from IPython's `In` history list.
from IPython import get_ipython

cells = get_ipython().user_ns.get('In', [])

gradio_code = []
in_gradio_block = False

for cell in cells:
    # Start collecting at the first cell that defines the Gradio app.
    if 'import gradio' in cell or 'gr.Interface' in cell:
        in_gradio_block = True

    if in_gradio_block:
        gradio_code.append(cell)
        # Stop collecting once a cell launches the app, so unrelated later cells
        # (including this deployment code) are not copied into app.py.
        if 'if __name__' in cell or 'demo.launch(' in cell:
            in_gradio_block = False

combined_code = "\n\n".join(gradio_code)

# Make sure the generated app.py launches the interface when run as a script.
if 'if __name__ == "__main__"' not in combined_code:
    combined_code += '\n\nif __name__ == "__main__":\n    demo.launch()'

with open("app.py", "w") as f:
    f.write(combined_code)

print("Extracted Gradio code and saved to app.py")

# Clone the Space repo with huggingface_hub's git-based Repository helper.
# (Repository is the legacy workflow; newer huggingface_hub releases favor the
# HfApi upload methods, sketched after the push below.)
repo = Repository(
    local_dir="space_repo",
    clone_from=f"https://huggingface.co/spaces/{space_name}",
    token=hf_token,
    git_user="marwashahid",
    git_email="[email protected]",
)

import shutil

shutil.copy("app.py", "space_repo/app.py")

# NOTE: only gradio is pinned here; for the Space to actually run app.py, the model-loading
# dependencies (e.g. transformers, torch, peft) would also need to be listed.
requirements = """
gradio>=3.50.2
"""
with open("space_repo/requirements.txt", "w") as f:
    f.write(requirements)

repo.git_add()
repo.git_commit("Update from Kaggle notebook")
repo.git_push()

print(f"Successfully deployed to https://huggingface.co/spaces/{space_name}")