jashu827 committed on
Commit 77786d3 · verified · 1 Parent(s): 1200fcf

Update app.py


changed model to Code Llama 7B (quantized, via llama-cpp)

Files changed (1)
  1. app.py +23 -20
app.py CHANGED
@@ -1,26 +1,29 @@
  import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM
- import torch
+ from llama_cpp import Llama

- # Load the small model (WizardCoder-1B)
- model_name = "WizardLM/WizardCoder-1B-V1.0"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu")
+ # Path to model (downloaded automatically from HF hub)
+ MODEL_PATH = "TheBloke/CodeLlama-7B-Instruct-GGUF"

- def generate_manim_code(prompt):
-     inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
-     outputs = model.generate(**inputs, max_new_tokens=256, temperature=0.7)
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return response
+ # Load LLM (first time takes time)
+ llm = Llama.from_pretrained(
+     repo_id=MODEL_PATH,
+     filename="codellama-7b-instruct.Q4_K_M.gguf",  # 4-bit quantized version
+     n_ctx=2048,
+     verbose=True
+ )

- description = "💡 Enter a prompt like: *Write a Manim script to animate a growing square*"
+ # Prompt wrapper
+ def build_prompt(user_prompt):
+     return f"[INST] Write Python code using the Manim library: {user_prompt} [/INST]"

- iface = gr.Interface(
-     fn=generate_manim_code,
-     inputs=gr.Textbox(label="Prompt", placeholder="e.g., Create a bouncing ball using Manim"),
-     outputs=gr.Textbox(label="Generated Manim Code"),
-     title="🎬 Manim Code Generator",
-     description=description
- )
+ def generate_code(prompt):
+     result = llm(build_prompt(prompt), max_tokens=512, temperature=0.7, stop=["</s>"])
+     return result["choices"][0]["text"]

- iface.launch()
+ gr.Interface(
+     fn=generate_code,
+     inputs=gr.Textbox(label="Prompt", placeholder="e.g., Animate a bouncing ball in Manim"),
+     outputs=gr.Textbox(label="Generated Python Code"),
+     title="🐍 Manim Code Generator - Code Llama 7B",
+     description="Powered by llama-cpp and Code Llama 7B (Quantized). Runs on CPU!"
+ ).launch()
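
As a quick sanity check of the new inference path, the snippet below reproduces it outside Gradio: it loads the same quantized GGUF file with llama-cpp-python and generates Manim code for one prompt. This is a minimal sketch, not part of the commit; it assumes llama-cpp-python and huggingface_hub are installed and that the Q4_K_M file can be downloaded from the Hub on first run.

# Standalone sketch (not part of the commit): exercise the same llama-cpp
# inference path outside Gradio. Assumes llama-cpp-python and huggingface_hub
# are installed and the quantized weights can be fetched from the Hub.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="TheBloke/CodeLlama-7B-Instruct-GGUF",
    filename="codellama-7b-instruct.Q4_K_M.gguf",  # same 4-bit file as app.py
    n_ctx=2048,
)

# Code Llama Instruct expects the [INST] ... [/INST] wrapper used in build_prompt().
prompt = "[INST] Write Python code using the Manim library: animate a growing square [/INST]"
result = llm(prompt, max_tokens=512, temperature=0.7, stop=["</s>"])
print(result["choices"][0]["text"])

On CPU-only hardware, generating up to 512 tokens from a 7B model is slow, which is presumably why app.py loads the model once at module import rather than per request.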