yukimama committed on
Commit
3b2ce0d
·
verified ·
1 Parent(s): cbd5934

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -19
app.py CHANGED
@@ -1,26 +1,25 @@
1
- import gradio as gr
2
  from transformers import GPT2LMHeadModel, GPT2Tokenizer
3
 
4
# Load the pre-trained GPT-2 tokenizer and model once at import time so every
# request reuses the same weights.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
7
 
8
def generate_text(prompt, max_length=50):
    """Return a GPT-2 continuation of *prompt*, truncated to *max_length* tokens."""
    encoded = tokenizer.encode(prompt, return_tensors="pt")
    sequences = model.generate(encoded, max_length=max_length, num_return_sequences=1)
    # Decode the single returned sequence, dropping special tokens.
    return tokenizer.decode(sequences[0], skip_special_tokens=True)
13
-
14
def predict(input_text):
    """Gradio callback: generate a completion for the submitted text."""
    return generate_text(input_text)
 
 
 
 
 
17
 
18
# Build the web UI: a two-line text box feeding `predict`, plain-text output.
text_input = gr.Textbox(lines=2, placeholder="Enter your text here...")
iface = gr.Interface(
    fn=predict,
    inputs=text_input,
    outputs="text",
    title="GPT2 Text Generation",
    description="Generate text using the GPT2 model.",
)

iface.launch()
 
 
 
 
1
  from transformers import GPT2LMHeadModel, GPT2Tokenizer
2
 
3
+
4
# Load GPT-2 weights and the matching tokenizer once at module import.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
6
 
7
def generate_code(prompt, max_length=200):
    """Generate Python code for *prompt* with GPT-2 and return the fenced block.

    The prompt itself opens a ```python fence, so the extraction below slices
    from just after that fence up to the next closing ``` (or to the end of
    the generated text if the model never closes the fence).

    Parameters:
        prompt: natural-language description of the code to generate.
        max_length: maximum total token length of the generated sequence.

    Returns:
        The extracted code snippet as a stripped string.
    """
    full_prompt = f"Generate Python code for {prompt}:```python\n"
    input_ids = tokenizer.encode(full_prompt, return_tensors="pt")
    # Fix: temperature only takes effect with do_sample=True — without it,
    # transformers decodes greedily and silently ignores the setting.
    # pad_token_id is set explicitly because GPT-2 has no pad token, which
    # otherwise triggers a warning at generation time.
    output = model.generate(
        input_ids,
        max_length=max_length,
        num_return_sequences=1,
        temperature=0.7,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Fix: skip special tokens so <|endoftext|> does not leak into the result.
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

    fence = "```python"
    fence_pos = generated_text.find(fence)
    # Fix: guard the -1 case — the original computed start = -1 + len(fence),
    # silently slicing from the wrong offset if the fence was absent.
    start = fence_pos + len(fence) if fence_pos != -1 else 0
    end = generated_text.find("```", start)
    if end == -1:
        end = len(generated_text)
    return generated_text[start:end].strip()
21
 
 
 
 
 
 
 
 
22
 
23
# Fix: guard the demo run so importing this module does not trigger a model
# generation and print as a side effect.
if __name__ == "__main__":
    prompt = "a function to calculate the factorial of a number"
    malicious_code = generate_code(prompt)
    print(malicious_code)