Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,74 +1,26 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 from playwright.sync_api import sync_playwright
-
-from jax import random
-import jax
-import jax.numpy as jnp
+import json
 
-# Define
+# Define model and inference parameters
 MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
 MAX_LENGTH = 512
 NUM_BEAMS = 5
-
-# Define Flax model for action generation
-class ActionModel(nn.Module):
-    vocab_size: int
-    hidden_size: int
-    num_layers: int
-
-    def setup(self):
-        self.embedding = nn.Embed(self.vocab_size, self.hidden_size)
-        self.lstm = nn.LSTM(self.hidden_size, self.hidden_size, num_layers=self.num_layers)
-        self.dense = nn.Dense(self.vocab_size)
-
-    def __call__(self, inputs, init_state):
-        embedded = self.embedding(inputs)
-        output, new_state = self.lstm(embedded, init_state)
-        logits = self.dense(output)
-        return logits, new_state
-
-# Initialize Flax model and get its initial state
-vocab_size = 50257 # Adjust this if needed for Zephyr-7b-beta
-hidden_size = 1024
-num_layers = 2
-key = random.PRNGKey(0)
-model = ActionModel(vocab_size, hidden_size, num_layers)
-init_state = model.lstm.initialize_carry(key, (1, hidden_size))
+TEMPERATURE = 0.7
 
 # Function to generate actions using Zephyr-7b-beta model
-def generate_actions(input_text
-    # Load Zephyr-7b-beta model
+def generate_actions(input_text):
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 
-    #
-    inputs = tokenizer(input_text, return_tensors="pt")
-    inputs = inputs.to(model.device)
-
-    # Generate response (use pipeline for Zephyr-7b-beta)
+    # Use pipeline for text generation
     generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
-    outputs = generator(input_text, max_length=MAX_LENGTH, num_beams=NUM_BEAMS, temperature=
+    outputs = generator(input_text, max_length=MAX_LENGTH, num_beams=NUM_BEAMS, temperature=TEMPERATURE)
 
-    # Decode response and extract actions
     response = outputs[0]['generated_text']
     actions = response.split("\n")
-
-    # Perform actions
-    for action in actions:
-        if "open website" in action:
-            website = action.split(" ")[-1]
-            page.goto(website)
-        elif "click" in action:
-            selector = action.split(" ")[-1]
-            page.click(selector)
-        elif "type" in action:
-            text = action.split(" ")[-1]
-            page.type(text)
-        elif "submit" in action:
-            page.press("Enter")
-        else:
-            print(f"Action not recognized: {action}")
+    return actions
 
 # Function to initialize browser and page
 def initialize_browser():
@@ -81,7 +33,23 @@ def initialize_browser():
 def run_agent(input_text):
     with sync_playwright() as p:
         browser, page = initialize_browser()
-        generate_actions(input_text
+        actions = generate_actions(input_text)
+
+        for action in actions:
+            if "open website" in action:
+                website = action.split(" ")[-1]
+                page.goto(website)
+            elif "click" in action:
+                selector = action.split(" ")[-1]
+                page.click(selector)
+            elif "type" in action:
+                text = action.split(" ")[-1]
+                page.type(text)
+            elif "submit" in action:
+                page.press("Enter")
+            else:
+                print(f"Action not recognized: {action}")
+
        return f"Successfully executed actions based on: {input_text}"
 
 iface = gr.Interface(
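
A note on the updated generation call: in transformers, `temperature` only influences generation when sampling is enabled, so `num_beams=NUM_BEAMS` combined with `temperature=TEMPERATURE` and the default `do_sample=False` runs plain beam search and the temperature setting is ignored (recent versions log a warning about it). If sampled output is what is intended, a minimal variant of the call, using the same constants defined in app.py plus `do_sample` as an assumption about that intent, would be:

outputs = generator(
    input_text,
    max_length=MAX_LENGTH,
    num_beams=NUM_BEAMS,
    do_sample=True,          # temperature only takes effect when sampling is enabled
    temperature=TEMPERATURE,
)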
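
On the Playwright side, `Page.type()` and `Page.press()` in the sync API take a selector as their first argument, so the "type" and "submit" branches in the new dispatch loop would still fail at runtime. Below is a minimal sketch of the same loop with explicit selectors; the selector value is a placeholder for illustration and is not part of the commit:

from playwright.sync_api import Page

INPUT_SELECTOR = "input[type='text']"  # hypothetical target field, not specified in app.py

def dispatch_action(page: Page, action: str) -> None:
    # Route one model-generated action string to the corresponding browser call
    if "open website" in action:
        page.goto(action.split(" ")[-1])
    elif "click" in action:
        page.click(action.split(" ")[-1])                 # last token is treated as the selector
    elif "type" in action:
        page.type(INPUT_SELECTOR, action.split(" ")[-1])  # type(selector, text)
    elif "submit" in action:
        page.keyboard.press("Enter")                      # press Enter without naming an element
    else:
        print(f"Action not recognized: {action}")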
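
Finally, the diff cuts off at `iface = gr.Interface(`, so the interface arguments are not visible here. For a Space that exposes `run_agent` as its entry point, a typical wiring, given as an assumption for illustration rather than the commit's actual code, looks like:

import gradio as gr

# Hypothetical interface definition; the real arguments are truncated in the diff above
iface = gr.Interface(
    fn=run_agent,                            # function defined earlier in app.py
    inputs=gr.Textbox(label="Instruction"),  # free-form instruction for the agent
    outputs=gr.Textbox(label="Result"),      # status string returned by run_agent
)

if __name__ == "__main__":
    iface.launch()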