Spaces:
Runtime error
Runtime error
0.55 Spreadsheet setting vars
Browse files
app.py
CHANGED
|
@@ -41,28 +41,11 @@ challenger_models = [{"id": "NousResearch/Meta-Llama-3.1-8B-Instruct",
|
|
| 41 |
challenger_model = choice(challenger_models)
|
| 42 |
model_info.append(challenger_model)
|
| 43 |
shuffle(model_info)
|
|
|
|
|
|
|
| 44 |
|
| 45 |
device = "cuda"
|
| 46 |
|
| 47 |
-
def get_google_credentials():
|
| 48 |
-
service_account_info = {
|
| 49 |
-
"type": "service_account",
|
| 50 |
-
"project_id": os.environ.get("GOOGLE_PROJECT_ID"),
|
| 51 |
-
"private_key_id": os.environ.get("GOOGLE_PRIVATE_KEY_ID"),
|
| 52 |
-
"private_key": os.environ.get("GOOGLE_PRIVATE_KEY").replace('\\n', '\n'),
|
| 53 |
-
"client_email": os.environ.get("GOOGLE_CLIENT_EMAIL"),
|
| 54 |
-
"client_id": os.environ.get("GOOGLE_CLIENT_ID"),
|
| 55 |
-
"auth_uri": os.environ.get("GOOGLE_AUTH_URI"),
|
| 56 |
-
"token_uri": os.environ.get("GOOGLE_TOKEN_URI"),
|
| 57 |
-
"auth_provider_x509_cert_url": os.environ.get("GOOGLE_AUTH_PROVIDER_CERT_URL"),
|
| 58 |
-
"client_x509_cert_url": os.environ.get("GOOGLE_CLIENT_CERT_URL")
|
| 59 |
-
}
|
| 60 |
-
|
| 61 |
-
credentials = Credentials.from_service_account_info(service_account_info)
|
| 62 |
-
return credentials
|
| 63 |
-
|
| 64 |
-
logging.debug(get_google_credentials())
|
| 65 |
-
|
| 66 |
try:
|
| 67 |
tokenizer_a = AutoTokenizer.from_pretrained(model_info[0]['id'])
|
| 68 |
model_a = AutoModelForCausalLM.from_pretrained(
|
|
@@ -84,6 +67,31 @@ except Exception as e:
|
|
| 84 |
logging.error(f'{SPACER} Error: {e}, Traceback {traceback.format_exc()}')
|
| 85 |
|
| 86 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 87 |
def apply_pharia_template(messages, add_generation_prompt=False):
|
| 88 |
"""Chat template not defined in Pharia model configs.
|
| 89 |
Adds chat template for Pharia. Expects a list of messages.
|
|
@@ -109,7 +117,10 @@ def apply_pharia_template(messages, add_generation_prompt=False):
|
|
| 109 |
|
| 110 |
|
| 111 |
@spaces.GPU()
|
| 112 |
-
def generate_both(system_prompt, input_text,
|
|
|
|
|
|
|
|
|
|
| 113 |
try:
|
| 114 |
text_streamer_a = TextIteratorStreamer(tokenizer_a, skip_prompt=True)
|
| 115 |
text_streamer_b = TextIteratorStreamer(tokenizer_b, skip_prompt=True)
|
|
@@ -229,21 +240,29 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
|
|
| 229 |
except Exception as e:
|
| 230 |
logging.error(f'{SPACER} Error: {e}, Traceback {traceback.format_exc()}')
|
| 231 |
|
|
|
|
| 232 |
return chatbot_a, chatbot_b
|
| 233 |
|
| 234 |
def clear():
|
| 235 |
return [], []
|
| 236 |
|
| 237 |
-
def
|
| 238 |
if selection == "Bot A kicks ass!":
|
| 239 |
-
chatbot_a.append(["π", f"Thanks, man. I am {
|
| 240 |
-
chatbot_b.append(["π©", f"Pffff β¦ I am {
|
|
|
|
|
|
|
| 241 |
elif selection == "Bot B crushes it!":
|
| 242 |
-
chatbot_a.append(["π€‘", f"Rigged β¦ I am {
|
| 243 |
-
chatbot_b.append(["π₯", f"Well deserved! I am {
|
|
|
|
|
|
|
| 244 |
else:
|
| 245 |
-
chatbot_a.append(["π€", f"Lame β¦ I am {
|
| 246 |
-
chatbot_b.append(["π€", f"Dunno. I am {
|
|
|
|
|
|
|
|
|
|
| 247 |
return chatbot_a, chatbot_b
|
| 248 |
|
| 249 |
with gr.Blocks() as demo:
|
|
@@ -285,7 +304,7 @@ with gr.Blocks() as demo:
|
|
| 285 |
outputs=[system_prompt]
|
| 286 |
)
|
| 287 |
|
| 288 |
-
better_bot.select(
|
| 289 |
input_text.submit(generate_both, inputs=[system_prompt, input_text, chatbot_a, chatbot_b, max_new_tokens, temperature, top_p, repetition_penalty], outputs=[chatbot_a, chatbot_b])
|
| 290 |
submit_btn.click(generate_both, inputs=[system_prompt, input_text, chatbot_a, chatbot_b, max_new_tokens, temperature, top_p, repetition_penalty], outputs=[chatbot_a, chatbot_b])
|
| 291 |
clear_btn.click(clear, outputs=[chatbot_a, chatbot_b])
|
|
|
|
| 41 |
challenger_model = choice(challenger_models)
|
| 42 |
model_info.append(challenger_model)
|
| 43 |
shuffle(model_info)
|
| 44 |
+
chatbot_a_name = model_info[0]['name']
|
| 45 |
+
chatbot_b_name = model_info[1]['name']
|
| 46 |
|
| 47 |
device = "cuda"
|
| 48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
try:
|
| 50 |
tokenizer_a = AutoTokenizer.from_pretrained(model_info[0]['id'])
|
| 51 |
model_a = AutoModelForCausalLM.from_pretrained(
|
|
|
|
| 67 |
logging.error(f'{SPACER} Error: {e}, Traceback {traceback.format_exc()}')
|
| 68 |
|
| 69 |
|
| 70 |
+
def get_google_credentials():
    """Build Google service-account credentials from environment variables.

    Assembles the service-account JSON structure from individual GOOGLE_*
    environment variables and hands it to
    ``Credentials.from_service_account_info``.

    Returns:
        Credentials: a service-account credentials object usable by
        gspread / Google API clients.

    Raises:
        KeyError: if ``GOOGLE_PRIVATE_KEY`` is not set. (Previously this
            crashed with a confusing ``AttributeError`` because
            ``os.environ.get(...)`` returned ``None`` before ``.replace``.)
    """
    service_account_info = {
        "type": "service_account",
        "project_id": os.environ.get("GOOGLE_PROJECT_ID"),
        "private_key_id": os.environ.get("GOOGLE_PRIVATE_KEY_ID"),
        # The key is stored in the env var with literal "\n" sequences;
        # restore real newlines so the PEM parses. Indexing (not .get) gives
        # a clear KeyError when the variable is missing.
        "private_key": os.environ["GOOGLE_PRIVATE_KEY"].replace('\\n', '\n'),
        "client_email": os.environ.get("GOOGLE_CLIENT_EMAIL"),
        "client_id": os.environ.get("GOOGLE_CLIENT_ID"),
        "auth_uri": os.environ.get("GOOGLE_AUTH_URI"),
        "token_uri": os.environ.get("GOOGLE_TOKEN_URI"),
        "auth_provider_x509_cert_url": os.environ.get("GOOGLE_AUTH_PROVIDER_CERT_URL"),
        "client_x509_cert_url": os.environ.get("GOOGLE_CLIENT_CERT_URL"),
    }

    credentials = Credentials.from_service_account_info(service_account_info)
    return credentials
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def get_google_sheet():
    """Open the battle-log spreadsheet and return its first worksheet.

    Authorizes a gspread client with the service-account credentials and
    opens the "pharia_bot_battle_logs" spreadsheet.
    """
    gc = gspread.authorize(get_google_credentials())
    return gc.open("pharia_bot_battle_logs").sheet1  # first worksheet
|
| 93 |
+
|
| 94 |
+
|
| 95 |
def apply_pharia_template(messages, add_generation_prompt=False):
|
| 96 |
"""Chat template not defined in Pharia model configs.
|
| 97 |
Adds chat template for Pharia. Expects a list of messages.
|
|
|
|
| 117 |
|
| 118 |
|
| 119 |
@spaces.GPU()
|
| 120 |
+
def generate_both(system_prompt, input_text,
|
| 121 |
+
chatbot_a, chatbot_b,
|
| 122 |
+
max_new_tokens=2048, temperature=0.2,
|
| 123 |
+
top_p=0.9, repetition_penalty=1.1):
|
| 124 |
try:
|
| 125 |
text_streamer_a = TextIteratorStreamer(tokenizer_a, skip_prompt=True)
|
| 126 |
text_streamer_b = TextIteratorStreamer(tokenizer_b, skip_prompt=True)
|
|
|
|
| 240 |
except Exception as e:
|
| 241 |
logging.error(f'{SPACER} Error: {e}, Traceback {traceback.format_exc()}')
|
| 242 |
|
| 243 |
+
logging.debug(f'Output complete chatbot_a: {chatbot_a} chatbot_b: {chatbot_b}')
|
| 244 |
return chatbot_a, chatbot_b
|
| 245 |
|
| 246 |
def clear():
    """Reset both chat panes by returning a pair of fresh, empty histories."""
    history_a = []
    history_b = []
    return history_a, history_b
|
| 248 |
|
| 249 |
+
def handle_vote(selection, chatbot_a, chatbot_b):
    """Record the user's vote and let both bots react with their identity.

    Appends one reaction message to each chat history revealing the model
    names, then logs which model won, lost, or drew. The vote labels are
    currently only logged, not persisted.

    Args:
        selection: label of the radio option the user clicked.
        chatbot_a: chat history of bot A (list of [user, bot] pairs); mutated
            in place.
        chatbot_b: chat history of bot B; mutated in place.

    Returns:
        tuple: the two updated chat histories (gradio outputs).
    """
    # NOTE(review): the emoji / ellipsis characters below appear
    # mojibake-garbled in this view — confirm against the original file's
    # UTF-8 encoding before relying on their exact bytes.
    if selection == "Bot A kicks ass!":
        chatbot_a.append(["π", f"Thanks, man. I am {chatbot_a_name}"])
        chatbot_b.append(["π©", f"Pffff β¦ I am {chatbot_b_name}"])
        chatbot_a_vote = "winner"
        chatbot_b_vote = "loser"  # fixed typo: was "looser"
    elif selection == "Bot B crushes it!":
        chatbot_a.append(["π€‘", f"Rigged β¦ I am {chatbot_a_name}"])
        chatbot_b.append(["π₯", f"Well deserved! I am {chatbot_b_name}"])
        chatbot_a_vote = "loser"  # fixed typo: was "looser"
        chatbot_b_vote = "winner"
    else:
        chatbot_a.append(["π€", f"Lame β¦ I am {chatbot_a_name}"])
        chatbot_b.append(["π€", f"Dunno. I am {chatbot_b_name}"])
        chatbot_a_vote = "draw"
        chatbot_b_vote = "draw"
    logging.debug(f'Casting vote: {chatbot_a_name}: {chatbot_a_vote}, {chatbot_b_name}: {chatbot_b_vote}')
    return chatbot_a, chatbot_b
|
| 267 |
|
| 268 |
with gr.Blocks() as demo:
|
|
|
|
| 304 |
outputs=[system_prompt]
|
| 305 |
)
|
| 306 |
|
| 307 |
+
better_bot.select(handle_vote, inputs=[better_bot, chatbot_a, chatbot_b], outputs=[chatbot_a, chatbot_b])
|
| 308 |
input_text.submit(generate_both, inputs=[system_prompt, input_text, chatbot_a, chatbot_b, max_new_tokens, temperature, top_p, repetition_penalty], outputs=[chatbot_a, chatbot_b])
|
| 309 |
submit_btn.click(generate_both, inputs=[system_prompt, input_text, chatbot_a, chatbot_b, max_new_tokens, temperature, top_p, repetition_penalty], outputs=[chatbot_a, chatbot_b])
|
| 310 |
clear_btn.click(clear, outputs=[chatbot_a, chatbot_b])
|