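# Benchmark runner: queries each (model, prompt) pair through the matching
# provider API, stores the raw completions in SQLite, then scores them
# against per-prompt rubrics using GPT-4 as a judge.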
import sqlite3
import time
from termcolor import colored
from llmonitor import agent
from queriers import together, cohere, openai_func, openrouter, ai21, alephalpha
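# Judging from the call in ask_prompt below, every querier shares the same
# shape: querier(model, prompt) -> completion text.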
db = sqlite3.connect("./database.db")
db.row_factory = sqlite3.Row
cursor = db.cursor()
def remove_end(s, suffix):
    # Like str.removesuffix (Python 3.9+); guard against an empty suffix,
    # where s[:-0] would wrongly return "".
    if suffix and s.endswith(suffix):
        return s[:-len(suffix)]
    return s
# Fetch models
models = cursor.execute("SELECT * FROM models").fetchall()
models = [dict(model) for model in models]
# Fetch prompts
prompts = cursor.execute("SELECT * FROM prompts").fetchall()
prompts = [dict(prompt) for prompt in prompts]
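# Column assumptions, judging from usage below: model rows carry
# id/name/type/api/api_id, prompt rows carry id/type/stop (plus a text
# field read by the queriers).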
def get_results():
    results = cursor.execute("SELECT * FROM results").fetchall()
    if results:
        print(results[0].keys())  # debug: show the column names once
    return [dict(result) for result in results]
def insert_result(modelId, promptId, result, duration, rate):
    cursor.execute(
        "INSERT INTO results (model, prompt, result, duration, rate) VALUES (?, ?, ?, ?, ?)",
        (modelId, promptId, result, duration, rate),
    )
    db.commit()
def check_if_results_exist(modelId, promptId):
    results = cursor.execute(
        "SELECT * FROM results WHERE model = ? AND prompt = ? LIMIT 1", (modelId, promptId)
    ).fetchall()
    return len(results) > 0
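# Used below to make reruns resumable: (model, prompt) pairs that already
# have a stored result are skipped.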
def ask_prompt(prompt, model):
    exists = check_if_results_exist(model["id"], prompt["id"])
    if exists:
        print("Skipping, already got benchmark")
        return

    # Dispatch to the right provider wrapper based on the model's `api` column
    mapping = {
        "together": together,
        "cohere": cohere,
        "openai": openai_func,
        "openrouter": openrouter,
        "ai21": ai21,
        # "alephalpha": alephalpha,  # TODO: get a working API key
    }

    querier = mapping.get(model["api"])
    if not querier:
        print(f"No querier for {model['api']}")
        return

    print(f"Querying {model['name']}")
    start_time = time.time()

    try:
        response_text = querier(model, prompt)

        # Remove surrounding whitespace and the trailing stop sequence, if any
        cleaned = response_text.strip()
        if prompt["stop"]:
            cleaned = remove_end(cleaned, prompt["stop"])

        end_time = time.time()
        duration = end_time - start_time
        chars_per_second = round(len(response_text) / duration, 2)

        print("------------------------------------")
        print(f"Result: {cleaned}")
        print(f"Took {duration * 1000:.0f} ms ({chars_per_second} chars/s)")
        print("------------------------------------")

        insert_result(model["id"], prompt["id"], cleaned, duration * 1000, chars_per_second)
    except Exception as e:
        print(f"Error querying {model['name']}: {e}")
total_benchmarks = len(models) * len(prompts)
print(f"Running {total_benchmarks} benchmarks")
# Run prompts
# for model in models:
#     if model["type"] == "language":
#         continue
#     for prompt in prompts:
#         if prompt["type"] != "code" and model["type"] == "code":
#             print("Skipping non-code benchmark for code model")
#             continue
#         ask_prompt(prompt, model)
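# NB: with the run phase above commented out, the script only (re)scores
# results already stored in the `results` table.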
# Calculate scores
results = get_results()
@agent(name="RateResult")
def rate_result(result):
rubrics = cursor.execute(
"SELECT * FROM rubrics WHERE prompt = ?",
(result["prompt"],)
).fetchall()
has_rubrics = len(rubrics) > 0
if not has_rubrics:
return
print(colored('---------------------------', 'white'))
print(colored('----------RATING-----------', 'white'))
print(colored('---------------------------', 'white'))
print(colored(result["result"], 'cyan'))
print(colored('---------------------------', 'white'))
score = None
for rubric in rubrics:
print('Rubric: '+colored(rubric["grading"], 'magenta'))
if result["result"].strip() == "":
score = 0
else:
grading_text = (
f'You help verify that the following answer match this condition: the answer {rubric["grading"]}. Note: the answer might be imcomplete, in which case do your best to assess based on what the full result would be.\n\n'
f'\n\n--START OF THE ANSWER--\n{result["result"]}\n--END OF THE ANSWER--\n\n'
f'Take a deep breath and explain step by step how you come to the conclusion.'
f'Finally, reply on the last line with YES if the following answer matches this condition (otherwies reply NO).'
)
# get gpt-4 model
gpt4 = next((item for item in models if item['api_id'] == 'gpt-4'), None)
prompt = { }
response_text = openai_func(gpt4, {"text": grading_text})
print(colored(f"-> {response_text}", 'yellow'))
last_line = response_text.splitlines()[-1]
# If it includes a yes, then it's valid
if "YES" in last_line:
print(colored(f'Valid! + {rubric["points"]} points', 'green'))
score = rubric["points"] if score is None else score + rubric["points"]
print('Final score: '+colored(score, 'cyan'))
return score
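# rate_result accumulates points across rubrics; it returns None when the
# prompt has no rubrics (or no rubric passed), which leaves the row unscored
# so it will be retried on the next run.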
# Only rate results that have not been scored yet (a legitimate score of 0
# must not be re-rated, hence the explicit None check)
for result in results:
    if result["score"] is None:
        score = rate_result(result)
        if score is not None:
            cursor.execute(
                "UPDATE results SET score = ? WHERE id = ?",
                (score, result["id"]),
            )
            db.commit()

db.close()