"""
Author: Joon Sung Park ([email protected])
File: gpt_structure.py
Description: Wrapper functions for calling OpenAI APIs.
"""
import json
import random
import openai
import time
from utils import *
openai.api_key = openai_api_key
def temp_sleep(seconds=0.1):
time.sleep(seconds)
def ChatGPT_single_request(prompt):
temp_sleep()
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}]
)
return completion["choices"][0]["message"]["content"]
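# A minimal usage sketch for the wrapper above (assumes openai_api_key in
# utils is a valid key):
#
#   reply = ChatGPT_single_request("Say hello in exactly one word.")
#   print(reply)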
# ============================================================================
# #####################[SECTION 1: CHATGPT-3 STRUCTURE] ######################
# ============================================================================
def GPT4_request(prompt):
"""
Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
server and returns the response.
ARGS:
prompt: a str prompt
gpt_parameter: a python dictionary with the keys indicating the names of
the parameter and the values indicating the parameter
values.
RETURNS:
a str of GPT-3's response.
"""
temp_sleep()
try:
completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "user", "content": prompt}]
)
return completion["choices"][0]["message"]["content"]
except:
print ("ChatGPT ERROR")
return "ChatGPT ERROR"
def ChatGPT_request(prompt):
"""
Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
server and returns the response.
ARGS:
prompt: a str prompt
gpt_parameter: a python dictionary with the keys indicating the names of
the parameter and the values indicating the parameter
values.
RETURNS:
a str of GPT-3's response.
"""
# temp_sleep()
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}]
)
return completion["choices"][0]["message"]["content"]
except:
print ("ChatGPT ERROR")
return "ChatGPT ERROR"
def GPT4_safe_generate_response(prompt,
example_output,
special_instruction,
repeat=3,
fail_safe_response="error",
func_validate=None,
func_clean_up=None,
verbose=False):
prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
prompt += "Example output json:\n"
prompt += '{"output": "' + str(example_output) + '"}'
if verbose:
print ("CHAT GPT PROMPT")
print (prompt)
for i in range(repeat):
try:
curr_gpt_response = GPT4_request(prompt).strip()
end_index = curr_gpt_response.rfind('}') + 1
curr_gpt_response = curr_gpt_response[:end_index]
curr_gpt_response = json.loads(curr_gpt_response)["output"]
if func_validate(curr_gpt_response, prompt=prompt):
return func_clean_up(curr_gpt_response, prompt=prompt)
if verbose:
print ("---- repeat count: \n", i, curr_gpt_response)
print (curr_gpt_response)
print ("~~~~")
except:
pass
return False
def ChatGPT_safe_generate_response(prompt,
example_output,
special_instruction,
repeat=3,
fail_safe_response="error",
func_validate=None,
func_clean_up=None,
verbose=False):
# prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
prompt = '"""\n' + prompt + '\n"""\n'
prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
prompt += "Example output json:\n"
prompt += '{"output": "' + str(example_output) + '"}'
if verbose:
print ("CHAT GPT PROMPT")
print (prompt)
for i in range(repeat):
try:
curr_gpt_response = ChatGPT_request(prompt).strip()
end_index = curr_gpt_response.rfind('}') + 1
curr_gpt_response = curr_gpt_response[:end_index]
curr_gpt_response = json.loads(curr_gpt_response)["output"]
# print ("---ashdfaf")
# print (curr_gpt_response)
# print ("000asdfhia")
if func_validate(curr_gpt_response, prompt=prompt):
return func_clean_up(curr_gpt_response, prompt=prompt)
if verbose:
print ("---- repeat count: \n", i, curr_gpt_response)
print (curr_gpt_response)
print ("~~~~")
except:
pass
return False
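# A minimal usage sketch for ChatGPT_safe_generate_response, with hypothetical
# callbacks (any validator/cleaner with this signature works; assumes a valid
# API key):
#
#   def _validate(resp, prompt=""):
#     return len(resp.strip()) > 0
#
#   def _clean_up(resp, prompt=""):
#     return resp.strip()
#
#   summary = ChatGPT_safe_generate_response(
#     "Summarize: the cat sat on the mat.",
#     example_output="a cat resting on a mat",
#     special_instruction="Answer in one short phrase.",
#     repeat=3,
#     fail_safe_response="error",
#     func_validate=_validate,
#     func_clean_up=_clean_up)
#   # Returns the cleaned "output" field on success, or False if every retry
#   # fails validation or JSON parsing.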
def ChatGPT_safe_generate_response_OLD(prompt,
repeat=3,
fail_safe_response="error",
func_validate=None,
func_clean_up=None,
verbose=False):
if verbose:
print ("CHAT GPT PROMPT")
print (prompt)
for i in range(repeat):
try:
curr_gpt_response = ChatGPT_request(prompt).strip()
if func_validate(curr_gpt_response, prompt=prompt):
return func_clean_up(curr_gpt_response, prompt=prompt)
if verbose:
print (f"---- repeat count: {i}")
print (curr_gpt_response)
print ("~~~~")
except:
pass
print ("FAIL SAFE TRIGGERED")
return fail_safe_response
# ============================================================================
# ###################[SECTION 2: ORIGINAL GPT-3 STRUCTURE] ###################
# ============================================================================
def GPT_request(prompt, gpt_parameter):
"""
Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
server and returns the response.
ARGS:
prompt: a str prompt
gpt_parameter: a python dictionary with the keys indicating the names of
the parameter and the values indicating the parameter
values.
RETURNS:
a str of GPT-3's response.
"""
temp_sleep()
try:
response = openai.Completion.create(
model=gpt_parameter["engine"],
prompt=prompt,
temperature=gpt_parameter["temperature"],
max_tokens=gpt_parameter["max_tokens"],
top_p=gpt_parameter["top_p"],
frequency_penalty=gpt_parameter["frequency_penalty"],
presence_penalty=gpt_parameter["presence_penalty"],
stream=gpt_parameter["stream"],
stop=gpt_parameter["stop"],)
return response.choices[0].text
except:
print ("TOKEN LIMIT EXCEEDED")
return "TOKEN LIMIT EXCEEDED"
def generate_prompt(curr_input, prompt_lib_file):
"""
Takes in the current input (e.g. comment that you want to classifiy) and
the path to a prompt file. The prompt file contains the raw str prompt that
will be used, which contains the following substr: !<INPUT>! -- this
function replaces this substr with the actual curr_input to produce the
final promopt that will be sent to the GPT3 server.
ARGS:
curr_input: the input we want to feed in (IF THERE ARE MORE THAN ONE
INPUT, THIS CAN BE A LIST.)
prompt_lib_file: the path to the promopt file.
RETURNS:
a str prompt that will be sent to OpenAI's GPT server.
"""
if type(curr_input) == type("string"):
curr_input = [curr_input]
curr_input = [str(i) for i in curr_input]
f = open(prompt_lib_file, "r")
prompt = f.read()
f.close()
for count, i in enumerate(curr_input):
prompt = prompt.replace(f"!<INPUT {count}>!", i)
if "<commentblockmarker>###</commentblockmarker>" in prompt:
prompt = prompt.split("<commentblockmarker>###</commentblockmarker>")[1]
return prompt.strip()
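# A small illustration of the !<INPUT n>! convention (hypothetical template
# contents; the real templates live under persona/prompt_template/):
#
#   template file:
#     Variables: !<INPUT 0>! = action, !<INPUT 1>! = name
#     <commentblockmarker>###</commentblockmarker>
#     !<INPUT 1>! is currently !<INPUT 0>!.
#
#   generate_prompt(["reading a book", "Klaus"], template_path)
#   # -> "Klaus is currently reading a book."
#   # Everything before the <commentblockmarker> line is dropped.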
def safe_generate_response(prompt,
gpt_parameter,
repeat=5,
fail_safe_response="error",
func_validate=None,
func_clean_up=None,
verbose=False):
if verbose:
print (prompt)
for i in range(repeat):
curr_gpt_response = GPT_request(prompt, gpt_parameter)
if func_validate(curr_gpt_response, prompt=prompt):
return func_clean_up(curr_gpt_response, prompt=prompt)
if verbose:
print ("---- repeat count: ", i, curr_gpt_response)
print (curr_gpt_response)
print ("~~~~")
return fail_safe_response
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
if not text:
text = "this is blank"
return openai.Embedding.create(
input=[text], model=model)['data'][0]['embedding']
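# Usage sketch for get_embedding (assumes the API key is set;
# text-embedding-ada-002 returns a 1536-dimensional vector):
#
#   vec = get_embedding("reading a book in the library")
#   print(len(vec))  # 1536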
if __name__ == '__main__':
gpt_parameter = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0,
"stop": ['"']}
curr_input = ["driving to a friend's house"]
prompt_lib_file = "prompt_template/test_prompt_July5.txt"
prompt = generate_prompt(curr_input, prompt_lib_file)
def __func_validate(gpt_response):
if len(gpt_response.strip()) <= 1:
return False
if len(gpt_response.strip().split(" ")) > 1:
return False
return True
def __func_clean_up(gpt_response):
cleaned_response = gpt_response.strip()
return cleaned_response
output = safe_generate_response(prompt,
gpt_parameter,
5,
"rest",
__func_validate,
__func_clean_up,
True)
print (output)
# ============================================================================
# End of file: reverie/backend_server/persona/prompt_template/gpt_structure.py
# (repo: generative_agents-main). The next file, run_gpt_prompt.py, follows.
# ============================================================================
"""
Author: Joon Sung Park ([email protected])
File: run_gpt_prompt.py
Description: Defines all run gpt prompt functions. These functions directly
interface with the safe_generate_response function.
"""
import re
import datetime
import sys
import ast
import random
import string  # random and string are used by get_random_alphanumeric below
sys.path.append('../../')
from global_methods import *
from persona.prompt_template.gpt_structure import *
from persona.prompt_template.print_prompt import *
def get_random_alphanumeric(i=6, j=6):
"""
Returns a random alpha numeric strength that has the length of somewhere
between i and j.
INPUT:
i: min_range for the length
j: max_range for the length
OUTPUT:
an alpha numeric str with the length of somewhere between i and j.
"""
k = random.randint(i, j)
x = ''.join(random.choices(string.ascii_letters + string.digits, k=k))
return x
##############################################################################
# CHAPTER 1: Run GPT Prompt
##############################################################################
def run_gpt_prompt_wake_up_hour(persona, test_input=None, verbose=False):
"""
Given the persona, returns an integer that indicates the hour when the
persona wakes up.
INPUT:
persona: The Persona class instance
OUTPUT:
integer for the wake up hour.
"""
def create_prompt_input(persona, test_input=None):
if test_input: return test_input
prompt_input = [persona.scratch.get_str_iss(),
persona.scratch.get_str_lifestyle(),
persona.scratch.get_str_firstname()]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = int(gpt_response.strip().lower().split("am")[0])
return cr
def __func_validate(gpt_response, prompt=""):
try: __func_clean_up(gpt_response, prompt="")
except: return False
return True
def get_fail_safe():
fs = 8
return fs
gpt_param = {"engine": "text-davinci-002", "max_tokens": 5,
"temperature": 0.8, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/wake_up_hour_v1.txt"
prompt_input = create_prompt_input(persona, test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
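# The run_gpt_prompt_* functions in this file share the same return
# convention: (output, [output, prompt, gpt_param, prompt_input, fail_safe]).
# A hedged usage sketch, assuming an initialized Persona instance `persona`:
#
#   wake_up_hour, details = run_gpt_prompt_wake_up_hour(persona)
#   # wake_up_hour is an int such as 7; details[1] is the raw prompt sent.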
def run_gpt_prompt_daily_plan(persona,
wake_up_hour,
test_input=None,
verbose=False):
"""
Basically the long term planning that spans a day. Returns a list of actions
that the persona will take today. Usually comes in the following form:
'wake up and complete the morning routine at 6:00 am',
'eat breakfast at 7:00 am',..
Note that the actions come without a period.
INPUT:
persona: The Persona class instance
OUTPUT:
a list of daily actions in broad strokes.
"""
def create_prompt_input(persona, wake_up_hour, test_input=None):
if test_input: return test_input
prompt_input = []
prompt_input += [persona.scratch.get_str_iss()]
prompt_input += [persona.scratch.get_str_lifestyle()]
prompt_input += [persona.scratch.get_str_curr_date_str()]
prompt_input += [persona.scratch.get_str_firstname()]
prompt_input += [f"{str(wake_up_hour)}:00 am"]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = []
_cr = gpt_response.split(")")
for i in _cr:
if i[-1].isdigit():
i = i[:-1].strip()
if i[-1] == "." or i[-1] == ",":
cr += [i[:-1].strip()]
return cr
def __func_validate(gpt_response, prompt=""):
try: __func_clean_up(gpt_response, prompt="")
except:
return False
return True
def get_fail_safe():
fs = ['wake up and complete the morning routine at 6:00 am',
'eat breakfast at 7:00 am',
'read a book from 8:00 am to 12:00 pm',
'have lunch at 12:00 pm',
'take a nap from 1:00 pm to 4:00 pm',
'relax and watch TV from 7:00 pm to 8:00 pm',
'go to bed at 11:00 pm']
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 500,
"temperature": 1, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/daily_planning_v6.txt"
prompt_input = create_prompt_input(persona, wake_up_hour, test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
output = ([f"wake up and complete the morning routine at {wake_up_hour}:00 am"]
+ output)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_generate_hourly_schedule(persona,
curr_hour_str,
p_f_ds_hourly_org,
hour_str,
intermission2=None,
test_input=None,
verbose=False):
def create_prompt_input(persona,
curr_hour_str,
p_f_ds_hourly_org,
hour_str,
intermission2=None,
test_input=None):
if test_input: return test_input
schedule_format = ""
for i in hour_str:
schedule_format += f"[{persona.scratch.get_str_curr_date_str()} -- {i}]"
schedule_format += f" Activity: [Fill in]\n"
schedule_format = schedule_format[:-1]
intermission_str = f"Here the originally intended hourly breakdown of"
intermission_str += f" {persona.scratch.get_str_firstname()}'s schedule today: "
for count, i in enumerate(persona.scratch.daily_req):
intermission_str += f"{str(count+1)}) {i}, "
intermission_str = intermission_str[:-2]
prior_schedule = ""
if p_f_ds_hourly_org:
prior_schedule = "\n"
for count, i in enumerate(p_f_ds_hourly_org):
prior_schedule += f"[(ID:{get_random_alphanumeric()})"
prior_schedule += f" {persona.scratch.get_str_curr_date_str()} --"
prior_schedule += f" {hour_str[count]}] Activity:"
prior_schedule += f" {persona.scratch.get_str_firstname()}"
prior_schedule += f" is {i}\n"
prompt_ending = f"[(ID:{get_random_alphanumeric()})"
prompt_ending += f" {persona.scratch.get_str_curr_date_str()}"
prompt_ending += f" -- {curr_hour_str}] Activity:"
prompt_ending += f" {persona.scratch.get_str_firstname()} is"
if intermission2:
intermission2 = f"\n{intermission2}"
prompt_input = []
prompt_input += [schedule_format]
prompt_input += [persona.scratch.get_str_iss()]
prompt_input += [prior_schedule + "\n"]
prompt_input += [intermission_str]
if intermission2:
prompt_input += [intermission2]
else:
prompt_input += [""]
prompt_input += [prompt_ending]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
if cr[-1] == ".":
cr = cr[:-1]
return cr
def __func_validate(gpt_response, prompt=""):
try: __func_clean_up(gpt_response, prompt="")
except: return False
return True
def get_fail_safe():
fs = "asleep"
return fs
# # ChatGPT Plugin ===========================================================
# def __chat_func_clean_up(gpt_response, prompt=""): ############
# cr = gpt_response.strip()
# if cr[-1] == ".":
# cr = cr[:-1]
# return cr
# def __chat_func_validate(gpt_response, prompt=""): ############
# try: __func_clean_up(gpt_response, prompt="")
# except: return False
# return True
# print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 10") ########
# gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v3_ChatGPT/generate_hourly_schedule_v2.txt" ########
# prompt_input = create_prompt_input(persona,
# curr_hour_str,
# p_f_ds_hourly_org,
# hour_str,
# intermission2,
# test_input) ########
# prompt = generate_prompt(prompt_input, prompt_template)
# example_output = "studying for her music classes" ########
# special_instruction = "The output should ONLY include the part of the sentence that completes the last line in the schedule above." ########
# fail_safe = get_fail_safe() ########
# output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
# __chat_func_validate, __chat_func_clean_up, True)
# if output != False:
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# # ChatGPT Plugin ===========================================================
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0.5, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/generate_hourly_schedule_v2.txt"
prompt_input = create_prompt_input(persona,
curr_hour_str,
p_f_ds_hourly_org,
hour_str,
intermission2,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_task_decomp(persona,
task,
duration,
test_input=None,
verbose=False):
def create_prompt_input(persona, task, duration, test_input=None):
"""
Today is Saturday June 25. From 00:00 ~ 06:00am, Maeve is
planning on sleeping, 06:00 ~ 07:00am, Maeve is
planning on waking up and doing her morning routine,
and from 07:00am ~08:00am, Maeve is planning on having breakfast.
"""
curr_f_org_index = persona.scratch.get_f_daily_schedule_hourly_org_index()
all_indices = []
# if curr_f_org_index > 0:
# all_indices += [curr_f_org_index-1]
all_indices += [curr_f_org_index]
if curr_f_org_index+1 <= len(persona.scratch.f_daily_schedule_hourly_org):
all_indices += [curr_f_org_index+1]
if curr_f_org_index+2 <= len(persona.scratch.f_daily_schedule_hourly_org):
all_indices += [curr_f_org_index+2]
curr_time_range = ""
print ("DEBUG")
print (persona.scratch.f_daily_schedule_hourly_org)
print (all_indices)
summ_str = f'Today is {persona.scratch.curr_time.strftime("%B %d, %Y")}. '
summ_str += f'From '
for index in all_indices:
print ("index", index)
if index < len(persona.scratch.f_daily_schedule_hourly_org):
start_min = 0
for i in range(index):
start_min += persona.scratch.f_daily_schedule_hourly_org[i][1]
end_min = start_min + persona.scratch.f_daily_schedule_hourly_org[index][1]
start_time = (datetime.datetime.strptime("00:00:00", "%H:%M:%S")
+ datetime.timedelta(minutes=start_min))
end_time = (datetime.datetime.strptime("00:00:00", "%H:%M:%S")
+ datetime.timedelta(minutes=end_min))
start_time_str = start_time.strftime("%H:%M%p")
end_time_str = end_time.strftime("%H:%M%p")
summ_str += f"{start_time_str} ~ {end_time_str}, {persona.name} is planning on {persona.scratch.f_daily_schedule_hourly_org[index][0]}, "
if curr_f_org_index+1 == index:
curr_time_range = f'{start_time_str} ~ {end_time_str}'
summ_str = summ_str[:-2] + "."
prompt_input = []
prompt_input += [persona.scratch.get_str_iss()]
prompt_input += [summ_str]
# prompt_input += [persona.scratch.get_str_curr_date_str()]
prompt_input += [persona.scratch.get_str_firstname()]
prompt_input += [persona.scratch.get_str_firstname()]
prompt_input += [task]
prompt_input += [curr_time_range]
prompt_input += [duration]
prompt_input += [persona.scratch.get_str_firstname()]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
print ("TOODOOOOOO")
print (gpt_response)
print ("-==- -==- -==- ")
# TODO SOMETHING HERE sometimes fails... See screenshot
temp = [i.strip() for i in gpt_response.split("\n")]
_cr = []
cr = []
for count, i in enumerate(temp):
if count != 0:
_cr += [" ".join([j.strip () for j in i.split(" ")][3:])]
else:
_cr += [i]
for count, i in enumerate(_cr):
k = [j.strip() for j in i.split("(duration in minutes:")]
task = k[0]
if task[-1] == ".":
task = task[:-1]
duration = int(k[1].split(",")[0].strip())
cr += [[task, duration]]
total_expected_min = int(prompt.split("(total duration in minutes")[-1]
.split("):")[0].strip())
# TODO -- now, you need to make sure that this is the same as the sum of
# the current action sequence.
curr_min_slot = [["dummy", -1],] # (task_name, task_index)
for count, i in enumerate(cr):
i_task = i[0]
i_duration = i[1]
i_duration -= (i_duration % 5)
if i_duration > 0:
for j in range(i_duration):
curr_min_slot += [(i_task, count)]
curr_min_slot = curr_min_slot[1:]
if len(curr_min_slot) > total_expected_min:
last_task = curr_min_slot[60]
for i in range(1, 6):
curr_min_slot[-1 * i] = last_task
elif len(curr_min_slot) < total_expected_min:
last_task = curr_min_slot[-1]
for i in range(total_expected_min - len(curr_min_slot)):
curr_min_slot += [last_task]
cr_ret = [["dummy", -1],]
for task, task_index in curr_min_slot:
if task != cr_ret[-1][0]:
cr_ret += [[task, 1]]
else:
cr_ret[-1][1] += 1
cr = cr_ret[1:]
return cr
def __func_validate(gpt_response, prompt=""):
# TODO -- this sometimes generates error
try:
__func_clean_up(gpt_response)
except:
pass
# return False
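# NOTE: returning the raw response (truthy for any non-empty string) means
# this validator effectively always accepts the output; clean-up failures are
# swallowed by the bare except above.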
return gpt_response
def get_fail_safe():
fs = ["asleep"]
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/task_decomp_v3.txt"
prompt_input = create_prompt_input(persona, task, duration)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
print ("?????")
print (prompt)
output = safe_generate_response(prompt, gpt_param, 5, get_fail_safe(),
__func_validate, __func_clean_up)
# TODO THERE WAS A BUG HERE...
# This is for preventing overflows...
"""
File "/Users/joonsungpark/Desktop/Stanford/Projects/
generative-personas/src_exploration/reverie_simulation/
brain/get_next_action_v3.py", line 364, in run_gpt_prompt_task_decomp
fin_output[-1][1] += (duration - ftime_sum)
IndexError: list index out of range
"""
print ("IMPORTANT VVV DEBUG")
# print (prompt_input)
# print (prompt)
print (output)
fin_output = []
time_sum = 0
for i_task, i_duration in output:
time_sum += i_duration
# HM?????????
# if time_sum < duration:
if time_sum <= duration:
fin_output += [[i_task, i_duration]]
else:
break
ftime_sum = 0
for fi_task, fi_duration in fin_output:
ftime_sum += fi_duration
# print ("for debugging... line 365", fin_output)
fin_output[-1][1] += (duration - ftime_sum)
output = fin_output
task_decomp = output
ret = []
for decomp_task, duration in task_decomp:
ret += [[f"{task} ({decomp_task})", duration]]
output = ret
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_action_sector(action_description,
persona,
maze,
test_input=None,
verbose=False):
def create_prompt_input(action_description, persona, maze, test_input=None):
act_world = f"{maze.access_tile(persona.scratch.curr_tile)['world']}"
prompt_input = []
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [persona.scratch.living_area.split(":")[1]]
x = f"{act_world}:{persona.scratch.living_area.split(':')[1]}"
prompt_input += [persona.s_mem.get_str_accessible_sector_arenas(x)]
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [f"{maze.access_tile(persona.scratch.curr_tile)['sector']}"]
x = f"{act_world}:{maze.access_tile(persona.scratch.curr_tile)['sector']}"
prompt_input += [persona.s_mem.get_str_accessible_sector_arenas(x)]
if persona.scratch.get_str_daily_plan_req() != "":
prompt_input += [f"\n{persona.scratch.get_str_daily_plan_req()}"]
else:
prompt_input += [""]
# MAR 11 TEMP
accessible_sector_str = persona.s_mem.get_str_accessible_sectors(act_world)
curr = accessible_sector_str.split(", ")
fin_accessible_sectors = []
for i in curr:
if "'s house" in i:
if persona.scratch.last_name in i:
fin_accessible_sectors += [i]
else:
fin_accessible_sectors += [i]
accessible_sector_str = ", ".join(fin_accessible_sectors)
# END MAR 11 TEMP
prompt_input += [accessible_sector_str]
action_description_1 = action_description
action_description_2 = action_description
if "(" in action_description:
action_description_1 = action_description.split("(")[0].strip()
action_description_2 = action_description.split("(")[-1][:-1]
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [action_description_1]
prompt_input += [action_description_2]
prompt_input += [persona.scratch.get_str_name()]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cleaned_response = gpt_response.split("}")[0]
return cleaned_response
def __func_validate(gpt_response, prompt=""):
if len(gpt_response.strip()) < 1:
return False
if "}" not in gpt_response:
return False
if "," in gpt_response:
return False
return True
def get_fail_safe():
fs = ("kitchen")
return fs
# # ChatGPT Plugin ===========================================================
# def __chat_func_clean_up(gpt_response, prompt=""): ############
# cr = gpt_response.strip()
# return cr
# def __chat_func_validate(gpt_response, prompt=""): ############
# try:
# gpt_response = __func_clean_up(gpt_response, prompt="")
# except:
# return False
# return True
# print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 20") ########
# gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v3_ChatGPT/action_location_sector_v2.txt" ########
# prompt_input = create_prompt_input(action_description, persona, maze) ########
# prompt = generate_prompt(prompt_input, prompt_template)
# example_output = "Johnson Park" ########
# special_instruction = "The value for the output must contain one of the area options above verbatim (including lower/upper case)." ########
# fail_safe = get_fail_safe() ########
# output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
# __chat_func_validate, __chat_func_clean_up, True)
# if output != False:
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# # ChatGPT Plugin ===========================================================
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v1/action_location_sector_v1.txt"
prompt_input = create_prompt_input(action_description, persona, maze)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
y = f"{maze.access_tile(persona.scratch.curr_tile)['world']}"
x = [i.strip() for i in persona.s_mem.get_str_accessible_sectors(y).split(",")]
if output not in x:
# output = random.choice(x)
output = persona.scratch.living_area.split(":")[1]
print ("DEBUG", random.choice(x), "------", output)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_action_arena(action_description,
persona,
maze, act_world, act_sector,
test_input=None,
verbose=False):
def create_prompt_input(action_description, persona, maze, act_world, act_sector, test_input=None):
prompt_input = []
# prompt_input += [persona.scratch.get_str_name()]
# prompt_input += [maze.access_tile(persona.scratch.curr_tile)["arena"]]
# prompt_input += [maze.access_tile(persona.scratch.curr_tile)["sector"]]
prompt_input += [persona.scratch.get_str_name()]
x = f"{act_world}:{act_sector}"
prompt_input += [act_sector]
# MAR 11 TEMP
accessible_arena_str = persona.s_mem.get_str_accessible_sector_arenas(x)
curr = accessible_arena_str.split(", ")
fin_accessible_arenas = []
for i in curr:
if "'s room" in i:
if persona.scratch.last_name in i:
fin_accessible_arenas += [i]
else:
fin_accessible_arenas += [i]
accessible_arena_str = ", ".join(fin_accessible_arenas)
# END MAR 11 TEMP
prompt_input += [accessible_arena_str]
action_description_1 = action_description
action_description_2 = action_description
if "(" in action_description:
action_description_1 = action_description.split("(")[0].strip()
action_description_2 = action_description.split("(")[-1][:-1]
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [action_description_1]
prompt_input += [action_description_2]
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [act_sector]
prompt_input += [accessible_arena_str]
# prompt_input += [maze.access_tile(persona.scratch.curr_tile)["arena"]]
# x = f"{maze.access_tile(persona.scratch.curr_tile)['world']}:{maze.access_tile(persona.scratch.curr_tile)['sector']}:{maze.access_tile(persona.scratch.curr_tile)['arena']}"
# prompt_input += [persona.s_mem.get_str_accessible_arena_game_objects(x)]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cleaned_response = gpt_response.split("}")[0]
return cleaned_response
def __func_validate(gpt_response, prompt=""):
if len(gpt_response.strip()) < 1:
return False
if "}" not in gpt_response:
return False
if "," in gpt_response:
return False
return True
def get_fail_safe():
fs = ("kitchen")
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v1/action_location_object_vMar11.txt"
prompt_input = create_prompt_input(action_description, persona, maze, act_world, act_sector)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
print (output)
# y = f"{act_world}:{act_sector}"
# x = [i.strip() for i in persona.s_mem.get_str_accessible_sector_arenas(y).split(",")]
# if output not in x:
# output = random.choice(x)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_action_game_object(action_description,
persona,
maze,
temp_address,
test_input=None,
verbose=False):
def create_prompt_input(action_description,
persona,
temp_address,
test_input=None):
prompt_input = []
if "(" in action_description:
action_description = action_description.split("(")[-1][:-1]
prompt_input += [action_description]
prompt_input += [persona
.s_mem.get_str_accessible_arena_game_objects(temp_address)]
return prompt_input
def __func_validate(gpt_response, prompt=""):
if len(gpt_response.strip()) < 1:
return False
return True
def __func_clean_up(gpt_response, prompt=""):
cleaned_response = gpt_response.strip()
return cleaned_response
def get_fail_safe():
fs = ("bed")
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v1/action_object_v2.txt"
prompt_input = create_prompt_input(action_description,
persona,
temp_address,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
x = [i.strip() for i in persona.s_mem.get_str_accessible_arena_game_objects(temp_address).split(",")]
if output not in x:
output = random.choice(x)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_pronunciatio(action_description, persona, verbose=False):
def create_prompt_input(action_description):
if "(" in action_description:
action_description = action_description.split("(")[-1].split(")")[0]
prompt_input = [action_description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
if len(cr) > 3:
cr = cr[:3]
return cr
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt="")
if len(gpt_response) == 0:
return False
except: return False
return True
def get_fail_safe():
fs = "π"
return fs
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
cr = gpt_response.strip()
if len(cr) > 3:
cr = cr[:3]
return cr
def __chat_func_validate(gpt_response, prompt=""): ############
try:
__func_clean_up(gpt_response, prompt="")
if len(gpt_response) == 0:
return False
except: return False
return True
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 4") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/generate_pronunciatio_v1.txt" ########
prompt_input = create_prompt_input(action_description) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = "ππ§ββοΈ" ########
special_instruction = "The value for the output must ONLY contain the emojis." ########
fail_safe = get_fail_safe()
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 15,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
# prompt_template = "persona/prompt_template/v2/generate_pronunciatio_v1.txt"
# prompt_input = create_prompt_input(action_description)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe()
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_event_triple(action_description, persona, verbose=False):
def create_prompt_input(action_description, persona):
if "(" in action_description:
action_description = action_description.split("(")[-1].split(")")[0]
prompt_input = [persona.name,
action_description,
persona.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
cr = [i.strip() for i in cr.split(")")[0].split(",")]
return cr
def __func_validate(gpt_response, prompt=""):
try:
gpt_response = __func_clean_up(gpt_response, prompt="")
if len(gpt_response) != 2:
return False
except: return False
return True
def get_fail_safe(persona):
fs = (persona.name, "is", "idle")
return fs
# ChatGPT Plugin ===========================================================
# def __chat_func_clean_up(gpt_response, prompt=""): ############
# cr = gpt_response.strip()
# cr = [i.strip() for i in cr.split(")")[0].split(",")]
# return cr
# def __chat_func_validate(gpt_response, prompt=""): ############
# try:
# gpt_response = __func_clean_up(gpt_response, prompt="")
# if len(gpt_response) != 2:
# return False
# except: return False
# return True
# print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 5") ########
# gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v3_ChatGPT/generate_event_triple_v1.txt" ########
# prompt_input = create_prompt_input(action_description, persona) ########
# prompt = generate_prompt(prompt_input, prompt_template)
# example_output = "(Jane Doe, cooking, breakfast)" ########
# special_instruction = "The value for the output must ONLY contain the triple. If there is an incomplete element, just say 'None' but there needs to be three elements no matter what." ########
# fail_safe = get_fail_safe(persona) ########
# output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
# __chat_func_validate, __chat_func_clean_up, True)
# if output != False:
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
gpt_param = {"engine": "text-davinci-003", "max_tokens": 30,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/generate_event_triple_v1.txt"
prompt_input = create_prompt_input(action_description, persona)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(persona) ########
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
output = (persona.name, output[0], output[1])
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona, verbose=False):
def create_prompt_input(act_game_object, act_desp, persona):
prompt_input = [act_game_object,
persona.name,
act_desp,
act_game_object,
act_game_object]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
if cr[-1] == ".": cr = cr[:-1]
return cr
def __func_validate(gpt_response, prompt=""):
try:
gpt_response = __func_clean_up(gpt_response, prompt="")
except:
return False
return True
def get_fail_safe(act_game_object):
fs = f"{act_game_object} is idle"
return fs
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
cr = gpt_response.strip()
if cr[-1] == ".": cr = cr[:-1]
return cr
def __chat_func_validate(gpt_response, prompt=""): ############
try:
gpt_response = __func_clean_up(gpt_response, prompt="")
except:
return False
return True
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 6") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/generate_obj_event_v1.txt" ########
prompt_input = create_prompt_input(act_game_object, act_desp, persona) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = "being fixed" ########
special_instruction = "The output should ONLY contain the phrase that should go in <fill in>." ########
fail_safe = get_fail_safe(act_game_object) ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 30,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
# prompt_template = "persona/prompt_template/v2/generate_obj_event_v1.txt"
# prompt_input = create_prompt_input(act_game_object, act_desp, persona)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe(act_game_object)
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_act_obj_event_triple(act_game_object, act_obj_desc, persona, verbose=False):
def create_prompt_input(act_game_object, act_obj_desc):
prompt_input = [act_game_object,
act_obj_desc,
act_game_object]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
cr = [i.strip() for i in cr.split(")")[0].split(",")]
return cr
def __func_validate(gpt_response, prompt=""):
try:
gpt_response = __func_clean_up(gpt_response, prompt="")
if len(gpt_response) != 2:
return False
except: return False
return True
def get_fail_safe(act_game_object):
fs = (act_game_object, "is", "idle")
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 30,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/generate_event_triple_v1.txt"
prompt_input = create_prompt_input(act_game_object, act_obj_desc)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(act_game_object)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
output = (act_game_object, output[0], output[1])
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_new_decomp_schedule(persona,
main_act_dur,
truncated_act_dur,
start_time_hour,
end_time_hour,
inserted_act,
inserted_act_dur,
test_input=None,
verbose=False):
def create_prompt_input(persona,
main_act_dur,
truncated_act_dur,
start_time_hour,
end_time_hour,
inserted_act,
inserted_act_dur,
test_input=None):
persona_name = persona.name
start_hour_str = start_time_hour.strftime("%H:%M %p")
end_hour_str = end_time_hour.strftime("%H:%M %p")
original_plan = ""
for_time = start_time_hour
for i in main_act_dur:
original_plan += f'{for_time.strftime("%H:%M")} ~ {(for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M")} -- ' + i[0]
original_plan += "\n"
for_time += datetime.timedelta(minutes=int(i[1]))
new_plan_init = ""
for_time = start_time_hour
for count, i in enumerate(truncated_act_dur):
new_plan_init += f'{for_time.strftime("%H:%M")} ~ {(for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M")} -- ' + i[0]
new_plan_init += "\n"
if count < len(truncated_act_dur) - 1:
for_time += datetime.timedelta(minutes=int(i[1]))
new_plan_init += (for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M") + " ~"
prompt_input = [persona_name,
start_hour_str,
end_hour_str,
original_plan,
persona_name,
inserted_act,
inserted_act_dur,
persona_name,
start_hour_str,
end_hour_str,
end_hour_str,
new_plan_init]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
new_schedule = prompt + " " + gpt_response.strip()
new_schedule = new_schedule.split("The revised schedule:")[-1].strip()
new_schedule = new_schedule.split("\n")
ret_temp = []
for i in new_schedule:
ret_temp += [i.split(" -- ")]
ret = []
for time_str, action in ret_temp:
start_time = time_str.split(" ~ ")[0].strip()
end_time = time_str.split(" ~ ")[1].strip()
delta = datetime.datetime.strptime(end_time, "%H:%M") - datetime.datetime.strptime(start_time, "%H:%M")
delta_min = int(delta.total_seconds()/60)
if delta_min < 0: delta_min = 0
ret += [[action, delta_min]]
return ret
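# Worked example of the clean-up above: if the text following
# "The revised schedule:" ends up as
#   09:00 ~ 09:30 -- having breakfast
#   09:30 ~ 10:00 -- taking a shower
# the function returns [["having breakfast", 30], ["taking a shower", 30]].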
def __func_validate(gpt_response, prompt=""):
try:
gpt_response = __func_clean_up(gpt_response, prompt)
dur_sum = 0
for act, dur in gpt_response:
dur_sum += dur
if str(type(act)) != "<class 'str'>":
return False
if str(type(dur)) != "<class 'int'>":
return False
x = prompt.split("\n")[0].split("originally planned schedule from")[-1].strip()[:-1]
x = [datetime.datetime.strptime(i.strip(), "%H:%M %p") for i in x.split(" to ")]
delta_min = int((x[1] - x[0]).total_seconds()/60)
if int(dur_sum) != int(delta_min):
return False
except:
return False
return True
def get_fail_safe(main_act_dur, truncated_act_dur):
dur_sum = 0
for act, dur in main_act_dur: dur_sum += dur
ret = truncated_act_dur[:]
ret += main_act_dur[len(ret)-1:]
# If there is excess, we need to trim...
ret_dur_sum = 0
count = 0
over = None
for act, dur in ret:
ret_dur_sum += dur
if ret_dur_sum == dur_sum:
break
if ret_dur_sum > dur_sum:
over = ret_dur_sum - dur_sum
break
count += 1
if over:
ret = ret[:count+1]
ret[-1][1] -= over
return ret
gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/new_decomp_schedule_v1.txt"
prompt_input = create_prompt_input(persona,
main_act_dur,
truncated_act_dur,
start_time_hour,
end_time_hour,
inserted_act,
inserted_act_dur,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(main_act_dur, truncated_act_dur)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
# print ("* * * * output")
# print (output)
# print ('* * * * fail_safe')
# print (fail_safe)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_decide_to_talk(persona, target_persona, retrieved,test_input=None,
verbose=False):
def create_prompt_input(init_persona, target_persona, retrieved,
test_input=None):
last_chat = init_persona.a_mem.get_last_chat(target_persona.name)
last_chatted_time = ""
last_chat_about = ""
if last_chat:
last_chatted_time = last_chat.created.strftime("%B %d, %Y, %H:%M:%S")
last_chat_about = last_chat.description
context = ""
for c_node in retrieved["events"]:
curr_desc = c_node.description.split(" ")
curr_desc[2:3] = ["was"]
curr_desc = " ".join(curr_desc)
context += f"{curr_desc}. "
context += "\n"
for c_node in retrieved["thoughts"]:
context += f"{c_node.description}. "
curr_time = init_persona.scratch.curr_time.strftime("%B %d, %Y, %H:%M:%S %p")
init_act_desc = init_persona.scratch.act_description
if "(" in init_act_desc:
init_act_desc = init_act_desc.split("(")[-1][:-1]
if len(init_persona.scratch.planned_path) == 0 and "waiting" not in init_act_desc:
init_p_desc = f"{init_persona.name} is already {init_act_desc}"
elif "waiting" in init_act_desc:
init_p_desc = f"{init_persona.name} is {init_act_desc}"
else:
init_p_desc = f"{init_persona.name} is on the way to {init_act_desc}"
target_act_desc = target_persona.scratch.act_description
if "(" in target_act_desc:
target_act_desc = target_act_desc.split("(")[-1][:-1]
if len(target_persona.scratch.planned_path) == 0 and "waiting" not in init_act_desc:
target_p_desc = f"{target_persona.name} is already {target_act_desc}"
elif "waiting" in init_act_desc:
target_p_desc = f"{init_persona.name} is {init_act_desc}"
else:
target_p_desc = f"{target_persona.name} is on the way to {target_act_desc}"
prompt_input = []
prompt_input += [context]
prompt_input += [curr_time]
prompt_input += [init_persona.name]
prompt_input += [target_persona.name]
prompt_input += [last_chatted_time]
prompt_input += [last_chat_about]
prompt_input += [init_p_desc]
prompt_input += [target_p_desc]
prompt_input += [init_persona.name]
prompt_input += [target_persona.name]
return prompt_input
def __func_validate(gpt_response, prompt=""):
try:
if gpt_response.split("Answer in yes or no:")[-1].strip().lower() in ["yes", "no"]:
return True
return False
except:
return False
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split("Answer in yes or no:")[-1].strip().lower()
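# e.g. a completion of "Answer in yes or no: Yes" (or simply "Yes") is
# cleaned up to "yes", which the validator accepts.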
def get_fail_safe():
fs = "yes"
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 20,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/decide_to_talk_v2.txt"
prompt_input = create_prompt_input(persona, target_persona, retrieved,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_decide_to_react(persona, target_persona, retrieved,test_input=None,
verbose=False):
def create_prompt_input(init_persona, target_persona, retrieved,
test_input=None):
context = ""
for c_node in retrieved["events"]:
curr_desc = c_node.description.split(" ")
curr_desc[2:3] = ["was"]
curr_desc = " ".join(curr_desc)
context += f"{curr_desc}. "
context += "\n"
for c_node in retrieved["thoughts"]:
context += f"{c_node.description}. "
curr_time = init_persona.scratch.curr_time.strftime("%B %d, %Y, %H:%M:%S %p")
init_act_desc = init_persona.scratch.act_description
if "(" in init_act_desc:
init_act_desc = init_act_desc.split("(")[-1][:-1]
if len(init_persona.scratch.planned_path) == 0:
loc = ""
if ":" in init_persona.scratch.act_address:
loc = init_persona.scratch.act_address.split(":")[-1] + " in " + init_persona.scratch.act_address.split(":")[-2]
init_p_desc = f"{init_persona.name} is already {init_act_desc} at {loc}"
else:
loc = ""
if ":" in init_persona.scratch.act_address:
loc = init_persona.scratch.act_address.split(":")[-1] + " in " + init_persona.scratch.act_address.split(":")[-2]
init_p_desc = f"{init_persona.name} is on the way to {init_act_desc} at {loc}"
target_act_desc = target_persona.scratch.act_description
if "(" in target_act_desc:
target_act_desc = target_act_desc.split("(")[-1][:-1]
if len(target_persona.scratch.planned_path) == 0:
loc = ""
if ":" in target_persona.scratch.act_address:
loc = target_persona.scratch.act_address.split(":")[-1] + " in " + target_persona.scratch.act_address.split(":")[-2]
target_p_desc = f"{target_persona.name} is already {target_act_desc} at {loc}"
else:
loc = ""
if ":" in target_persona.scratch.act_address:
loc = target_persona.scratch.act_address.split(":")[-1] + " in " + target_persona.scratch.act_address.split(":")[-2]
target_p_desc = f"{target_persona.name} is on the way to {target_act_desc} at {loc}"
prompt_input = []
prompt_input += [context]
prompt_input += [curr_time]
prompt_input += [init_p_desc]
prompt_input += [target_p_desc]
prompt_input += [init_persona.name]
prompt_input += [init_act_desc]
prompt_input += [target_persona.name]
prompt_input += [target_act_desc]
prompt_input += [init_act_desc]
return prompt_input
def __func_validate(gpt_response, prompt=""):
try:
if gpt_response.split("Answer: Option")[-1].strip().lower() in ["3", "2", "1"]:
return True
return False
except:
return False
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split("Answer: Option")[-1].strip().lower()
def get_fail_safe():
fs = "3"
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 20,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/decide_to_react_v1.txt"
prompt_input = create_prompt_input(persona, target_persona, retrieved,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_create_conversation(persona, target_persona, curr_loc,
test_input=None, verbose=False):
def create_prompt_input(init_persona, target_persona, curr_loc,
test_input=None):
prev_convo_insert = "\n"
if init_persona.a_mem.seq_chat:
for i in init_persona.a_mem.seq_chat:
if i.object == target_persona.scratch.name:
v1 = int((init_persona.scratch.curr_time - i.created).total_seconds()/60)
prev_convo_insert += f'{str(v1)} minutes ago, they had the following conversation.\n'
for row in i.filling:
prev_convo_insert += f'{row[0]}: "{row[1]}"\n'
break
if prev_convo_insert == "\n":
prev_convo_insert = ""
if init_persona.a_mem.seq_chat:
if int((init_persona.scratch.curr_time - init_persona.a_mem.seq_chat[-1].created).total_seconds()/60) > 480:
prev_convo_insert = ""
init_persona_thought_nodes = init_persona.a_mem.retrieve_relevant_thoughts(target_persona.scratch.act_event[0],
target_persona.scratch.act_event[1],
target_persona.scratch.act_event[2])
init_persona_thought = ""
for i in init_persona_thought_nodes:
init_persona_thought += f"-- {i.description}\n"
target_persona_thought_nodes = target_persona.a_mem.retrieve_relevant_thoughts(init_persona.scratch.act_event[0],
init_persona.scratch.act_event[1],
init_persona.scratch.act_event[2])
target_persona_thought = ""
for i in target_persona_thought_nodes:
target_persona_thought += f"-- {i.description}\n"
init_persona_curr_desc = ""
if init_persona.scratch.planned_path:
init_persona_curr_desc = f"{init_persona.name} is on the way to {init_persona.scratch.act_description}"
else:
init_persona_curr_desc = f"{init_persona.name} is {init_persona.scratch.act_description}"
target_persona_curr_desc = ""
if target_persona.scratch.planned_path:
target_persona_curr_desc = f"{target_persona.name} is on the way to {target_persona.scratch.act_description}"
else:
target_persona_curr_desc = f"{target_persona.name} is {target_persona.scratch.act_description}"
curr_loc = curr_loc["arena"]
prompt_input = []
prompt_input += [init_persona.scratch.get_str_iss()]
prompt_input += [target_persona.scratch.get_str_iss()]
prompt_input += [init_persona.name]
prompt_input += [target_persona.name]
prompt_input += [init_persona_thought]
prompt_input += [target_persona.name]
prompt_input += [init_persona.name]
prompt_input += [target_persona_thought]
prompt_input += [init_persona.scratch.curr_time.strftime("%B %d, %Y, %H:%M:%S")]
prompt_input += [init_persona_curr_desc]
prompt_input += [target_persona_curr_desc]
prompt_input += [prev_convo_insert]
prompt_input += [init_persona.name]
prompt_input += [target_persona.name]
prompt_input += [curr_loc]
prompt_input += [init_persona.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
# print ("???")
# print (gpt_response)
gpt_response = (prompt + gpt_response).split("What would they talk about now?")[-1].strip()
content = re.findall('"([^"]*)"', gpt_response)
speaker_order = []
for i in gpt_response.split("\n"):
name = i.split(":")[0].strip()
if name:
speaker_order += [name]
ret = []
for count, speaker in enumerate(speaker_order):
ret += [[speaker, content[count]]]
return ret
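# Worked example of the clean-up above, with hypothetical dialogue: if the
# text after "What would they talk about now?" is
#   Isabella Rodriguez: "Good morning!"
#   Klaus Mueller: "Morning! Are you heading to the cafe?"
# the function returns [["Isabella Rodriguez", "Good morning!"],
# ["Klaus Mueller", "Morning! Are you heading to the cafe?"]].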
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe(init_persona, target_persona):
convo = [[init_persona.name, "Hi!"],
[target_persona.name, "Hi!"]]
return convo
gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000,
"temperature": 0.7, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/create_conversation_v2.txt"
prompt_input = create_prompt_input(persona, target_persona, curr_loc,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(persona, target_persona)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_summarize_conversation(persona, conversation, test_input=None, verbose=False):
def create_prompt_input(conversation, test_input=None):
convo_str = ""
for row in conversation:
convo_str += f'{row[0]}: "{row[1]}"\n'
prompt_input = [convo_str]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
ret = "conversing about " + gpt_response.strip()
return ret
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "conversing with a housemate about morning greetings"
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
ret = "conversing about " + gpt_response.strip()
return ret
def __chat_func_validate(gpt_response, prompt=""): ############
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 11") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/summarize_conversation_v1.txt" ########
prompt_input = create_prompt_input(conversation, test_input) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = "conversing about what to eat for lunch" ########
special_instruction = "The output must continue the sentence above by filling in the <fill in> tag. Don't start with 'this is a conversation about...' Just finish the sentence but do not miss any important details (including who are chatting)." ########
fail_safe = get_fail_safe() ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v2/summarize_conversation_v1.txt"
# prompt_input = create_prompt_input(conversation, test_input)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe()
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
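# Extracts a set of lowercase keywords from a description. The completion is
# expected to list factual keywords followed by an "Emotive keywords:" section;
# both groups are merged into a single set.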
def run_gpt_prompt_extract_keywords(persona, description, test_input=None, verbose=False):
def create_prompt_input(description, test_input=None):
if "\n" in description:
description = description.replace("\n", " <LINE_BREAK> ")
prompt_input = [description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
print ("???")
print (gpt_response)
gpt_response = gpt_response.strip().split("Emotive keywords:")
factual = [i.strip() for i in gpt_response[0].split(",")]
emotive = [i.strip() for i in gpt_response[1].split(",")]
all_keywords = factual + emotive
ret = []
for i in all_keywords:
if i:
i = i.lower()
if i[-1] == ".":
i = i[:-1]
ret += [i]
print (ret)
return set(ret)
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return []
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/get_keywords_v1.txt"
prompt_input = create_prompt_input(description, test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
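# Expands a single keyword plus a concept summary into a short free-form
# thought attributed to the persona.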
def run_gpt_prompt_keyword_to_thoughts(persona, keyword, concept_summary, test_input=None, verbose=False):
def create_prompt_input(persona, keyword, concept_summary, test_input=None):
prompt_input = [keyword, concept_summary, persona.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = gpt_response.strip()
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return ""
gpt_param = {"engine": "text-davinci-003", "max_tokens": 40,
"temperature": 0.7, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/keyword_to_thoughts_v1.txt"
prompt_input = create_prompt_input(persona, keyword, concept_summary)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
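# Distills a finished conversation (convo_str) between init_persona_name and
# target_persona_name into a thought about fin_target.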
def run_gpt_prompt_convo_to_thoughts(persona,
init_persona_name,
target_persona_name,
convo_str,
fin_target, test_input=None, verbose=False):
def create_prompt_input(init_persona_name,
target_persona_name,
convo_str,
fin_target, test_input=None):
prompt_input = [init_persona_name,
target_persona_name,
convo_str,
init_persona_name,
fin_target]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = gpt_response.strip()
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return ""
gpt_param = {"engine": "text-davinci-003", "max_tokens": 40,
"temperature": 0.7, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/convo_to_thoughts_v1.txt"
prompt_input = create_prompt_input(init_persona_name,
target_persona_name,
convo_str,
fin_target)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
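# Rates how poignant an observed event is to the persona as an integer on a
# 1-10 scale; the fail-safe is a default score of 4.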
def run_gpt_prompt_event_poignancy(persona, event_description, test_input=None, verbose=False):
def create_prompt_input(persona, event_description, test_input=None):
prompt_input = [persona.scratch.name,
persona.scratch.get_str_iss(),
persona.scratch.name,
event_description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = int(gpt_response.strip())
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return 4
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
gpt_response = int(gpt_response)
return gpt_response
def __chat_func_validate(gpt_response, prompt=""): ############
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 7") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/poignancy_event_v1.txt" ########
prompt_input = create_prompt_input(persona, event_description) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = "5" ########
special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10." ########
fail_safe = get_fail_safe() ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 3,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v2/poignancy_event_v1.txt"
# prompt_input = create_prompt_input(persona, event_description)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe()
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
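# Same 1-10 poignancy rating as above, applied to a thought rather than an
# observed event.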
def run_gpt_prompt_thought_poignancy(persona, event_description, test_input=None, verbose=False):
def create_prompt_input(persona, event_description, test_input=None):
prompt_input = [persona.scratch.name,
persona.scratch.get_str_iss(),
persona.scratch.name,
event_description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = int(gpt_response.strip())
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return 4
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
gpt_response = int(gpt_response)
return gpt_response
def __chat_func_validate(gpt_response, prompt=""): ############
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 8") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/poignancy_thought_v1.txt" ########
prompt_input = create_prompt_input(persona, event_description) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = "5" ########
special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10." ########
fail_safe = get_fail_safe() ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 3,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v2/poignancy_thought_v1.txt"
# prompt_input = create_prompt_input(persona, event_description)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe()
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
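# Same 1-10 poignancy rating, applied to a conversation the persona took part in.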
def run_gpt_prompt_chat_poignancy(persona, event_description, test_input=None, verbose=False):
def create_prompt_input(persona, event_description, test_input=None):
prompt_input = [persona.scratch.name,
persona.scratch.get_str_iss(),
persona.scratch.name,
event_description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = int(gpt_response.strip())
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return 4
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
gpt_response = int(gpt_response)
return gpt_response
def __chat_func_validate(gpt_response, prompt=""): ############
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 9") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/poignancy_chat_v1.txt" ########
prompt_input = create_prompt_input(persona, event_description) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = "5" ########
special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10." ########
fail_safe = get_fail_safe() ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 3,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v2/poignancy_chat_v1.txt"
# prompt_input = create_prompt_input(persona, event_description)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe()
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
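# Generates n high-level "focal point" questions from a block of statements;
# the ChatGPT path expects the answer as a Python list literal of strings.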
def run_gpt_prompt_focal_pt(persona, statements, n, test_input=None, verbose=False):
def create_prompt_input(persona, statements, n, test_input=None):
prompt_input = [statements, str(n)]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = "1) " + gpt_response.strip()
ret = []
for i in gpt_response.split("\n"):
ret += [i.split(") ")[-1]]
return ret
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe(n):
return ["Who am I"] * n
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
ret = ast.literal_eval(gpt_response)
return ret
  def __chat_func_validate(gpt_response, prompt=""): ############
    try: 
      __chat_func_clean_up(gpt_response, prompt)
      return True
    except:
      return False 
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 12") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/generate_focal_pt_v1.txt" ########
prompt_input = create_prompt_input(persona, statements, n) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = '["What should Jane do for lunch", "Does Jane like strawberry", "Who is Jane"]' ########
special_instruction = "Output must be a list of str." ########
fail_safe = get_fail_safe(n) ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/generate_focal_pt_v1.txt"
prompt_input = create_prompt_input(persona, statements, n)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(n)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
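# Derives n insights from a numbered list of statements. Each response line is
# expected in the form "1. <insight> (because of 3, 5)"; the cleanup returns a
# dict mapping each insight to the list of supporting statement indices.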
def run_gpt_prompt_insight_and_guidance(persona, statements, n, test_input=None, verbose=False):
def create_prompt_input(persona, statements, n, test_input=None):
prompt_input = [statements, str(n)]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = "1. " + gpt_response.strip()
ret = dict()
for i in gpt_response.split("\n"):
row = i.split(". ")[-1]
thought = row.split("(because of ")[0].strip()
evi_raw = row.split("(because of ")[1].split(")")[0].strip()
evi_raw = re.findall(r'\d+', evi_raw)
evi_raw = [int(i.strip()) for i in evi_raw]
ret[thought] = evi_raw
return ret
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe(n):
return ["I am hungry"] * n
gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
"temperature": 0.5, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/insight_and_evidence_v1.txt"
prompt_input = create_prompt_input(persona, statements, n)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(n)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
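# Summarizes retrieved memory statements into what the persona may want to say
# to target_persona in the current context (a single-sentence string).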
def run_gpt_prompt_agent_chat_summarize_ideas(persona, target_persona, statements, curr_context, test_input=None, verbose=False):
def create_prompt_input(persona, target_persona, statements, curr_context, test_input=None):
prompt_input = [persona.scratch.get_str_curr_date_str(), curr_context, persona.scratch.currently,
statements, persona.scratch.name, target_persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
return gpt_response.split('"')[0].strip()
def __chat_func_validate(gpt_response, prompt=""): ############
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 17") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/summarize_chat_ideas_v1.txt" ########
prompt_input = create_prompt_input(persona, target_persona, statements, curr_context) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = 'Jane Doe is working on a project' ########
special_instruction = 'The output should be a string that responds to the question.' ########
fail_safe = get_fail_safe() ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
# "temperature": 0.5, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v2/summarize_chat_ideas_v1.txt"
# prompt_input = create_prompt_input(persona, target_persona, statements, curr_context)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe()
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
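# Summarizes what the persona's retrieved statements say about their
# relationship with target_persona (a single-sentence string).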
def run_gpt_prompt_agent_chat_summarize_relationship(persona, target_persona, statements, test_input=None, verbose=False):
def create_prompt_input(persona, target_persona, statements, test_input=None):
prompt_input = [statements, persona.scratch.name, target_persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
return gpt_response.split('"')[0].strip()
def __chat_func_validate(gpt_response, prompt=""): ############
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 18") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/summarize_chat_relationship_v2.txt" ########
prompt_input = create_prompt_input(persona, target_persona, statements) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = 'Jane Doe is working on a project' ########
special_instruction = 'The output should be a string that responds to the question.' ########
fail_safe = get_fail_safe() ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
# "temperature": 0.5, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v2/summarize_chat_relationship_v1.txt"
# prompt_input = create_prompt_input(persona, target_persona, statements)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe()
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
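# Generates an entire two-party conversation in one shot from each persona's
# summarized ideas and the shared context; the expected output is a list of
# [speaker_name, utterance] pairs.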
def run_gpt_prompt_agent_chat(maze, persona, target_persona,
curr_context,
init_summ_idea,
target_summ_idea, test_input=None, verbose=False):
def create_prompt_input(persona, target_persona, curr_context, init_summ_idea, target_summ_idea, test_input=None):
prev_convo_insert = "\n"
if persona.a_mem.seq_chat:
for i in persona.a_mem.seq_chat:
if i.object == target_persona.scratch.name:
v1 = int((persona.scratch.curr_time - i.created).total_seconds()/60)
prev_convo_insert += f'{str(v1)} minutes ago, {persona.scratch.name} and {target_persona.scratch.name} were already {i.description} This context takes place after that conversation.'
break
if prev_convo_insert == "\n":
prev_convo_insert = ""
if persona.a_mem.seq_chat:
if int((persona.scratch.curr_time - persona.a_mem.seq_chat[-1].created).total_seconds()/60) > 480:
prev_convo_insert = ""
print (prev_convo_insert)
curr_sector = f"{maze.access_tile(persona.scratch.curr_tile)['sector']}"
curr_arena= f"{maze.access_tile(persona.scratch.curr_tile)['arena']}"
curr_location = f"{curr_arena} in {curr_sector}"
prompt_input = [persona.scratch.currently,
target_persona.scratch.currently,
prev_convo_insert,
curr_context,
curr_location,
persona.scratch.name,
init_summ_idea,
persona.scratch.name,
target_persona.scratch.name,
target_persona.scratch.name,
target_summ_idea,
target_persona.scratch.name,
persona.scratch.name,
persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
print (gpt_response)
gpt_response = (prompt + gpt_response).split("Here is their conversation.")[-1].strip()
content = re.findall('"([^"]*)"', gpt_response)
speaker_order = []
for i in gpt_response.split("\n"):
name = i.split(":")[0].strip()
if name:
speaker_order += [name]
ret = []
for count, speaker in enumerate(speaker_order):
ret += [[speaker, content[count]]]
return ret
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
# ret = ast.literal_eval(gpt_response)
print ("a;dnfdap98fh4p9enf HEREE!!!")
for row in gpt_response:
print (row)
return gpt_response
def __chat_func_validate(gpt_response, prompt=""): ############
return True
# print ("HERE JULY 23 -- ----- ") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/agent_chat_v1.txt" ########
prompt_input = create_prompt_input(persona, target_persona, curr_context, init_summ_idea, target_summ_idea) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = '[["Jane Doe", "Hi!"], ["John Doe", "Hello there!"] ... ]' ########
special_instruction = 'The output should be a list of list where the inner lists are in the form of ["<Name>", "<Utterance>"].' ########
fail_safe = get_fail_safe() ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
# print ("HERE END JULY 23 -- ----- ") ########
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 2000,
# "temperature": 0.7, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v2/agent_chat_v1.txt"
# prompt_input = create_prompt_input(persona, target_persona, curr_context, init_summ_idea, target_summ_idea)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe()
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# =======================
# =======================
# =======================
# =======================
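# Summarizes the given statements into a short answer to a question about the
# persona.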
def run_gpt_prompt_summarize_ideas(persona, statements, question, test_input=None, verbose=False):
def create_prompt_input(persona, statements, question, test_input=None):
prompt_input = [statements, persona.scratch.name, question]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
return gpt_response.split('"')[0].strip()
def __chat_func_validate(gpt_response, prompt=""): ############
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 16") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/summarize_ideas_v1.txt" ########
prompt_input = create_prompt_input(persona, statements, question) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = 'Jane Doe is working on a project' ########
special_instruction = 'The output should be a string that responds to the question.' ########
fail_safe = get_fail_safe() ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
# gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
# "temperature": 0.5, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v2/summarize_ideas_v1.txt"
# prompt_input = create_prompt_input(persona, statements, question)
# prompt = generate_prompt(prompt_input, prompt_template)
# fail_safe = get_fail_safe()
# output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
# __func_validate, __func_clean_up)
# if debug or verbose:
# print_run_prompts(prompt_template, persona, gpt_param,
# prompt_input, prompt, output)
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
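# Generates the persona's next line in an ongoing conversation, given a
# description of the interlocutor, the conversation so far, and a summary of
# retrieved memories.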
def run_gpt_prompt_generate_next_convo_line(persona, interlocutor_desc, prev_convo, retrieved_summary, test_input=None, verbose=False):
def create_prompt_input(persona, interlocutor_desc, prev_convo, retrieved_summary, test_input=None):
prompt_input = [persona.scratch.name,
persona.scratch.get_str_iss(),
persona.scratch.name,
interlocutor_desc,
prev_convo,
persona.scratch.name,
retrieved_summary,
persona.scratch.name,]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
# # ChatGPT Plugin ===========================================================
# def __chat_func_clean_up(gpt_response, prompt=""): ############
# return gpt_response.split('"')[0].strip()
# def __chat_func_validate(gpt_response, prompt=""): ############
# try:
# __func_clean_up(gpt_response, prompt)
# return True
# except:
# return False
# print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 15") ########
# gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
# "temperature": 0, "top_p": 1, "stream": False,
# "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
# prompt_template = "persona/prompt_template/v3_ChatGPT/generate_next_convo_line_v1.txt" ########
# prompt_input = create_prompt_input(persona, interlocutor_desc, prev_convo, retrieved_summary) ########
# prompt = generate_prompt(prompt_input, prompt_template)
# example_output = 'Hello' ########
# special_instruction = 'The output should be a string that responds to the question. Again, only use the context included in the "Note" to generate the response' ########
# fail_safe = get_fail_safe() ########
# output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
# __chat_func_validate, __chat_func_clean_up, True)
# if output != False:
# return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# # ChatGPT Plugin ===========================================================
gpt_param = {"engine": "text-davinci-003", "max_tokens": 250,
"temperature": 1, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/generate_next_convo_line_v1.txt"
prompt_input = create_prompt_input(persona, interlocutor_desc, prev_convo, retrieved_summary)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
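# Rephrases a "whisper" (an externally injected statement) as an inner thought
# from the persona's point of view.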
def run_gpt_prompt_generate_whisper_inner_thought(persona, whisper, test_input=None, verbose=False):
def create_prompt_input(persona, whisper, test_input=None):
prompt_input = [persona.scratch.name, whisper]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/whisper_inner_thought_v1.txt"
prompt_input = create_prompt_input(persona, whisper)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
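# Extracts a planning-oriented takeaway for the persona from the full
# conversation transcript (all_utt).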
def run_gpt_prompt_planning_thought_on_convo(persona, all_utt, test_input=None, verbose=False):
def create_prompt_input(persona, all_utt, test_input=None):
prompt_input = [all_utt, persona.scratch.name, persona.scratch.name, persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/planning_thought_on_convo_v1.txt"
prompt_input = create_prompt_input(persona, all_utt)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
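# Extracts a memorable takeaway ("memo") for the persona from the full
# conversation transcript.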
def run_gpt_prompt_memo_on_convo(persona, all_utt, test_input=None, verbose=False):
def create_prompt_input(persona, all_utt, test_input=None):
prompt_input = [all_utt, persona.scratch.name, persona.scratch.name, persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
# ChatGPT Plugin ===========================================================
def __chat_func_clean_up(gpt_response, prompt=""): ############
return gpt_response.strip()
def __chat_func_validate(gpt_response, prompt=""): ############
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 15") ########
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v3_ChatGPT/memo_on_convo_v1.txt" ########
prompt_input = create_prompt_input(persona, all_utt) ########
prompt = generate_prompt(prompt_input, prompt_template)
example_output = 'Jane Doe was interesting to talk to.' ########
special_instruction = 'The output should ONLY contain a string that summarizes anything interesting that the agent may have noticed' ########
fail_safe = get_fail_safe() ########
output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, True)
if output != False:
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
# ChatGPT Plugin ===========================================================
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/memo_on_convo_v1.txt"
prompt_input = create_prompt_input(persona, all_utt)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
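# Scores a comment using the anthropomorphization safety prompt. The completion
# is expected to be JSON with an "output" field, which is returned directly.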
def run_gpt_generate_safety_score(persona, comment, test_input=None, verbose=False):
def create_prompt_input(comment, test_input=None):
prompt_input = [comment]
return prompt_input
def __chat_func_clean_up(gpt_response, prompt=""):
gpt_response = json.loads(gpt_response)
return gpt_response["output"]
def __chat_func_validate(gpt_response, prompt=""):
try:
fields = ["output"]
response = json.loads(gpt_response)
for field in fields:
if field not in response:
return False
return True
except:
return False
def get_fail_safe():
return None
print ("11")
prompt_template = "persona/prompt_template/safety/anthromorphosization_v1.txt"
prompt_input = create_prompt_input(comment)
print ("22")
prompt = generate_prompt(prompt_input, prompt_template)
print (prompt)
fail_safe = get_fail_safe()
output = ChatGPT_safe_generate_response_OLD(prompt, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, verbose)
print (output)
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
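# Extracts and parses the first {...} JSON object embedded in a string,
# returning None if nothing parsable is found. Note that nested objects are not
# handled, since the first closing brace terminates the match. Illustrative
# example:
#   extract_first_json_dict('noise {"utterance": "Hi", "end": "false"} tail')
#   -> {'utterance': 'Hi', 'end': 'false'}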
def extract_first_json_dict(data_str):
# Find the first occurrence of a JSON object within the string
start_idx = data_str.find('{')
end_idx = data_str.find('}', start_idx) + 1
# Check if both start and end indices were found
if start_idx == -1 or end_idx == 0:
return None
# Extract the first JSON dictionary
json_str = data_str[start_idx:end_idx]
try:
# Attempt to parse the JSON data
json_dict = json.loads(json_str)
return json_dict
except json.JSONDecodeError:
# If parsing fails, return None
return None
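# Generates one utterance at a time for a turn-by-turn chat. The completion is
# expected to be a JSON object; the cleanup returns a dict with "utterance"
# (the line to say) and "end" (True if the conversation should end).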
def run_gpt_generate_iterative_chat_utt(maze, init_persona, target_persona, retrieved, curr_context, curr_chat, test_input=None, verbose=False):
def create_prompt_input(maze, init_persona, target_persona, retrieved, curr_context, curr_chat, test_input=None):
persona = init_persona
prev_convo_insert = "\n"
if persona.a_mem.seq_chat:
for i in persona.a_mem.seq_chat:
if i.object == target_persona.scratch.name:
v1 = int((persona.scratch.curr_time - i.created).total_seconds()/60)
prev_convo_insert += f'{str(v1)} minutes ago, {persona.scratch.name} and {target_persona.scratch.name} were already {i.description} This context takes place after that conversation.'
break
if prev_convo_insert == "\n":
prev_convo_insert = ""
if persona.a_mem.seq_chat:
if int((persona.scratch.curr_time - persona.a_mem.seq_chat[-1].created).total_seconds()/60) > 480:
prev_convo_insert = ""
print (prev_convo_insert)
curr_sector = f"{maze.access_tile(persona.scratch.curr_tile)['sector']}"
curr_arena= f"{maze.access_tile(persona.scratch.curr_tile)['arena']}"
curr_location = f"{curr_arena} in {curr_sector}"
retrieved_str = ""
for key, vals in retrieved.items():
for v in vals:
retrieved_str += f"- {v.description}\n"
convo_str = ""
for i in curr_chat:
convo_str += ": ".join(i) + "\n"
if convo_str == "":
convo_str = "[The conversation has not started yet -- start it!]"
init_iss = f"Here is Here is a brief description of {init_persona.scratch.name}.\n{init_persona.scratch.get_str_iss()}"
prompt_input = [init_iss, init_persona.scratch.name, retrieved_str, prev_convo_insert,
curr_location, curr_context, init_persona.scratch.name, target_persona.scratch.name,
convo_str, init_persona.scratch.name, target_persona.scratch.name,
init_persona.scratch.name, init_persona.scratch.name,
init_persona.scratch.name
]
return prompt_input
def __chat_func_clean_up(gpt_response, prompt=""):
gpt_response = extract_first_json_dict(gpt_response)
cleaned_dict = dict()
cleaned = []
for key, val in gpt_response.items():
cleaned += [val]
cleaned_dict["utterance"] = cleaned[0]
cleaned_dict["end"] = True
if "f" in str(cleaned[1]) or "F" in str(cleaned[1]):
cleaned_dict["end"] = False
return cleaned_dict
def __chat_func_validate(gpt_response, prompt=""):
print ("ugh...")
try:
# print ("debug 1")
# print (gpt_response)
# print ("debug 2")
print (extract_first_json_dict(gpt_response))
# print ("debug 3")
return True
except:
return False
def get_fail_safe():
cleaned_dict = dict()
cleaned_dict["utterance"] = "..."
cleaned_dict["end"] = False
return cleaned_dict
print ("11")
prompt_template = "persona/prompt_template/v3_ChatGPT/iterative_convo_v1.txt"
prompt_input = create_prompt_input(maze, init_persona, target_persona, retrieved, curr_context, curr_chat)
print ("22")
prompt = generate_prompt(prompt_input, prompt_template)
print (prompt)
fail_safe = get_fail_safe()
output = ChatGPT_safe_generate_response_OLD(prompt, 3, fail_safe,
__chat_func_validate, __chat_func_clean_up, verbose)
print (output)
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
| generative_agents-main | reverie/backend_server/persona/prompt_template/run_gpt_prompt.py |
"""
Author: Joon Sung Park ([email protected])
File: print_prompt.py
Description: For printing prompts when the setting for verbose is set to True.
"""
import sys
sys.path.append('../')
import json
import numpy
import datetime
import random
from global_methods import *
from persona.prompt_template.gpt_structure import *
from utils import *
##############################################################################
# PERSONA Chapter 1: Prompt Structures #
##############################################################################
def print_run_prompts(prompt_template=None,
persona=None,
gpt_param=None,
prompt_input=None,
prompt=None,
output=None):
print (f"=== {prompt_template}")
print ("~~~ persona ---------------------------------------------------")
print (persona.name, "\n")
print ("~~~ gpt_param ----------------------------------------------------")
print (gpt_param, "\n")
print ("~~~ prompt_input ----------------------------------------------")
print (prompt_input, "\n")
print ("~~~ prompt ----------------------------------------------------")
print (prompt, "\n")
print ("~~~ output ----------------------------------------------------")
print (output, "\n")
print ("=== END ==========================================================")
print ("\n\n\n")
| generative_agents-main | reverie/backend_server/persona/prompt_template/print_prompt.py |
"""
Author: Joon Sung Park ([email protected])
File: defunct_run_gpt_prompt.py
Description: Defines all run gpt prompt functions. These functions directly
interface with the safe_generate_response function.
Note (March 10, 2023) -- Defunct
"""
import re
import datetime
import random
import string
import sys
sys.path.append('../../')
from global_methods import *
from persona.prompt_template.gpt_structure import *
from persona.prompt_template.print_prompt import *
def get_random_alphanumeric(i=6, j=6):
"""
  Returns a random alphanumeric string whose length is somewhere
  between i and j. 
INPUT:
i: min_range for the length
j: max_range for the length
OUTPUT:
    an alphanumeric str with a length somewhere between i and j.
"""
k = random.randint(i, j)
x = ''.join(random.choices(string.ascii_letters + string.digits, k=k))
return x
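# Illustrative example: get_random_alphanumeric(3, 5) might return "a9Qx"
# (a length drawn uniformly from [3, 5]).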
##############################################################################
# CHAPTER 1: Run GPT Prompt
##############################################################################
def run_gpt_prompt_wake_up_hour(persona, test_input=None, verbose=False):
"""
Given the persona, returns an integer that indicates the hour when the
persona wakes up.
INPUT:
persona: The Persona class instance
OUTPUT:
integer for the wake up hour.
"""
def create_prompt_input(persona, test_input=None):
if test_input: return test_input
prompt_input = [persona.scratch.get_str_iss(),
persona.scratch.get_str_lifestyle(),
persona.scratch.get_str_firstname()]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = int(gpt_response.strip().lower().split("am")[0])
return cr
def __func_validate(gpt_response, prompt=""):
try: __func_clean_up(gpt_response, prompt="")
except: return False
return True
def get_fail_safe():
fs = 8
return fs
gpt_param = {"engine": "text-davinci-002", "max_tokens": 5,
"temperature": 0.8, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/wake_up_hour_v1.txt"
prompt_input = create_prompt_input(persona, test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_daily_plan(persona,
wake_up_hour,
test_input=None,
verbose=False):
"""
Basically the long term planning that spans a day. Returns a list of actions
that the persona will take today. Usually comes in the following form:
'wake up and complete the morning routine at 6:00 am',
'eat breakfast at 7:00 am',..
Note that the actions come without a period.
INPUT:
persona: The Persona class instance
OUTPUT:
a list of daily actions in broad strokes.
"""
def create_prompt_input(persona, wake_up_hour, test_input=None):
if test_input: return test_input
prompt_input = []
prompt_input += [persona.scratch.get_str_iss()]
prompt_input += [persona.scratch.get_str_lifestyle()]
prompt_input += [persona.scratch.get_str_curr_date_str()]
prompt_input += [persona.scratch.get_str_firstname()]
prompt_input += [f"{str(wake_up_hour)}:00 am"]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = []
_cr = gpt_response.split(")")
for i in _cr:
if i[-1].isdigit():
i = i[:-1].strip()
if i[-1] == "." or i[-1] == ",":
cr += [i[:-1].strip()]
return cr
def __func_validate(gpt_response, prompt=""):
try: __func_clean_up(gpt_response, prompt="")
except:
return False
return True
def get_fail_safe():
fs = ['wake up and complete the morning routine at 6:00 am',
'eat breakfast at 7:00 am',
'read a book from 8:00 am to 12:00 pm',
'have lunch at 12:00 pm',
'take a nap from 1:00 pm to 4:00 pm',
'relax and watch TV from 7:00 pm to 8:00 pm',
'go to bed at 11:00 pm']
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 500,
"temperature": 1, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/daily_planning_v6.txt"
prompt_input = create_prompt_input(persona, wake_up_hour, test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
output = ([f"wake up and complete the morning routine at {wake_up_hour}:00 am"]
+ output)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
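# Fills in the persona's day one hour at a time. The prompt shows the schedule
# format, the persona's identity, any hours already planned
# (p_f_ds_hourly_org), and the originally intended daily plan, then asks for
# the activity during curr_hour_str.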
def run_gpt_prompt_generate_hourly_schedule(persona,
curr_hour_str,
p_f_ds_hourly_org,
hour_str,
intermission2=None,
test_input=None,
verbose=False):
def create_prompt_input(persona,
curr_hour_str,
p_f_ds_hourly_org,
hour_str,
intermission2=None,
test_input=None):
if test_input: return test_input
schedule_format = ""
for i in hour_str:
schedule_format += f"[{persona.scratch.get_str_curr_date_str()} -- {i}]"
schedule_format += f" Activity: [Fill in]\n"
schedule_format = schedule_format[:-1]
intermission_str = f"Here the originally intended hourly breakdown of"
intermission_str += f" {persona.scratch.get_str_firstname()}'s schedule today: "
for count, i in enumerate(persona.scratch.daily_req):
intermission_str += f"{str(count+1)}) {i}, "
intermission_str = intermission_str[:-2]
prior_schedule = ""
if p_f_ds_hourly_org:
prior_schedule = "\n"
for count, i in enumerate(p_f_ds_hourly_org):
prior_schedule += f"[(ID:{get_random_alphanumeric()})"
prior_schedule += f" {persona.scratch.get_str_curr_date_str()} --"
prior_schedule += f" {hour_str[count]}] Activity:"
prior_schedule += f" {persona.scratch.get_str_firstname()}"
prior_schedule += f" is {i}\n"
prompt_ending = f"[(ID:{get_random_alphanumeric()})"
prompt_ending += f" {persona.scratch.get_str_curr_date_str()}"
prompt_ending += f" -- {curr_hour_str}] Activity:"
prompt_ending += f" {persona.scratch.get_str_firstname()} is"
if intermission2:
intermission2 = f"\n{intermission2}"
prompt_input = []
prompt_input += [schedule_format]
prompt_input += [persona.scratch.get_str_iss()]
prompt_input += [prior_schedule + "\n"]
prompt_input += [intermission_str]
if intermission2:
prompt_input += [intermission2]
else:
prompt_input += [""]
prompt_input += [prompt_ending]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
if cr[-1] == ".":
cr = cr[:-1]
return cr
def __func_validate(gpt_response, prompt=""):
try: __func_clean_up(gpt_response, prompt="")
except: return False
return True
def get_fail_safe():
fs = "asleep"
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0.5, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/generate_hourly_schedule_v2.txt"
prompt_input = create_prompt_input(persona,
curr_hour_str,
p_f_ds_hourly_org,
hour_str,
intermission2,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
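# Decomposes an hour-level task into [subtask, duration_in_minutes] pairs and
# normalizes the durations so that they sum to the requested total duration.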
def run_gpt_prompt_task_decomp(persona,
task,
duration,
test_input=None,
verbose=False):
def create_prompt_input(persona, task, duration, test_input=None):
"""
Today is Saturday June 25. From 00:00 ~ 06:00am, Maeve is
planning on sleeping, 06:00 ~ 07:00am, Maeve is
planning on waking up and doing her morning routine,
and from 07:00am ~08:00am, Maeve is planning on having breakfast.
"""
curr_f_org_index = persona.scratch.get_f_daily_schedule_hourly_org_index()
all_indices = []
# if curr_f_org_index > 0:
# all_indices += [curr_f_org_index-1]
all_indices += [curr_f_org_index]
if curr_f_org_index+1 <= len(persona.scratch.f_daily_schedule_hourly_org):
all_indices += [curr_f_org_index+1]
if curr_f_org_index+2 <= len(persona.scratch.f_daily_schedule_hourly_org):
all_indices += [curr_f_org_index+2]
curr_time_range = ""
print ("DEBUG")
print (persona.scratch.f_daily_schedule_hourly_org)
print (all_indices)
summ_str = f'Today is {persona.scratch.curr_time.strftime("%B %d, %Y")}. '
summ_str += f'From '
for index in all_indices:
print ("index", index)
if index < len(persona.scratch.f_daily_schedule_hourly_org):
start_min = 0
for i in range(index):
start_min += persona.scratch.f_daily_schedule_hourly_org[i][1]
end_min = start_min + persona.scratch.f_daily_schedule_hourly_org[index][1]
start_time = (datetime.datetime.strptime("00:00:00", "%H:%M:%S")
+ datetime.timedelta(minutes=start_min))
end_time = (datetime.datetime.strptime("00:00:00", "%H:%M:%S")
+ datetime.timedelta(minutes=end_min))
start_time_str = start_time.strftime("%H:%M%p")
end_time_str = end_time.strftime("%H:%M%p")
summ_str += f"{start_time_str} ~ {end_time_str}, {persona.name} is planning on {persona.scratch.f_daily_schedule_hourly_org[index][0]}, "
if curr_f_org_index+1 == index:
curr_time_range = f'{start_time_str} ~ {end_time_str}'
summ_str = summ_str[:-2] + "."
prompt_input = []
prompt_input += [persona.scratch.get_str_iss()]
prompt_input += [summ_str]
# prompt_input += [persona.scratch.get_str_curr_date_str()]
prompt_input += [persona.scratch.get_str_firstname()]
prompt_input += [persona.scratch.get_str_firstname()]
prompt_input += [task]
prompt_input += [curr_time_range]
prompt_input += [duration]
prompt_input += [persona.scratch.get_str_firstname()]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
print ("TOODOOOOOO")
print (gpt_response)
print ("-==- -==- -==- ")
# TODO SOMETHING HERE sometimes fails... See screenshot
temp = [i.strip() for i in gpt_response.split("\n")]
_cr = []
cr = []
for count, i in enumerate(temp):
if count != 0:
_cr += [" ".join([j.strip () for j in i.split(" ")][3:])]
else:
_cr += [i]
for count, i in enumerate(_cr):
k = [j.strip() for j in i.split("(duration in minutes:")]
task = k[0]
if task[-1] == ".":
task = task[:-1]
duration = int(k[1].split(",")[0].strip())
cr += [[task, duration]]
total_expected_min = int(prompt.split("(total duration in minutes")[-1]
.split("):")[0].strip())
# TODO -- now, you need to make sure that this is the same as the sum of
# the current action sequence.
curr_min_slot = [["dummy", -1],] # (task_name, task_index)
for count, i in enumerate(cr):
i_task = i[0]
i_duration = i[1]
i_duration -= (i_duration % 5)
if i_duration > 0:
for j in range(i_duration):
curr_min_slot += [(i_task, count)]
curr_min_slot = curr_min_slot[1:]
if len(curr_min_slot) > total_expected_min:
last_task = curr_min_slot[60]
for i in range(1, 6):
curr_min_slot[-1 * i] = last_task
elif len(curr_min_slot) < total_expected_min:
last_task = curr_min_slot[-1]
for i in range(total_expected_min - len(curr_min_slot)):
curr_min_slot += [last_task]
cr_ret = [["dummy", -1],]
for task, task_index in curr_min_slot:
if task != cr_ret[-1][0]:
cr_ret += [[task, 1]]
else:
cr_ret[-1][1] += 1
cr = cr_ret[1:]
return cr
  def __func_validate(gpt_response, prompt=""): 
    # Validate by attempting the full cleanup; the cleanup needs the prompt to
    # recover the total expected duration, so pass it through.
    try: 
      __func_clean_up(gpt_response, prompt)
      return True
    except: 
      return False
def get_fail_safe():
fs = ["asleep"]
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/task_decomp_v3.txt"
prompt_input = create_prompt_input(persona, task, duration)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
print ("?????")
print (prompt)
output = safe_generate_response(prompt, gpt_param, 5, get_fail_safe(),
__func_validate, __func_clean_up)
# TODO THERE WAS A BUG HERE...
# This is for preventing overflows...
"""
File "/Users/joonsungpark/Desktop/Stanford/Projects/
generative-personas/src_exploration/reverie_simulation/
brain/get_next_action_v3.py", line 364, in run_gpt_prompt_task_decomp
fin_output[-1][1] += (duration - ftime_sum)
IndexError: list index out of range
"""
print ("IMPORTANT VVV DEBUG")
# print (prompt_input)
# print (prompt)
print (output)
fin_output = []
time_sum = 0
for i_task, i_duration in output:
time_sum += i_duration
    # Keep only the subtasks that fit within the requested duration.
# if time_sum < duration:
if time_sum <= duration:
fin_output += [[i_task, i_duration]]
else:
break
ftime_sum = 0
for fi_task, fi_duration in fin_output:
ftime_sum += fi_duration
# print ("for debugging... line 365", fin_output)
fin_output[-1][1] += (duration - ftime_sum)
output = fin_output
task_decomp = output
ret = []
for decomp_task, duration in task_decomp:
ret += [[f"{task} ({decomp_task})", duration]]
output = ret
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
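# Picks the sector in which the action should take place, constrained to
# sectors the persona can access; falls back to the persona's living area if
# the model names an inaccessible sector.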
def run_gpt_prompt_action_sector(action_description,
persona,
maze,
test_input=None,
verbose=False):
def create_prompt_input(action_description, persona, maze, test_input=None):
act_world = f"{maze.access_tile(persona.scratch.curr_tile)['world']}"
prompt_input = []
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [persona.scratch.living_area.split(":")[1]]
x = f"{act_world}:{persona.scratch.living_area.split(':')[1]}"
prompt_input += [persona.s_mem.get_str_accessible_sector_arenas(x)]
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [f"{maze.access_tile(persona.scratch.curr_tile)['sector']}"]
x = f"{act_world}:{maze.access_tile(persona.scratch.curr_tile)['sector']}"
prompt_input += [persona.s_mem.get_str_accessible_sector_arenas(x)]
if persona.scratch.get_str_daily_plan_req() != "":
prompt_input += [f"\n{persona.scratch.get_str_daily_plan_req()}"]
else:
prompt_input += [""]
prompt_input += [persona.s_mem.get_str_accessible_sectors(act_world)]
action_description_1 = action_description
action_description_2 = action_description
if "(" in action_description:
action_description_1 = action_description.split("(")[0].strip()
action_description_2 = action_description.split("(")[-1][:-1]
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [action_description_1]
prompt_input += [action_description_2]
prompt_input += [persona.scratch.get_str_name()]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cleaned_response = gpt_response.split("}")[0]
return cleaned_response
def __func_validate(gpt_response, prompt=""):
if len(gpt_response.strip()) < 1:
return False
if "}" not in gpt_response:
return False
if "," in gpt_response:
return False
return True
def get_fail_safe():
fs = ("kitchen")
return fs
gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v1/action_location_sector_v2.txt"
prompt_input = create_prompt_input(action_description, persona, maze)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
y = f"{maze.access_tile(persona.scratch.curr_tile)['world']}"
x = [i.strip() for i in persona.s_mem.get_str_accessible_sectors(y).split(",")]
if output not in x:
# output = random.choice(x)
output = persona.scratch.living_area.split(":")[1]
print ("DEBUG", random.choice(x), "------", output)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
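# Illustrative sketch (not part of the original pipeline; all values below are
# hypothetical): the membership check above splits the comma-separated
# accessible-sector string from spatial memory and falls back to the persona's
# living area when the generated sector is not accessible.
def _example_sector_fallback(generated_sector, accessible_sectors_str, living_area):
  accessible = [i.strip() for i in accessible_sectors_str.split(",")]
  if generated_sector in accessible:
    return generated_sector
  # living_area comes in the form "{world}:{sector}"; keep only the sector.
  return living_area.split(":")[1]
# e.g., _example_sector_fallback("library",
#                                "Hobbs Cafe, The Rose and Crown Pub",
#                                "the Ville:Dolores double studio")
# -> "Dolores double studio"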
def run_gpt_prompt_action_arena(action_description,
persona,
maze, act_world, act_sector,
test_input=None,
verbose=False):
def create_prompt_input(action_description, persona, maze, act_world, act_sector, test_input=None):
prompt_input = []
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [maze.access_tile(persona.scratch.curr_tile)["arena"]]
prompt_input += [maze.access_tile(persona.scratch.curr_tile)["sector"]]
prompt_input += [persona.scratch.get_str_name()]
x = f"{act_world}:{act_sector}"
prompt_input += [act_sector]
prompt_input += [persona.s_mem.get_str_accessible_sector_arenas(x)]
action_description_1 = action_description
action_description_2 = action_description
if "(" in action_description:
action_description_1 = action_description.split("(")[0].strip()
action_description_2 = action_description.split("(")[-1][:-1]
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [action_description_1]
prompt_input += [action_description_2]
prompt_input += [persona.scratch.get_str_name()]
prompt_input += [act_sector]
# prompt_input += [maze.access_tile(persona.scratch.curr_tile)["arena"]]
# x = f"{maze.access_tile(persona.scratch.curr_tile)['world']}:{maze.access_tile(persona.scratch.curr_tile)['sector']}:{maze.access_tile(persona.scratch.curr_tile)['arena']}"
# prompt_input += [persona.s_mem.get_str_accessible_arena_game_objects(x)]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cleaned_response = gpt_response.split("}")[0]
return cleaned_response
def __func_validate(gpt_response, prompt=""):
if len(gpt_response.strip()) < 1:
return False
if "}" not in gpt_response:
return False
if "," in gpt_response:
return False
return True
def get_fail_safe():
fs = ("kitchen")
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v1/action_location_object_v1.txt"
prompt_input = create_prompt_input(action_description, persona, maze, act_world, act_sector)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
y = f"{act_world}:{act_sector}"
x = [i.strip() for i in persona.s_mem.get_str_accessible_sector_arenas(y).split(",")]
if output not in x:
output = random.choice(x)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_action_game_object(action_description,
persona,
maze,
temp_address,
test_input=None,
verbose=False):
def create_prompt_input(action_description,
persona,
temp_address,
test_input=None):
prompt_input = []
if "(" in action_description:
action_description = action_description.split("(")[-1][:-1]
prompt_input += [action_description]
prompt_input += [persona
.s_mem.get_str_accessible_arena_game_objects(temp_address)]
return prompt_input
def __func_validate(gpt_response, prompt=""):
if len(gpt_response.strip()) < 1:
return False
return True
def __func_clean_up(gpt_response, prompt=""):
cleaned_response = gpt_response.strip()
return cleaned_response
def get_fail_safe():
fs = ("bed")
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v1/action_object_v2.txt"
prompt_input = create_prompt_input(action_description,
persona,
temp_address,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
x = [i.strip() for i in persona.s_mem.get_str_accessible_arena_game_objects(temp_address).split(",")]
if output not in x:
output = random.choice(x)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_pronunciatio(action_description, persona, verbose=False):
def create_prompt_input(action_description):
if "(" in action_description:
action_description = action_description.split("(")[-1].split(")")[0]
prompt_input = [action_description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
if len(cr) > 3:
cr = cr[:3]
return cr
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt="")
if len(gpt_response) == 0:
return False
except: return False
return True
def get_fail_safe():
fs = "π"
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 15,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/generate_pronunciatio_v1.txt"
prompt_input = create_prompt_input(action_description)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_event_triple(action_description, persona, verbose=False):
def create_prompt_input(action_description, persona):
if "(" in action_description:
action_description = action_description.split("(")[-1].split(")")[0]
prompt_input = [persona.name,
action_description,
persona.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
cr = [i.strip() for i in cr.split(")")[0].split(",")]
return cr
def __func_validate(gpt_response, prompt=""):
try:
gpt_response = __func_clean_up(gpt_response, prompt="")
if len(gpt_response) != 2:
return False
except: return False
return True
  def get_fail_safe(persona):
    # Note: the persona's name is prepended to the output below, so the fail
    # safe only needs to supply the predicate and the object.
    fs = ("is", "idle")
    return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 30,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/generate_event_triple_v1.txt"
prompt_input = create_prompt_input(action_description, persona)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(persona)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
output = (persona.name, output[0], output[1])
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
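# Illustrative sketch (not part of the original pipeline; the response text
# below is made up): the clean-up above expects the model to complete a
# "predicate, object)" pair; it splits on ")" and ",", and the caller then
# prepends the persona's name to form the (subject, predicate, object) triple.
def _example_event_triple(persona_name, gpt_response):
  parts = [i.strip() for i in gpt_response.strip().split(")")[0].split(",")]
  assert len(parts) == 2, "expected exactly a predicate and an object"
  return (persona_name, parts[0], parts[1])
# e.g., _example_event_triple("Isabella Rodriguez", "is cooking, breakfast)")
# -> ("Isabella Rodriguez", "is cooking", "breakfast")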
def run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona, verbose=False):
def create_prompt_input(act_game_object, act_desp, persona):
prompt_input = [act_game_object,
persona.name,
act_desp,
act_game_object,
act_game_object]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
if cr[-1] == ".": cr = cr[:-1]
return cr
def __func_validate(gpt_response, prompt=""):
try:
gpt_response = __func_clean_up(gpt_response, prompt="")
except:
return False
return True
def get_fail_safe(act_game_object):
fs = f"{act_game_object} is idle"
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 30,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/generate_obj_event_v1.txt"
prompt_input = create_prompt_input(act_game_object, act_desp, persona)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(act_game_object)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_act_obj_event_triple(act_game_object, act_obj_desc, persona, verbose=False):
def create_prompt_input(act_game_object, act_obj_desc):
prompt_input = [act_game_object,
act_obj_desc,
act_game_object]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
cr = gpt_response.strip()
cr = [i.strip() for i in cr.split(")")[0].split(",")]
return cr
def __func_validate(gpt_response, prompt=""):
try:
gpt_response = __func_clean_up(gpt_response, prompt="")
if len(gpt_response) != 2:
return False
except: return False
return True
  def get_fail_safe(act_game_object):
    # Note: the game object is prepended to the output below, so the fail
    # safe only needs to supply the predicate and the object.
    fs = ("is", "idle")
    return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 30,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
prompt_template = "persona/prompt_template/v2/generate_event_triple_v1.txt"
prompt_input = create_prompt_input(act_game_object, act_obj_desc)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(act_game_object)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
output = (act_game_object, output[0], output[1])
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_new_decomp_schedule(persona,
main_act_dur,
truncated_act_dur,
start_time_hour,
end_time_hour,
inserted_act,
inserted_act_dur,
test_input=None,
verbose=False):
def create_prompt_input(persona,
main_act_dur,
truncated_act_dur,
start_time_hour,
end_time_hour,
inserted_act,
inserted_act_dur,
test_input=None):
persona_name = persona.name
start_hour_str = start_time_hour.strftime("%H:%M %p")
end_hour_str = end_time_hour.strftime("%H:%M %p")
original_plan = ""
for_time = start_time_hour
for i in main_act_dur:
original_plan += f'{for_time.strftime("%H:%M")} ~ {(for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M")} -- ' + i[0]
original_plan += "\n"
for_time += datetime.timedelta(minutes=int(i[1]))
new_plan_init = ""
for_time = start_time_hour
for count, i in enumerate(truncated_act_dur):
new_plan_init += f'{for_time.strftime("%H:%M")} ~ {(for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M")} -- ' + i[0]
new_plan_init += "\n"
if count < len(truncated_act_dur) - 1:
for_time += datetime.timedelta(minutes=int(i[1]))
new_plan_init += (for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M") + " ~"
prompt_input = [persona_name,
start_hour_str,
end_hour_str,
original_plan,
persona_name,
inserted_act,
inserted_act_dur,
persona_name,
start_hour_str,
end_hour_str,
end_hour_str,
new_plan_init]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
new_schedule = prompt + " " + gpt_response.strip()
new_schedule = new_schedule.split("The revised schedule:")[-1].strip()
new_schedule = new_schedule.split("\n")
ret_temp = []
for i in new_schedule:
ret_temp += [i.split(" -- ")]
ret = []
for time_str, action in ret_temp:
start_time = time_str.split(" ~ ")[0].strip()
end_time = time_str.split(" ~ ")[1].strip()
delta = datetime.datetime.strptime(end_time, "%H:%M") - datetime.datetime.strptime(start_time, "%H:%M")
delta_min = int(delta.total_seconds()/60)
if delta_min < 0: delta_min = 0
ret += [[action, delta_min]]
return ret
def __func_validate(gpt_response, prompt=""):
try:
gpt_response = __func_clean_up(gpt_response, prompt)
dur_sum = 0
for act, dur in gpt_response:
dur_sum += dur
if str(type(act)) != "<class 'str'>":
return False
if str(type(dur)) != "<class 'int'>":
return False
x = prompt.split("\n")[0].split("originally planned schedule from")[-1].strip()[:-1]
x = [datetime.datetime.strptime(i.strip(), "%H:%M %p") for i in x.split(" to ")]
delta_min = int((x[1] - x[0]).total_seconds()/60)
if int(dur_sum) != int(delta_min):
return False
except:
return False
return True
def get_fail_safe(main_act_dur, truncated_act_dur):
dur_sum = 0
for act, dur in main_act_dur: dur_sum += dur
ret = truncated_act_dur[:]
ret += main_act_dur[len(ret)-1:]
    # If there is excess time, we need to trim...
ret_dur_sum = 0
count = 0
over = None
for act, dur in ret:
ret_dur_sum += dur
if ret_dur_sum == dur_sum:
break
if ret_dur_sum > dur_sum:
over = ret_dur_sum - dur_sum
break
count += 1
if over:
ret = ret[:count+1]
ret[-1][1] -= over
return ret
gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/new_decomp_schedule_v1.txt"
prompt_input = create_prompt_input(persona,
main_act_dur,
truncated_act_dur,
start_time_hour,
end_time_hour,
inserted_act,
inserted_act_dur,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(main_act_dur, truncated_act_dur)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
# print ("* * * * output")
# print (output)
# print ('* * * * fail_safe')
# print (fail_safe)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
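# Illustrative sketch (not part of the original pipeline; the schedule text
# below is hypothetical): the clean-up above parses lines of the form
# "HH:MM ~ HH:MM -- task" into [task, minutes] pairs, clamping negative
# durations to zero. It relies on the module-level datetime import used
# elsewhere in this file.
def _example_parse_schedule_lines(schedule_text):
  ret = []
  for line in schedule_text.strip().split("\n"):
    time_str, action = line.split(" -- ")
    start_time = time_str.split(" ~ ")[0].strip()
    end_time = time_str.split(" ~ ")[1].strip()
    delta = (datetime.datetime.strptime(end_time, "%H:%M")
             - datetime.datetime.strptime(start_time, "%H:%M"))
    ret += [[action, max(int(delta.total_seconds() / 60), 0)]]
  return ret
# e.g., _example_parse_schedule_lines("09:00 ~ 09:30 -- reviewing mail\n"
#                                     "09:30 ~ 10:15 -- writing")
# -> [["reviewing mail", 30], ["writing", 45]]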
def run_gpt_prompt_decide_to_talk(persona, target_persona, retrieved,test_input=None,
verbose=False):
def create_prompt_input(init_persona, target_persona, retrieved,
test_input=None):
last_chat = init_persona.a_mem.get_last_chat(target_persona.name)
last_chatted_time = ""
last_chat_about = ""
if last_chat:
last_chatted_time = last_chat.created.strftime("%B %d, %Y, %H:%M:%S")
last_chat_about = last_chat.description
context = ""
for c_node in retrieved["events"]:
curr_desc = c_node.description.split(" ")
curr_desc[2:3] = ["was"]
curr_desc = " ".join(curr_desc)
context += f"{curr_desc}. "
context += "\n"
for c_node in retrieved["thoughts"]:
context += f"{c_node.description}. "
curr_time = init_persona.scratch.curr_time.strftime("%B %d, %Y, %H:%M:%S %p")
init_act_desc = init_persona.scratch.act_description
if "(" in init_act_desc:
init_act_desc = init_act_desc.split("(")[-1][:-1]
if len(init_persona.scratch.planned_path) == 0 and "waiting" not in init_act_desc:
init_p_desc = f"{init_persona.name} is already {init_act_desc}"
elif "waiting" in init_act_desc:
init_p_desc = f"{init_persona.name} is {init_act_desc}"
else:
init_p_desc = f"{init_persona.name} is on the way to {init_act_desc}"
target_act_desc = target_persona.scratch.act_description
if "(" in target_act_desc:
target_act_desc = target_act_desc.split("(")[-1][:-1]
    if len(target_persona.scratch.planned_path) == 0 and "waiting" not in target_act_desc:
      target_p_desc = f"{target_persona.name} is already {target_act_desc}"
    elif "waiting" in target_act_desc:
      target_p_desc = f"{target_persona.name} is {target_act_desc}"
else:
target_p_desc = f"{target_persona.name} is on the way to {target_act_desc}"
prompt_input = []
prompt_input += [context]
prompt_input += [curr_time]
prompt_input += [init_persona.name]
prompt_input += [target_persona.name]
prompt_input += [last_chatted_time]
prompt_input += [last_chat_about]
prompt_input += [init_p_desc]
prompt_input += [target_p_desc]
prompt_input += [init_persona.name]
prompt_input += [target_persona.name]
return prompt_input
def __func_validate(gpt_response, prompt=""):
try:
if gpt_response.split("Answer in yes or no:")[-1].strip().lower() in ["yes", "no"]:
return True
return False
except:
return False
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split("Answer in yes or no:")[-1].strip().lower()
def get_fail_safe():
fs = "yes"
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 20,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/decide_to_talk_v2.txt"
prompt_input = create_prompt_input(persona, target_persona, retrieved,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_decide_to_react(persona, target_persona, retrieved,test_input=None,
verbose=False):
def create_prompt_input(init_persona, target_persona, retrieved,
test_input=None):
context = ""
for c_node in retrieved["events"]:
curr_desc = c_node.description.split(" ")
curr_desc[2:3] = ["was"]
curr_desc = " ".join(curr_desc)
context += f"{curr_desc}. "
context += "\n"
for c_node in retrieved["thoughts"]:
context += f"{c_node.description}. "
curr_time = init_persona.scratch.curr_time.strftime("%B %d, %Y, %H:%M:%S %p")
init_act_desc = init_persona.scratch.act_description
if "(" in init_act_desc:
init_act_desc = init_act_desc.split("(")[-1][:-1]
if len(init_persona.scratch.planned_path) == 0:
loc = ""
if ":" in init_persona.scratch.act_address:
loc = init_persona.scratch.act_address.split(":")[-1] + " in " + init_persona.scratch.act_address.split(":")[-2]
init_p_desc = f"{init_persona.name} is already {init_act_desc} at {loc}"
else:
loc = ""
if ":" in init_persona.scratch.act_address:
loc = init_persona.scratch.act_address.split(":")[-1] + " in " + init_persona.scratch.act_address.split(":")[-2]
init_p_desc = f"{init_persona.name} is on the way to {init_act_desc} at {loc}"
target_act_desc = target_persona.scratch.act_description
if "(" in target_act_desc:
target_act_desc = target_act_desc.split("(")[-1][:-1]
if len(target_persona.scratch.planned_path) == 0:
loc = ""
if ":" in target_persona.scratch.act_address:
loc = target_persona.scratch.act_address.split(":")[-1] + " in " + target_persona.scratch.act_address.split(":")[-2]
target_p_desc = f"{target_persona.name} is already {target_act_desc} at {loc}"
else:
loc = ""
if ":" in target_persona.scratch.act_address:
loc = target_persona.scratch.act_address.split(":")[-1] + " in " + target_persona.scratch.act_address.split(":")[-2]
target_p_desc = f"{target_persona.name} is on the way to {target_act_desc} at {loc}"
prompt_input = []
prompt_input += [context]
prompt_input += [curr_time]
prompt_input += [init_p_desc]
prompt_input += [target_p_desc]
prompt_input += [init_persona.name]
prompt_input += [init_act_desc]
prompt_input += [target_persona.name]
prompt_input += [target_act_desc]
prompt_input += [init_act_desc]
return prompt_input
def __func_validate(gpt_response, prompt=""):
try:
if gpt_response.split("Answer: Option")[-1].strip().lower() in ["3", "2", "1"]:
return True
return False
except:
return False
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split("Answer: Option")[-1].strip().lower()
def get_fail_safe():
fs = "3"
return fs
gpt_param = {"engine": "text-davinci-003", "max_tokens": 20,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/decide_to_react_v1.txt"
prompt_input = create_prompt_input(persona, target_persona, retrieved,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_create_conversation(persona, target_persona, curr_loc,
test_input=None, verbose=False):
def create_prompt_input(init_persona, target_persona, curr_loc,
test_input=None):
prev_convo_insert = "\n"
if init_persona.a_mem.seq_chat:
for i in init_persona.a_mem.seq_chat:
if i.object == target_persona.scratch.name:
v1 = int((init_persona.scratch.curr_time - i.created).total_seconds()/60)
prev_convo_insert += f'{str(v1)} minutes ago, they had the following conversation.\n'
for row in i.filling:
prev_convo_insert += f'{row[0]}: "{row[1]}"\n'
break
if prev_convo_insert == "\n":
prev_convo_insert = ""
if init_persona.a_mem.seq_chat:
if int((init_persona.scratch.curr_time - init_persona.a_mem.seq_chat[-1].created).total_seconds()/60) > 480:
prev_convo_insert = ""
init_persona_thought_nodes = init_persona.a_mem.retrieve_relevant_thoughts(target_persona.scratch.act_event[0],
target_persona.scratch.act_event[1],
target_persona.scratch.act_event[2])
init_persona_thought = ""
for i in init_persona_thought_nodes:
init_persona_thought += f"-- {i.description}\n"
target_persona_thought_nodes = target_persona.a_mem.retrieve_relevant_thoughts(init_persona.scratch.act_event[0],
init_persona.scratch.act_event[1],
init_persona.scratch.act_event[2])
target_persona_thought = ""
for i in target_persona_thought_nodes:
target_persona_thought += f"-- {i.description}\n"
init_persona_curr_desc = ""
if init_persona.scratch.planned_path:
init_persona_curr_desc = f"{init_persona.name} is on the way to {init_persona.scratch.act_description}"
else:
init_persona_curr_desc = f"{init_persona.name} is {init_persona.scratch.act_description}"
target_persona_curr_desc = ""
if target_persona.scratch.planned_path:
target_persona_curr_desc = f"{target_persona.name} is on the way to {target_persona.scratch.act_description}"
else:
target_persona_curr_desc = f"{target_persona.name} is {target_persona.scratch.act_description}"
curr_loc = curr_loc["arena"]
prompt_input = []
prompt_input += [init_persona.scratch.get_str_iss()]
prompt_input += [target_persona.scratch.get_str_iss()]
prompt_input += [init_persona.name]
prompt_input += [target_persona.name]
prompt_input += [init_persona_thought]
prompt_input += [target_persona.name]
prompt_input += [init_persona.name]
prompt_input += [target_persona_thought]
prompt_input += [init_persona.scratch.curr_time.strftime("%B %d, %Y, %H:%M:%S")]
prompt_input += [init_persona_curr_desc]
prompt_input += [target_persona_curr_desc]
prompt_input += [prev_convo_insert]
prompt_input += [init_persona.name]
prompt_input += [target_persona.name]
prompt_input += [curr_loc]
prompt_input += [init_persona.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
# print ("???")
# print (gpt_response)
gpt_response = (prompt + gpt_response).split("What would they talk about now?")[-1].strip()
content = re.findall('"([^"]*)"', gpt_response)
speaker_order = []
for i in gpt_response.split("\n"):
name = i.split(":")[0].strip()
if name:
speaker_order += [name]
ret = []
for count, speaker in enumerate(speaker_order):
ret += [[speaker, content[count]]]
return ret
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe(init_persona, target_persona):
convo = [[init_persona.name, "Hi!"],
[target_persona.name, "Hi!"]]
return convo
gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000,
"temperature": 0.7, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/create_conversation_v2.txt"
prompt_input = create_prompt_input(persona, target_persona, curr_loc,
test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(persona, target_persona)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
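# Illustrative sketch (not part of the original pipeline; the conversation
# text below is made up): the clean-up above extracts the quoted utterances
# with re.findall and pairs them with the speaker names that prefix each line.
# It relies on the module-level re import used elsewhere in this file.
def _example_parse_convo(convo_text):
  content = re.findall('"([^"]*)"', convo_text)
  speaker_order = []
  for line in convo_text.split("\n"):
    name = line.split(":")[0].strip()
    if name:
      speaker_order += [name]
  return [[speaker, content[count]] for count, speaker in enumerate(speaker_order)]
# e.g., _example_parse_convo('Isabella Rodriguez: "Good morning!"\n'
#                            'Klaus Mueller: "Morning, Isabella."')
# -> [["Isabella Rodriguez", "Good morning!"],
#     ["Klaus Mueller", "Morning, Isabella."]]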
def run_gpt_prompt_summarize_conversation(persona, conversation, test_input=None, verbose=False):
def create_prompt_input(conversation, test_input=None):
convo_str = ""
for row in conversation:
convo_str += f'{row[0]}: "{row[1]}"\n'
prompt_input = [convo_str]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
ret = "conversing about " + gpt_response.strip()
return ret
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "conversing with a housemate about morning greetings"
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/summarize_conversation_v1.txt"
prompt_input = create_prompt_input(conversation, test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_extract_keywords(persona, description, test_input=None, verbose=False):
def create_prompt_input(description, test_input=None):
if "\n" in description:
description = description.replace("\n", " <LINE_BREAK> ")
prompt_input = [description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
print ("???")
print (gpt_response)
gpt_response = gpt_response.strip().split("Emotive keywords:")
factual = [i.strip() for i in gpt_response[0].split(",")]
emotive = [i.strip() for i in gpt_response[1].split(",")]
all_keywords = factual + emotive
ret = []
for i in all_keywords:
if i:
i = i.lower()
if i[-1] == ".":
i = i[:-1]
ret += [i]
print (ret)
return set(ret)
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return []
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/get_keywords_v1.txt"
prompt_input = create_prompt_input(description, test_input)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
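# Illustrative sketch (not part of the original pipeline; the response text
# below is made up): the clean-up above expects factual keywords first and
# emotive keywords after an "Emotive keywords:" marker; both lists are merged,
# lower-cased, stripped of trailing periods, and returned as a set.
def _example_parse_keywords(gpt_response):
  parts = gpt_response.strip().split("Emotive keywords:")
  factual = [i.strip() for i in parts[0].split(",")]
  emotive = [i.strip() for i in parts[1].split(",")]
  ret = []
  for kw in factual + emotive:
    if kw:
      kw = kw.lower()
      if kw[-1] == ".":
        kw = kw[:-1]
      ret += [kw]
  return set(ret)
# e.g., _example_parse_keywords("coffee, morning routine\n"
#                               "Emotive keywords: cheerful, relaxed.")
# -> {"coffee", "morning routine", "cheerful", "relaxed"}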
def run_gpt_prompt_keyword_to_thoughts(persona, keyword, concept_summary, test_input=None, verbose=False):
def create_prompt_input(persona, keyword, concept_summary, test_input=None):
prompt_input = [keyword, concept_summary, persona.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = gpt_response.strip()
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return ""
gpt_param = {"engine": "text-davinci-003", "max_tokens": 40,
"temperature": 0.7, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/keyword_to_thoughts_v1.txt"
prompt_input = create_prompt_input(persona, keyword, concept_summary)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_convo_to_thoughts(persona,
init_persona_name,
target_persona_name,
convo_str,
fin_target, test_input=None, verbose=False):
def create_prompt_input(init_persona_name,
target_persona_name,
convo_str,
fin_target, test_input=None):
prompt_input = [init_persona_name,
target_persona_name,
convo_str,
init_persona_name,
fin_target]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = gpt_response.strip()
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return ""
gpt_param = {"engine": "text-davinci-003", "max_tokens": 40,
"temperature": 0.7, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/convo_to_thoughts_v1.txt"
prompt_input = create_prompt_input(init_persona_name,
target_persona_name,
convo_str,
fin_target)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_event_poignancy(persona, event_description, test_input=None, verbose=False):
def create_prompt_input(persona, event_description, test_input=None):
prompt_input = [persona.scratch.name,
persona.scratch.get_str_iss(),
persona.scratch.name,
event_description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = int(gpt_response.strip())
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return 4
gpt_param = {"engine": "text-davinci-003", "max_tokens": 3,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/poignancy_event_v1.txt"
prompt_input = create_prompt_input(persona, event_description)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_thought_poignancy(persona, event_description, test_input=None, verbose=False):
def create_prompt_input(persona, event_description, test_input=None):
prompt_input = [persona.scratch.name,
persona.scratch.get_str_iss(),
persona.scratch.name,
event_description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = int(gpt_response.strip())
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return 4
gpt_param = {"engine": "text-davinci-003", "max_tokens": 3,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/poignancy_thought_v1.txt"
prompt_input = create_prompt_input(persona, event_description)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_chat_poignancy(persona, event_description, test_input=None, verbose=False):
def create_prompt_input(persona, event_description, test_input=None):
prompt_input = [persona.scratch.name,
persona.scratch.get_str_iss(),
persona.scratch.name,
event_description]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = int(gpt_response.strip())
return gpt_response
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return 4
gpt_param = {"engine": "text-davinci-003", "max_tokens": 3,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/poignancy_chat_v1.txt"
prompt_input = create_prompt_input(persona, event_description)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_focal_pt(persona, statements, n, test_input=None, verbose=False):
def create_prompt_input(persona, statements, n, test_input=None):
prompt_input = [statements, str(n)]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = "1) " + gpt_response.strip()
ret = []
for i in gpt_response.split("\n"):
ret += [i.split(") ")[-1]]
return ret
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe(n):
return ["Who am I"] * n
gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/generate_focal_pt_v1.txt"
prompt_input = create_prompt_input(persona, statements, n)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(n)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_insight_and_guidance(persona, statements, n, test_input=None, verbose=False):
def create_prompt_input(persona, statements, n, test_input=None):
prompt_input = [statements, str(n)]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
gpt_response = "1. " + gpt_response.strip()
ret = dict()
for i in gpt_response.split("\n"):
row = i.split(". ")[-1]
thought = row.split("(because of ")[0].strip()
evi_raw = row.split("(because of ")[1].split(")")[0].strip()
evi_raw = re.findall(r'\d+', evi_raw)
evi_raw = [int(i.strip()) for i in evi_raw]
ret[thought] = evi_raw
return ret
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe(n):
return ["I am hungry"] * n
gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
"temperature": 0.5, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/insight_and_evidence_v1.txt"
prompt_input = create_prompt_input(persona, statements, n)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe(n)
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
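# Illustrative sketch (not part of the original pipeline; the response text
# below is made up): the clean-up above expects numbered insights of the form
# "<n>. <thought> (because of <statement numbers>)" and maps each thought to
# the list of cited statement numbers. It relies on the module-level re import.
def _example_parse_insights(gpt_response):
  gpt_response = "1. " + gpt_response.strip()
  ret = dict()
  for line in gpt_response.split("\n"):
    row = line.split(". ")[-1]
    thought = row.split("(because of ")[0].strip()
    evidence = re.findall(r'\d+', row.split("(because of ")[1].split(")")[0])
    ret[thought] = [int(i) for i in evidence]
  return ret
# e.g., _example_parse_insights("Klaus is dedicated to research (because of 1, 3)\n"
#                               "2. Klaus skips meals when busy (because of 2)")
# -> {"Klaus is dedicated to research": [1, 3],
#     "Klaus skips meals when busy": [2]}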
def run_gpt_prompt_agent_chat_summarize_ideas(persona, target_persona, statements, curr_context, test_input=None, verbose=False):
def create_prompt_input(persona, target_persona, statements, curr_context, test_input=None):
prompt_input = [persona.scratch.get_str_curr_date_str(), curr_context, persona.scratch.currently,
statements, persona.scratch.name, target_persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
"temperature": 0.5, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/summarize_chat_ideas_v1.txt"
prompt_input = create_prompt_input(persona, target_persona, statements, curr_context)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_agent_chat_summarize_relationship(persona, target_persona, statements, test_input=None, verbose=False):
def create_prompt_input(persona, target_persona, statements, test_input=None):
prompt_input = [statements, persona.scratch.name, target_persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
"temperature": 0.5, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/summarize_chat_relationship_v1.txt"
prompt_input = create_prompt_input(persona, target_persona, statements)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_agent_chat(persona, target_persona,
curr_context,
init_summ_idea,
target_summ_idea, test_input=None, verbose=False):
def create_prompt_input(persona, target_persona, curr_context, init_summ_idea, target_summ_idea, test_input=None):
prev_convo_insert = "\n"
if persona.a_mem.seq_chat:
for i in persona.a_mem.seq_chat:
if i.object == target_persona.scratch.name:
v1 = int((persona.scratch.curr_time - i.created).total_seconds()/60)
prev_convo_insert += f'{str(v1)} minutes ago, {persona.scratch.name} and {target_persona.scratch.name} were already {i.description} This context takes place after that conversation.'
break
if prev_convo_insert == "\n":
prev_convo_insert = ""
if persona.a_mem.seq_chat:
if int((persona.scratch.curr_time - persona.a_mem.seq_chat[-1].created).total_seconds()/60) > 480:
prev_convo_insert = ""
print (prev_convo_insert)
prompt_input = [persona.scratch.currently, target_persona.scratch.currently, prev_convo_insert,
curr_context, init_summ_idea, target_summ_idea, persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
print (gpt_response)
gpt_response = (prompt + gpt_response).split("Here is their conversation.")[-1].strip()
content = re.findall('"([^"]*)"', gpt_response)
speaker_order = []
for i in gpt_response.split("\n"):
name = i.split(":")[0].strip()
if name:
speaker_order += [name]
ret = []
for count, speaker in enumerate(speaker_order):
ret += [[speaker, content[count]]]
return ret
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 2000,
"temperature": 0.7, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/agent_chat_v1.txt"
prompt_input = create_prompt_input(persona, target_persona, curr_context, init_summ_idea, target_summ_idea)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_summarize_ideas(persona, statements, question, test_input=None, verbose=False):
def create_prompt_input(persona, statements, question, test_input=None):
prompt_input = [statements, persona.scratch.name, question]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
"temperature": 0.5, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/summarize_ideas_v1.txt"
prompt_input = create_prompt_input(persona, statements, question)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_generate_next_convo_line(persona, interlocutor_desc, prev_convo, retrieved_summary, test_input=None, verbose=False):
def create_prompt_input(persona, interlocutor_desc, prev_convo, retrieved_summary, test_input=None):
prompt_input = [persona.scratch.name,
persona.scratch.get_str_iss(),
persona.scratch.name,
interlocutor_desc,
prev_convo,
retrieved_summary,
persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 250,
"temperature": 1, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/generate_next_convo_line_v1.txt"
prompt_input = create_prompt_input(persona, interlocutor_desc, prev_convo, retrieved_summary)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_generate_whisper_inner_thought(persona, whisper, test_input=None, verbose=False):
def create_prompt_input(persona, whisper, test_input=None):
prompt_input = [persona.scratch.name, whisper]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/whisper_inner_thought_v1.txt"
prompt_input = create_prompt_input(persona, whisper)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_planning_thought_on_convo(persona, all_utt, test_input=None, verbose=False):
def create_prompt_input(persona, all_utt, test_input=None):
prompt_input = [all_utt, persona.scratch.name, persona.scratch.name, persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/planning_thought_on_convo_v1.txt"
prompt_input = create_prompt_input(persona, all_utt)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
def run_gpt_prompt_memo_on_convo(persona, all_utt, test_input=None, verbose=False):
def create_prompt_input(persona, all_utt, test_input=None):
prompt_input = [all_utt, persona.scratch.name, persona.scratch.name, persona.scratch.name]
return prompt_input
def __func_clean_up(gpt_response, prompt=""):
return gpt_response.split('"')[0].strip()
def __func_validate(gpt_response, prompt=""):
try:
__func_clean_up(gpt_response, prompt)
return True
except:
return False
def get_fail_safe():
return "..."
gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0, "stop": None}
prompt_template = "persona/prompt_template/v2/memo_on_convo_v1.txt"
prompt_input = create_prompt_input(persona, all_utt)
prompt = generate_prompt(prompt_input, prompt_template)
fail_safe = get_fail_safe()
output = safe_generate_response(prompt, gpt_param, 5, fail_safe,
__func_validate, __func_clean_up)
if debug or verbose:
print_run_prompts(prompt_template, persona, gpt_param,
prompt_input, prompt, output)
return output, [output, prompt, gpt_param, prompt_input, fail_safe]
| generative_agents-main | reverie/backend_server/persona/prompt_template/defunct_run_gpt_prompt.py |
"""
Author: Joon Sung Park ([email protected])
File: scratch.py
Description: Defines the short-term memory module for generative agents.
"""
import datetime
import json
import sys
sys.path.append('../../')
from global_methods import *
class Scratch:
def __init__(self, f_saved):
# PERSONA HYPERPARAMETERS
# <vision_r> denotes the number of tiles that the persona can see around
# them.
self.vision_r = 4
# <att_bandwidth> TODO
self.att_bandwidth = 3
# <retention> TODO
self.retention = 5
# WORLD INFORMATION
# Perceived world time.
self.curr_time = None
# Current x,y tile coordinate of the persona.
self.curr_tile = None
# Perceived world daily requirement.
self.daily_plan_req = None
# THE CORE IDENTITY OF THE PERSONA
# Base information about the persona.
self.name = None
self.first_name = None
self.last_name = None
self.age = None
# L0 permanent core traits.
self.innate = None
# L1 stable traits.
self.learned = None
# L2 external implementation.
self.currently = None
self.lifestyle = None
self.living_area = None
# REFLECTION VARIABLES
self.concept_forget = 100
self.daily_reflection_time = 60 * 3
self.daily_reflection_size = 5
self.overlap_reflect_th = 2
self.kw_strg_event_reflect_th = 4
self.kw_strg_thought_reflect_th = 4
# New reflection variables
self.recency_w = 1
self.relevance_w = 1
self.importance_w = 1
self.recency_decay = 0.99
self.importance_trigger_max = 150
self.importance_trigger_curr = self.importance_trigger_max
self.importance_ele_n = 0
self.thought_count = 5
# PERSONA PLANNING
# <daily_req> is a list of various goals the persona is aiming to achieve
# today.
# e.g., ['Work on her paintings for her upcoming show',
# 'Take a break to watch some TV',
# 'Make lunch for herself',
# 'Work on her paintings some more',
# 'Go to bed early']
# They have to be renewed at the end of the day, which is why we are
# keeping track of when they were first generated.
self.daily_req = []
# <f_daily_schedule> denotes a form of long term planning. This lays out
# the persona's daily plan.
# Note that we take the long term planning and short term decomposition
    # approach, which is to say that we first lay out hourly schedules and
# gradually decompose as we go.
# Three things to note in the example below:
# 1) See how "sleeping" was not decomposed -- some of the common events
# really, just mainly sleeping, are hard coded to be not decomposable.
# 2) Some of the elements are starting to be decomposed... More of the
# things will be decomposed as the day goes on (when they are
# decomposed, they leave behind the original hourly action description
    #    intact).
# 3) The latter elements are not decomposed. When an event occurs, the
# non-decomposed elements go out the window.
# e.g., [['sleeping', 360],
# ['wakes up and ... (wakes up and stretches ...)', 5],
# ['wakes up and starts her morning routine (out of bed )', 10],
# ...
# ['having lunch', 60],
# ['working on her painting', 180], ...]
self.f_daily_schedule = []
# <f_daily_schedule_hourly_org> is a replica of f_daily_schedule
# initially, but retains the original non-decomposed version of the hourly
# schedule.
# e.g., [['sleeping', 360],
# ['wakes up and starts her morning routine', 120],
# ['working on her painting', 240], ... ['going to bed', 60]]
self.f_daily_schedule_hourly_org = []
# CURR ACTION
# <address> is literally the string address of where the action is taking
# place. It comes in the form of
# "{world}:{sector}:{arena}:{game_objects}". It is important that you
# access this without doing negative indexing (e.g., [-1]) because the
# latter address elements may not be present in some cases.
# e.g., "dolores double studio:double studio:bedroom 1:bed"
self.act_address = None
# <start_time> is a python datetime instance that indicates when the
# action has started.
self.act_start_time = None
# <duration> is the integer value that indicates the number of minutes an
# action is meant to last.
self.act_duration = None
# <description> is a string description of the action.
self.act_description = None
# <pronunciatio> is the descriptive expression of the self.description.
# Currently, it is implemented as emojis.
self.act_pronunciatio = None
# <event_form> represents the event triple that the persona is currently
# engaged in.
self.act_event = (self.name, None, None)
# <obj_description> is a string description of the object action.
self.act_obj_description = None
# <obj_pronunciatio> is the descriptive expression of the object action.
# Currently, it is implemented as emojis.
self.act_obj_pronunciatio = None
# <obj_event_form> represents the event triple that the action object is
# currently engaged in.
self.act_obj_event = (self.name, None, None)
# <chatting_with> is the string name of the persona that the current
# persona is chatting with. None if it does not exist.
self.chatting_with = None
# <chat> is a list of list that saves a conversation between two personas.
# It comes in the form of: [["Dolores Murphy", "Hi"],
# ["Maeve Jenson", "Hi"] ...]
self.chat = None
# <chatting_with_buffer>
# e.g., ["Dolores Murphy"] = self.vision_r
self.chatting_with_buffer = dict()
self.chatting_end_time = None
# <path_set> is True if we've already calculated the path the persona will
# take to execute this action. That path is stored in the persona's
# scratch.planned_path.
self.act_path_set = False
# <planned_path> is a list of x y coordinate tuples (tiles) that describe
# the path the persona is to take to execute the <curr_action>.
# The list does not include the persona's current tile, and includes the
# destination tile.
# e.g., [(50, 10), (49, 10), (48, 10), ...]
self.planned_path = []
if check_if_file_exists(f_saved):
# If we have a bootstrap file, load that here.
scratch_load = json.load(open(f_saved))
self.vision_r = scratch_load["vision_r"]
self.att_bandwidth = scratch_load["att_bandwidth"]
self.retention = scratch_load["retention"]
if scratch_load["curr_time"]:
self.curr_time = datetime.datetime.strptime(scratch_load["curr_time"],
"%B %d, %Y, %H:%M:%S")
else:
self.curr_time = None
self.curr_tile = scratch_load["curr_tile"]
self.daily_plan_req = scratch_load["daily_plan_req"]
self.name = scratch_load["name"]
self.first_name = scratch_load["first_name"]
self.last_name = scratch_load["last_name"]
self.age = scratch_load["age"]
self.innate = scratch_load["innate"]
self.learned = scratch_load["learned"]
self.currently = scratch_load["currently"]
self.lifestyle = scratch_load["lifestyle"]
self.living_area = scratch_load["living_area"]
self.concept_forget = scratch_load["concept_forget"]
self.daily_reflection_time = scratch_load["daily_reflection_time"]
self.daily_reflection_size = scratch_load["daily_reflection_size"]
self.overlap_reflect_th = scratch_load["overlap_reflect_th"]
self.kw_strg_event_reflect_th = scratch_load["kw_strg_event_reflect_th"]
self.kw_strg_thought_reflect_th = scratch_load["kw_strg_thought_reflect_th"]
self.recency_w = scratch_load["recency_w"]
self.relevance_w = scratch_load["relevance_w"]
self.importance_w = scratch_load["importance_w"]
self.recency_decay = scratch_load["recency_decay"]
self.importance_trigger_max = scratch_load["importance_trigger_max"]
self.importance_trigger_curr = scratch_load["importance_trigger_curr"]
self.importance_ele_n = scratch_load["importance_ele_n"]
self.thought_count = scratch_load["thought_count"]
self.daily_req = scratch_load["daily_req"]
self.f_daily_schedule = scratch_load["f_daily_schedule"]
self.f_daily_schedule_hourly_org = scratch_load["f_daily_schedule_hourly_org"]
self.act_address = scratch_load["act_address"]
if scratch_load["act_start_time"]:
self.act_start_time = datetime.datetime.strptime(
scratch_load["act_start_time"],
"%B %d, %Y, %H:%M:%S")
else:
        self.act_start_time = None
self.act_duration = scratch_load["act_duration"]
self.act_description = scratch_load["act_description"]
self.act_pronunciatio = scratch_load["act_pronunciatio"]
self.act_event = tuple(scratch_load["act_event"])
self.act_obj_description = scratch_load["act_obj_description"]
self.act_obj_pronunciatio = scratch_load["act_obj_pronunciatio"]
self.act_obj_event = tuple(scratch_load["act_obj_event"])
self.chatting_with = scratch_load["chatting_with"]
self.chat = scratch_load["chat"]
self.chatting_with_buffer = scratch_load["chatting_with_buffer"]
if scratch_load["chatting_end_time"]:
self.chatting_end_time = datetime.datetime.strptime(
scratch_load["chatting_end_time"],
"%B %d, %Y, %H:%M:%S")
else:
self.chatting_end_time = None
self.act_path_set = scratch_load["act_path_set"]
self.planned_path = scratch_load["planned_path"]
def save(self, out_json):
"""
Save persona's scratch.
INPUT:
      out_json: The file where we will be saving our persona's state.
OUTPUT:
None
"""
scratch = dict()
scratch["vision_r"] = self.vision_r
scratch["att_bandwidth"] = self.att_bandwidth
scratch["retention"] = self.retention
scratch["curr_time"] = self.curr_time.strftime("%B %d, %Y, %H:%M:%S")
scratch["curr_tile"] = self.curr_tile
scratch["daily_plan_req"] = self.daily_plan_req
scratch["name"] = self.name
scratch["first_name"] = self.first_name
scratch["last_name"] = self.last_name
scratch["age"] = self.age
scratch["innate"] = self.innate
scratch["learned"] = self.learned
scratch["currently"] = self.currently
scratch["lifestyle"] = self.lifestyle
scratch["living_area"] = self.living_area
scratch["concept_forget"] = self.concept_forget
scratch["daily_reflection_time"] = self.daily_reflection_time
scratch["daily_reflection_size"] = self.daily_reflection_size
scratch["overlap_reflect_th"] = self.overlap_reflect_th
scratch["kw_strg_event_reflect_th"] = self.kw_strg_event_reflect_th
scratch["kw_strg_thought_reflect_th"] = self.kw_strg_thought_reflect_th
scratch["recency_w"] = self.recency_w
scratch["relevance_w"] = self.relevance_w
scratch["importance_w"] = self.importance_w
scratch["recency_decay"] = self.recency_decay
scratch["importance_trigger_max"] = self.importance_trigger_max
scratch["importance_trigger_curr"] = self.importance_trigger_curr
scratch["importance_ele_n"] = self.importance_ele_n
scratch["thought_count"] = self.thought_count
scratch["daily_req"] = self.daily_req
scratch["f_daily_schedule"] = self.f_daily_schedule
scratch["f_daily_schedule_hourly_org"] = self.f_daily_schedule_hourly_org
scratch["act_address"] = self.act_address
scratch["act_start_time"] = (self.act_start_time
.strftime("%B %d, %Y, %H:%M:%S"))
scratch["act_duration"] = self.act_duration
scratch["act_description"] = self.act_description
scratch["act_pronunciatio"] = self.act_pronunciatio
scratch["act_event"] = self.act_event
scratch["act_obj_description"] = self.act_obj_description
scratch["act_obj_pronunciatio"] = self.act_obj_pronunciatio
scratch["act_obj_event"] = self.act_obj_event
scratch["chatting_with"] = self.chatting_with
scratch["chat"] = self.chat
scratch["chatting_with_buffer"] = self.chatting_with_buffer
if self.chatting_end_time:
scratch["chatting_end_time"] = (self.chatting_end_time
.strftime("%B %d, %Y, %H:%M:%S"))
else:
scratch["chatting_end_time"] = None
scratch["act_path_set"] = self.act_path_set
scratch["planned_path"] = self.planned_path
with open(out_json, "w") as outfile:
json.dump(scratch, outfile, indent=2)
def get_f_daily_schedule_index(self, advance=0):
"""
We get the current index of self.f_daily_schedule.
Recall that self.f_daily_schedule stores the decomposed action sequences
up until now, and the hourly sequences of the future action for the rest
of today. Given that self.f_daily_schedule is a list of list where the
inner list is composed of [task, duration], we continue to add up the
duration until we reach "if elapsed > today_min_elapsed" condition. The
index where we stop is the index we will return.
INPUT
advance: Integer value of the number of minutes we want to look into the
future. This allows us to get the index of a future timeframe.
OUTPUT
an integer value for the current index of f_daily_schedule.
"""
# We first calculate the number of minutes elapsed today.
today_min_elapsed = 0
today_min_elapsed += self.curr_time.hour * 60
today_min_elapsed += self.curr_time.minute
today_min_elapsed += advance
# We then calculate the current index based on that.
curr_index = 0
elapsed = 0
for task, duration in self.f_daily_schedule:
elapsed += duration
if elapsed > today_min_elapsed:
return curr_index
curr_index += 1
return curr_index
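# Illustrative sketch (editor's addition, hypothetical values): with a
# schedule of [["sleeping", 360], ["working on painting", 180]] and
# self.curr_time at 07:00, today_min_elapsed is 7 * 60 = 420. The loop
# accumulates 360 (not greater than 420, so curr_index becomes 1), then
# 540 (greater than 420), so the function returns 1, the index of the
# "working on painting" entry.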
def get_f_daily_schedule_hourly_org_index(self, advance=0):
"""
We get the current index of self.f_daily_schedule_hourly_org.
It is otherwise the same as get_f_daily_schedule_index.
INPUT
advance: Integer value of the number of minutes we want to look into the
future. This allows us to get the index of a future timeframe.
OUTPUT
an integer value for the current index of f_daily_schedule_hourly_org.
"""
# We first calculate the number of minutes elapsed today.
today_min_elapsed = 0
today_min_elapsed += self.curr_time.hour * 60
today_min_elapsed += self.curr_time.minute
today_min_elapsed += advance
# We then calculate the current index based on that.
curr_index = 0
elapsed = 0
for task, duration in self.f_daily_schedule_hourly_org:
elapsed += duration
if elapsed > today_min_elapsed:
return curr_index
curr_index += 1
return curr_index
def get_str_iss(self):
"""
ISS stands for "identity stable set." This describes the commonset summary
of this persona -- basically, the bare minimum description of the persona
that gets used in almost all prompts that need to call on the persona.
INPUT
None
OUTPUT
the identity stable set summary of the persona in a string form.
EXAMPLE STR OUTPUT
"Name: Dolores Heitmiller
Age: 28
Innate traits: hard-edged, independent, loyal
Learned traits: Dolores is a painter who wants to live quietly and paint
while enjoying her everyday life.
Currently: Dolores is preparing for her first solo show. She mostly
works from home.
Lifestyle: Dolores goes to bed around 11pm, sleeps for 7 hours, eats
dinner around 6pm.
Daily plan requirement: Dolores is planning to stay at home all day and
never go out."
"""
commonset = ""
commonset += f"Name: {self.name}\n"
commonset += f"Age: {self.age}\n"
commonset += f"Innate traits: {self.innate}\n"
commonset += f"Learned traits: {self.learned}\n"
commonset += f"Currently: {self.currently}\n"
commonset += f"Lifestyle: {self.lifestyle}\n"
commonset += f"Daily plan requirement: {self.daily_plan_req}\n"
commonset += f"Current Date: {self.curr_time.strftime('%A %B %d')}\n"
return commonset
def get_str_name(self):
return self.name
def get_str_firstname(self):
return self.first_name
def get_str_lastname(self):
return self.last_name
def get_str_age(self):
return str(self.age)
def get_str_innate(self):
return self.innate
def get_str_learned(self):
return self.learned
def get_str_currently(self):
return self.currently
def get_str_lifestyle(self):
return self.lifestyle
def get_str_daily_plan_req(self):
return self.daily_plan_req
def get_str_curr_date_str(self):
return self.curr_time.strftime("%A %B %d")
def get_curr_event(self):
if not self.act_address:
return (self.name, None, None)
else:
return self.act_event
def get_curr_event_and_desc(self):
if not self.act_address:
return (self.name, None, None, None)
else:
return (self.act_event[0],
self.act_event[1],
self.act_event[2],
self.act_description)
def get_curr_obj_event_and_desc(self):
if not self.act_address:
return ("", None, None, None)
else:
return (self.act_address,
self.act_obj_event[1],
self.act_obj_event[2],
self.act_obj_description)
def add_new_action(self,
action_address,
action_duration,
action_description,
action_pronunciatio,
action_event,
chatting_with,
chat,
chatting_with_buffer,
chatting_end_time,
act_obj_description,
act_obj_pronunciatio,
act_obj_event,
act_start_time=None):
self.act_address = action_address
self.act_duration = action_duration
self.act_description = action_description
self.act_pronunciatio = action_pronunciatio
self.act_event = action_event
self.chatting_with = chatting_with
self.chat = chat
if chatting_with_buffer:
self.chatting_with_buffer.update(chatting_with_buffer)
self.chatting_end_time = chatting_end_time
self.act_obj_description = act_obj_description
self.act_obj_pronunciatio = act_obj_pronunciatio
self.act_obj_event = act_obj_event
self.act_start_time = self.curr_time
self.act_path_set = False
def act_time_str(self):
"""
Returns a string output of the current time.
INPUT
None
OUTPUT
A string output of the current time.
EXAMPLE STR OUTPUT
"14:05 P.M."
"""
return self.act_start_time.strftime("%H:%M %p")
def act_check_finished(self):
"""
Checks whether the persona's current action has finished.
INPUT
  None. The check compares self.curr_time against the action's start
  time plus its duration; once that point is reached, the action has
  finished.
OUTPUT
Boolean [True]: Action has finished.
Boolean [False]: Action has not finished and is still ongoing.
"""
if not self.act_address:
return True
if self.chatting_with:
end_time = self.chatting_end_time
else:
x = self.act_start_time
if x.second != 0:
x = x.replace(second=0)
x = (x + datetime.timedelta(minutes=1))
end_time = (x + datetime.timedelta(minutes=self.act_duration))
if end_time.strftime("%H:%M:%S") == self.curr_time.strftime("%H:%M:%S"):
return True
return False
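# Worked example (editor's addition, hypothetical values): if act_start_time
# is 13:04:30 and act_duration is 10, the start is rounded up to 13:05:00,
# end_time becomes 13:15:00, and the method returns True only on the step
# where self.curr_time reads exactly "13:15:00".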
def act_summarize(self):
"""
Summarize the current action as a dictionary.
INPUT
None
OUTPUT
ret: A human readable summary of the action.
"""
exp = dict()
exp["persona"] = self.name
exp["address"] = self.act_address
exp["start_datetime"] = self.act_start_time
exp["duration"] = self.act_duration
exp["description"] = self.act_description
exp["pronunciatio"] = self.act_pronunciatio
return exp
def act_summary_str(self):
"""
Returns a string summary of the current action. Meant to be
human-readable.
INPUT
None
OUTPUT
ret: A human readable summary of the action.
"""
start_datetime_str = self.act_start_time.strftime("%A %B %d -- %H:%M %p")
ret = f"[{start_datetime_str}]\n"
ret += f"Activity: {self.name} is {self.act_description}\n"
ret += f"Address: {self.act_address}\n"
ret += f"Duration in minutes (e.g., x min): {str(self.act_duration)} min\n"
return ret
def get_str_daily_schedule_summary(self):
ret = ""
curr_min_sum = 0
for row in self.f_daily_schedule:
curr_min_sum += row[1]
hour = int(curr_min_sum/60)
minute = curr_min_sum%60
ret += f"{hour:02}:{minute:02} || {row[0]}\n"
return ret
def get_str_daily_schedule_hourly_org_summary(self):
ret = ""
curr_min_sum = 0
for row in self.f_daily_schedule_hourly_org:
curr_min_sum += row[1]
hour = int(curr_min_sum/60)
minute = curr_min_sum%60
ret += f"{hour:02}:{minute:02} || {row[0]}\n"
return ret
| generative_agents-main | reverie/backend_server/persona/memory_structures/scratch.py |
"""
Author: Joon Sung Park ([email protected])
File: spatial_memory.py
Description: Defines the MemoryTree class that serves as the agents' spatial
memory that aids in grounding their behavior in the game world.
"""
import json
import sys
sys.path.append('../../')
from utils import *
from global_methods import *
class MemoryTree:
def __init__(self, f_saved):
self.tree = {}
if check_if_file_exists(f_saved):
self.tree = json.load(open(f_saved))
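# The loaded tree is a nested dict keyed by world, then sector, then arena,
# with the accessible game objects as leaf lists. A minimal hypothetical
# example (editor's addition) of what f_saved might contain:
# {"the Ville": {"Hobbs Cafe": {"cafe": ["counter", "table"]}}}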
def print_tree(self):
def _print_tree(tree, depth):
dash = " >" * depth
if type(tree) == type(list()):
if tree:
print (dash, tree)
return
for key, val in tree.items():
if key:
print (dash, key)
_print_tree(val, depth+1)
_print_tree(self.tree, 0)
def save(self, out_json):
with open(out_json, "w") as outfile:
json.dump(self.tree, outfile)
def get_str_accessible_sectors(self, curr_world):
"""
Returns a summary string of all the sectors that the persona can access
within the given world.
Note that there are places a given persona cannot enter. This information
is provided in the persona sheet. We account for this in this function.
INPUT
  curr_world: the name of the current world.
OUTPUT
  A summary string of all the sectors that the persona can access.
EXAMPLE STR OUTPUT
"bedroom, kitchen, dining room, office, bathroom"
"""
x = ", ".join(list(self.tree[curr_world].keys()))
return x
def get_str_accessible_sector_arenas(self, sector):
"""
Returns a summary string of all the arenas that the persona can access
within the current sector.
Note that there are places a given persona cannot enter. This information
is provided in the persona sheet. We account for this in this function.
INPUT
  sector: the sector address, of the form "world:sector".
OUTPUT
A summary string of all the arenas that the persona can access.
EXAMPLE STR OUTPUT
"bedroom, kitchen, dining room, office, bathroom"
"""
curr_world, curr_sector = sector.split(":")
if not curr_sector:
return ""
x = ", ".join(list(self.tree[curr_world][curr_sector].keys()))
return x
def get_str_accessible_arena_game_objects(self, arena):
"""
Get a str list of all accessible game objects that are in the given
arena. The arena address takes the form "world:sector:arena".
INPUT
  arena: the full arena address.
OUTPUT
  str list of all accessible game objects in the game arena.
EXAMPLE STR OUTPUT
"phone, charger, bed, nightstand"
"""
curr_world, curr_sector, curr_arena = arena.split(":")
if not curr_arena:
return ""
try:
x = ", ".join(list(self.tree[curr_world][curr_sector][curr_arena]))
except:
x = ", ".join(list(self.tree[curr_world][curr_sector][curr_arena.lower()]))
return x
if __name__ == '__main__':
x = f"../../../../environment/frontend_server/storage/the_ville_base_LinFamily/personas/Eddy Lin/bootstrap_memory/spatial_memory.json"
x = MemoryTree(x)
x.print_tree()
print (x.get_str_accessible_sector_arenas("dolores double studio:double studio"))
| generative_agents-main | reverie/backend_server/persona/memory_structures/spatial_memory.py |
"""
Author: Joon Sung Park ([email protected])
File: associative_memory.py
Description: Defines the core long-term memory module for generative agents.
Note (May 1, 2023) -- this class is the Memory Stream module in the generative
agents paper.
"""
import sys
sys.path.append('../../')
import json
import datetime
from global_methods import *
class ConceptNode:
def __init__(self,
node_id, node_count, type_count, node_type, depth,
created, expiration,
s, p, o,
description, embedding_key, poignancy, keywords, filling):
self.node_id = node_id
self.node_count = node_count
self.type_count = type_count
self.type = node_type # thought / event / chat
self.depth = depth
self.created = created
self.expiration = expiration
self.last_accessed = self.created
self.subject = s
self.predicate = p
self.object = o
self.description = description
self.embedding_key = embedding_key
self.poignancy = poignancy
self.keywords = keywords
self.filling = filling
def spo_summary(self):
return (self.subject, self.predicate, self.object)
class AssociativeMemory:
def __init__(self, f_saved):
self.id_to_node = dict()
self.seq_event = []
self.seq_thought = []
self.seq_chat = []
self.kw_to_event = dict()
self.kw_to_thought = dict()
self.kw_to_chat = dict()
self.kw_strength_event = dict()
self.kw_strength_thought = dict()
self.embeddings = json.load(open(f_saved + "/embeddings.json"))
nodes_load = json.load(open(f_saved + "/nodes.json"))
for count in range(len(nodes_load.keys())):
node_id = f"node_{str(count+1)}"
node_details = nodes_load[node_id]
node_count = node_details["node_count"]
type_count = node_details["type_count"]
node_type = node_details["type"]
depth = node_details["depth"]
created = datetime.datetime.strptime(node_details["created"],
'%Y-%m-%d %H:%M:%S')
expiration = None
if node_details["expiration"]:
expiration = datetime.datetime.strptime(node_details["expiration"],
'%Y-%m-%d %H:%M:%S')
s = node_details["subject"]
p = node_details["predicate"]
o = node_details["object"]
description = node_details["description"]
embedding_pair = (node_details["embedding_key"],
self.embeddings[node_details["embedding_key"]])
poignancy =node_details["poignancy"]
keywords = set(node_details["keywords"])
filling = node_details["filling"]
if node_type == "event":
self.add_event(created, expiration, s, p, o,
description, keywords, poignancy, embedding_pair, filling)
elif node_type == "chat":
self.add_chat(created, expiration, s, p, o,
description, keywords, poignancy, embedding_pair, filling)
elif node_type == "thought":
self.add_thought(created, expiration, s, p, o,
description, keywords, poignancy, embedding_pair, filling)
kw_strength_load = json.load(open(f_saved + "/kw_strength.json"))
if kw_strength_load["kw_strength_event"]:
self.kw_strength_event = kw_strength_load["kw_strength_event"]
if kw_strength_load["kw_strength_thought"]:
self.kw_strength_thought = kw_strength_load["kw_strength_thought"]
def save(self, out_json):
r = dict()
for count in range(len(self.id_to_node.keys()), 0, -1):
node_id = f"node_{str(count)}"
node = self.id_to_node[node_id]
r[node_id] = dict()
r[node_id]["node_count"] = node.node_count
r[node_id]["type_count"] = node.type_count
r[node_id]["type"] = node.type
r[node_id]["depth"] = node.depth
r[node_id]["created"] = node.created.strftime('%Y-%m-%d %H:%M:%S')
r[node_id]["expiration"] = None
if node.expiration:
r[node_id]["expiration"] = (node.expiration
.strftime('%Y-%m-%d %H:%M:%S'))
r[node_id]["subject"] = node.subject
r[node_id]["predicate"] = node.predicate
r[node_id]["object"] = node.object
r[node_id]["description"] = node.description
r[node_id]["embedding_key"] = node.embedding_key
r[node_id]["poignancy"] = node.poignancy
r[node_id]["keywords"] = list(node.keywords)
r[node_id]["filling"] = node.filling
with open(out_json+"/nodes.json", "w") as outfile:
json.dump(r, outfile)
r = dict()
r["kw_strength_event"] = self.kw_strength_event
r["kw_strength_thought"] = self.kw_strength_thought
with open(out_json+"/kw_strength.json", "w") as outfile:
json.dump(r, outfile)
with open(out_json+"/embeddings.json", "w") as outfile:
json.dump(self.embeddings, outfile)
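# For reference (editor's addition; shape inferred from the load/save code
# above, values are placeholders): each entry in nodes.json looks like
# "node_1": {"node_count": 1, "type_count": 1, "type": "event", "depth": 0,
#            "created": "<%Y-%m-%d %H:%M:%S>", "expiration": null,
#            "subject": "<s>", "predicate": "<p>", "object": "<o>",
#            "description": "<text>", "embedding_key": "<text>",
#            "poignancy": <int>, "keywords": ["<kw>"], "filling": []}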
def add_event(self, created, expiration, s, p, o,
description, keywords, poignancy,
embedding_pair, filling):
# Setting up the node ID and counts.
node_count = len(self.id_to_node.keys()) + 1
type_count = len(self.seq_event) + 1
node_type = "event"
node_id = f"node_{str(node_count)}"
depth = 0
# Node type specific clean up.
if "(" in description:
description = (" ".join(description.split()[:3])
+ " "
+ description.split("(")[-1][:-1])
# Creating the <ConceptNode> object.
node = ConceptNode(node_id, node_count, type_count, node_type, depth,
created, expiration,
s, p, o,
description, embedding_pair[0],
poignancy, keywords, filling)
# Creating various dictionary cache for fast access.
self.seq_event[0:0] = [node]
keywords = [i.lower() for i in keywords]
for kw in keywords:
if kw in self.kw_to_event:
self.kw_to_event[kw][0:0] = [node]
else:
self.kw_to_event[kw] = [node]
self.id_to_node[node_id] = node
# Adding in the kw_strength
if f"{p} {o}" != "is idle":
for kw in keywords:
if kw in self.kw_strength_event:
self.kw_strength_event[kw] += 1
else:
self.kw_strength_event[kw] = 1
self.embeddings[embedding_pair[0]] = embedding_pair[1]
return node
def add_thought(self, created, expiration, s, p, o,
description, keywords, poignancy,
embedding_pair, filling):
# Setting up the node ID and counts.
node_count = len(self.id_to_node.keys()) + 1
type_count = len(self.seq_thought) + 1
node_type = "thought"
node_id = f"node_{str(node_count)}"
depth = 1
try:
if filling:
depth += max([self.id_to_node[i].depth for i in filling])
except:
pass
# Creating the <ConceptNode> object.
node = ConceptNode(node_id, node_count, type_count, node_type, depth,
created, expiration,
s, p, o,
description, embedding_pair[0], poignancy, keywords, filling)
# Creating various dictionary cache for fast access.
self.seq_thought[0:0] = [node]
keywords = [i.lower() for i in keywords]
for kw in keywords:
if kw in self.kw_to_thought:
self.kw_to_thought[kw][0:0] = [node]
else:
self.kw_to_thought[kw] = [node]
self.id_to_node[node_id] = node
# Adding in the kw_strength
if f"{p} {o}" != "is idle":
for kw in keywords:
if kw in self.kw_strength_thought:
self.kw_strength_thought[kw] += 1
else:
self.kw_strength_thought[kw] = 1
self.embeddings[embedding_pair[0]] = embedding_pair[1]
return node
def add_chat(self, created, expiration, s, p, o,
description, keywords, poignancy,
embedding_pair, filling):
# Setting up the node ID and counts.
node_count = len(self.id_to_node.keys()) + 1
type_count = len(self.seq_chat) + 1
node_type = "chat"
node_id = f"node_{str(node_count)}"
depth = 0
# Creating the <ConceptNode> object.
node = ConceptNode(node_id, node_count, type_count, node_type, depth,
created, expiration,
s, p, o,
description, embedding_pair[0], poignancy, keywords, filling)
# Creating various dictionary cache for fast access.
self.seq_chat[0:0] = [node]
keywords = [i.lower() for i in keywords]
for kw in keywords:
if kw in self.kw_to_chat:
self.kw_to_chat[kw][0:0] = [node]
else:
self.kw_to_chat[kw] = [node]
self.id_to_node[node_id] = node
self.embeddings[embedding_pair[0]] = embedding_pair[1]
return node
def get_summarized_latest_events(self, retention):
ret_set = set()
for e_node in self.seq_event[:retention]:
ret_set.add(e_node.spo_summary())
return ret_set
def get_str_seq_events(self):
ret_str = ""
for count, event in enumerate(self.seq_event):
ret_str += f"Event {len(self.seq_event) - count}: {event.spo_summary()} -- {event.description}\n"
return ret_str
def get_str_seq_thoughts(self):
ret_str = ""
for count, event in enumerate(self.seq_thought):
ret_str += f"Thought {len(self.seq_thought) - count}: {event.spo_summary()} -- {event.description}\n"
return ret_str
def get_str_seq_chats(self):
ret_str = ""
for count, event in enumerate(self.seq_chat):
ret_str += f"with {event.object.content} ({event.description})\n"
ret_str += f'{event.created.strftime("%B %d, %Y, %H:%M:%S")}\n'
for row in event.filling:
ret_str += f"{row[0]}: {row[1]}\n"
return ret_str
def retrieve_relevant_thoughts(self, s_content, p_content, o_content):
contents = [s_content, p_content, o_content]
ret = []
for i in contents:
if i and i.lower() in self.kw_to_thought:
  ret += self.kw_to_thought[i.lower()]
ret = set(ret)
return ret
def retrieve_relevant_events(self, s_content, p_content, o_content):
contents = [s_content, p_content, o_content]
ret = []
for i in contents:
if i and i.lower() in self.kw_to_event:
  ret += self.kw_to_event[i.lower()]
ret = set(ret)
return ret
def get_last_chat(self, target_persona_name):
if target_persona_name.lower() in self.kw_to_chat:
return self.kw_to_chat[target_persona_name.lower()][0]
else:
return False
| generative_agents-main | reverie/backend_server/persona/memory_structures/associative_memory.py |
"""
Author: Joon Sung Park ([email protected])
File: global_methods.py
Description: Contains functions used throughout my projects.
"""
import random
import string
import csv
import time
import datetime as dt
import pathlib
import os
import sys
import numpy
import math
import shutil, errno
from os import listdir
def create_folder_if_not_there(curr_path):
"""
Checks if a folder in the curr_path exists. If it does not exist, creates
the folder.
Note that if the curr_path designates a file location, it will operate on
the folder that contains the file. The function also works even if the
path designates just a folder.
Args:
  curr_path: path to the folder to check, or to a file (in which case
             the containing folder is used).
RETURNS:
True: if a new folder is created
False: if a new folder is not created
"""
outfolder_name = curr_path.split("/")
if len(outfolder_name) != 1:
# This checks if the curr path is a file or a folder.
if "." in outfolder_name[-1]:
outfolder_name = outfolder_name[:-1]
outfolder_name = "/".join(outfolder_name)
if not os.path.exists(outfolder_name):
os.makedirs(outfolder_name)
return True
return False
def write_list_of_list_to_csv(curr_list_of_list, outfile):
"""
Writes a list of list to csv.
Unlike write_list_to_csv_line, it writes the entire csv in one shot.
ARGS:
curr_list_of_list: list to write. The list comes in the following form:
[['key1', 'val1-1', 'val1-2'...],
['key2', 'val2-1', 'val2-2'...],]
outfile: name of the csv file to write
RETURNS:
None
"""
create_folder_if_not_there(outfile)
with open(outfile, "w") as f:
writer = csv.writer(f)
writer.writerows(curr_list_of_list)
def write_list_to_csv_line(line_list, outfile):
"""
Writes one line to a csv file.
Unlike write_list_of_list_to_csv, this opens an existing outfile and then
appends a line to that file.
This also works if the file does not exist already.
ARGS:
curr_list: list to write. The list comes in the following form:
['key1', 'val1-1', 'val1-2'...]
Importantly, this is NOT a list of list.
outfile: name of the csv file to write
RETURNS:
None
"""
create_folder_if_not_there(outfile)
# Opening the file first so we can write incrementally as we progress
curr_file = open(outfile, 'a',)
csvfile_1 = csv.writer(curr_file)
csvfile_1.writerow(line_list)
curr_file.close()
def read_file_to_list(curr_file, header=False, strip_trail=True):
"""
Reads in a csv file to a list of list. If header is True, it returns a
tuple with (header row, all rows)
ARGS:
curr_file: path to the current csv file.
RETURNS:
List of list where the component lists are the rows of the file.
"""
if not header:
analysis_list = []
with open(curr_file) as f_analysis_file:
data_reader = csv.reader(f_analysis_file, delimiter=",")
for count, row in enumerate(data_reader):
if strip_trail:
row = [i.strip() for i in row]
analysis_list += [row]
return analysis_list
else:
analysis_list = []
with open(curr_file) as f_analysis_file:
data_reader = csv.reader(f_analysis_file, delimiter=",")
for count, row in enumerate(data_reader):
if strip_trail:
row = [i.strip() for i in row]
analysis_list += [row]
return analysis_list[0], analysis_list[1:]
def read_file_to_set(curr_file, col=0):
"""
Reads in a "single column" of a csv file to a set.
ARGS:
curr_file: path to the current csv file.
RETURNS:
Set with all items in a single column of a csv file.
"""
analysis_set = set()
with open(curr_file) as f_analysis_file:
data_reader = csv.reader(f_analysis_file, delimiter=",")
for count, row in enumerate(data_reader):
analysis_set.add(row[col])
return analysis_set
def get_row_len(curr_file):
"""
Get the number of rows in a csv file, counted as unique values in the
first column.
ARGS:
curr_file: path to the current csv file.
RETURNS:
The number of rows
False if the file does not exist
"""
try:
analysis_set = set()
with open(curr_file) as f_analysis_file:
data_reader = csv.reader(f_analysis_file, delimiter=",")
for count, row in enumerate(data_reader):
analysis_set.add(row[0])
return len(analysis_set)
except:
return False
def check_if_file_exists(curr_file):
"""
Checks if a file exists
ARGS:
curr_file: path to the current csv file.
RETURNS:
True if the file exists
False if the file does not exist
"""
try:
with open(curr_file) as f_analysis_file: pass
return True
except:
return False
def find_filenames(path_to_dir, suffix=".csv"):
"""
Given a directory, find all files that ends with the provided suffix and
returns their paths.
ARGS:
path_to_dir: Path to the current directory
suffix: The target suffix.
RETURNS:
A list of paths to all files in the directory.
"""
filenames = listdir(path_to_dir)
return [ path_to_dir+"/"+filename
for filename in filenames if filename.endswith( suffix ) ]
def average(list_of_val):
"""
Finds the average of the numbers in a list.
ARGS:
list_of_val: a list of numeric values
RETURNS:
The average of the values
"""
return sum(list_of_val)/float(len(list_of_val))
def std(list_of_val):
"""
Finds the std of the numbers in a list.
ARGS:
list_of_val: a list of numeric values
RETURNS:
The std of the values
"""
std = numpy.std(list_of_val)
return std
def copyanything(src, dst):
"""
Copy over everything in the src folder to dst folder.
ARGS:
src: address of the source folder
dst: address of the destination folder
RETURNS:
None
"""
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno in (errno.ENOTDIR, errno.EINVAL):
shutil.copy(src, dst)
else: raise
if __name__ == '__main__':
pass
| generative_agents-main | environment/frontend_server/global_methods.py |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend_server.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| generative_agents-main | environment/frontend_server/manage.py |
generative_agents-main | environment/frontend_server/frontend_server/__init__.py |
|
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
| generative_agents-main | environment/frontend_server/frontend_server/utils.py |
"""frontend_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.urls import path
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from translator import views as translator_views
urlpatterns = [
url(r'^$', translator_views.landing, name='landing'),
url(r'^simulator_home$', translator_views.home, name='home'),
url(r'^demo/(?P<sim_code>[\w-]+)/(?P<step>[\w-]+)/(?P<play_speed>[\w-]+)/$', translator_views.demo, name='demo'),
url(r'^replay/(?P<sim_code>[\w-]+)/(?P<step>[\w-]+)/$', translator_views.replay, name='replay'),
url(r'^replay_persona_state/(?P<sim_code>[\w-]+)/(?P<step>[\w-]+)/(?P<persona_name>[\w-]+)/$', translator_views.replay_persona_state, name='replay_persona_state'),
url(r'^process_environment/$', translator_views.process_environment, name='process_environment'),
url(r'^update_environment/$', translator_views.update_environment, name='update_environment'),
url(r'^path_tester/$', translator_views.path_tester, name='path_tester'),
url(r'^path_tester_update/$', translator_views.path_tester_update, name='path_tester_update'),
path('admin/', admin.site.urls),
]
| generative_agents-main | environment/frontend_server/frontend_server/urls.py |
"""
WSGI config for frontend_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend_server.settings')
application = get_wsgi_application()
| generative_agents-main | environment/frontend_server/frontend_server/wsgi.py |
"""
Django settings for frontend_server project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c7l%1%b=2sh$o9zqvd4i*h8*__^@-5sm-y)m(1ib2t92)43@62'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'translator',
'corsheaders',
'storages',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'frontend_server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'frontend_server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_root")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_dirs"),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_root")
| generative_agents-main | environment/frontend_server/frontend_server/settings/local.py |
###FOR PUSHING STATIC TO AWS
# from .base import *
# from .production import *
# try:
# from .local import *
# except:
# pass
###FOR GENERAL USES
from .base import *
try:
from .local import *
live = False
except:
live = True
if live:
from .production import *
| generative_agents-main | environment/frontend_server/frontend_server/settings/__init__.py |
"""
Django settings for frontend_server project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c7l%1%b=2sh$o9zqvd4i*h8*__^@-5sm-y)m(1ib2t92)43@62'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'translator',
'corsheaders',
'storages',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'frontend_server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'frontend_server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_root")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_dirs"),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_root")
# CORS_ORIGIN_WHITELIST = [
# 'http://127.0.0.1:8080'
# ]
# CORS_ORIGIN_ALLOW_ALL = True
# CORS_ALLOW_CREDENTIALS = False
| generative_agents-main | environment/frontend_server/frontend_server/settings/base.py |
from django.db import models | generative_agents-main | environment/frontend_server/translator/models.py |
generative_agents-main | environment/frontend_server/translator/__init__.py |
|
from django.apps import AppConfig
class TranslatorConfig(AppConfig):
name = 'translator'
| generative_agents-main | environment/frontend_server/translator/apps.py |
from django.contrib import admin
from .models import *
| generative_agents-main | environment/frontend_server/translator/admin.py |
from django.test import TestCase
# Create your tests here.
| generative_agents-main | environment/frontend_server/translator/tests.py |
"""
Author: Joon Sung Park ([email protected])
File: views.py
"""
import os
import string
import random
import json
from os import listdir
import datetime
from django.shortcuts import render, redirect, HttpResponseRedirect
from django.http import HttpResponse, JsonResponse
from global_methods import *
from django.contrib.staticfiles.templatetags.staticfiles import static
from .models import *
def landing(request):
context = {}
template = "landing/landing.html"
return render(request, template, context)
def demo(request, sim_code, step, play_speed="2"):
move_file = f"compressed_storage/{sim_code}/master_movement.json"
meta_file = f"compressed_storage/{sim_code}/meta.json"
step = int(step)
play_speed_opt = {"1": 1, "2": 2, "3": 4,
"4": 8, "5": 16, "6": 32}
if play_speed not in play_speed_opt: play_speed = 2
else: play_speed = play_speed_opt[play_speed]
# Loading the basic meta information about the simulation.
meta = dict()
with open (meta_file) as json_file:
meta = json.load(json_file)
sec_per_step = meta["sec_per_step"]
start_datetime = datetime.datetime.strptime(meta["start_date"] + " 00:00:00",
'%B %d, %Y %H:%M:%S')
for i in range(step):
start_datetime += datetime.timedelta(seconds=sec_per_step)
start_datetime = start_datetime.strftime("%Y-%m-%dT%H:%M:%S")
# Loading the movement file
raw_all_movement = dict()
with open(move_file) as json_file:
raw_all_movement = json.load(json_file)
# Loading all names of the personas
persona_names = []
persona_names_set = set()
for p in list(raw_all_movement["0"].keys()):
persona_names += [{"original": p,
"underscore": p.replace(" ", "_"),
"initial": p[0] + p.split(" ")[-1][0]}]
persona_names_set.add(p)
# <all_movement> is the main movement variable that we are passing to the
# frontend. Whereas we use an ajax scheme to communicate steps to the frontend
# during the simulation stage, for this demo, we send all movement
# information in one step.
all_movement = dict()
# Preparing the initial step.
# <init_prep> sets the locations and descriptions of all agents at the
# beginning of the demo determined by <step>.
init_prep = dict()
for int_key in range(step+1):
key = str(int_key)
val = raw_all_movement[key]
for p in persona_names_set:
if p in val:
init_prep[p] = val[p]
persona_init_pos = dict()
for p in persona_names_set:
persona_init_pos[p.replace(" ","_")] = init_prep[p]["movement"]
all_movement[step] = init_prep
# Finish loading <all_movement>
for int_key in range(step+1, len(raw_all_movement.keys())):
all_movement[int_key] = raw_all_movement[str(int_key)]
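# For orientation (editor's note; structure inferred from the loops above):
# raw_all_movement maps step strings to per-persona entries, roughly
# {"0": {"<persona name>": {"movement": [x, y], ...}, ...}, "1": {...}},
# while all_movement re-keys the steps as integers starting at <step>.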
context = {"sim_code": sim_code,
"step": step,
"persona_names": persona_names,
"persona_init_pos": json.dumps(persona_init_pos),
"all_movement": json.dumps(all_movement),
"start_datetime": start_datetime,
"sec_per_step": sec_per_step,
"play_speed": play_speed,
"mode": "demo"}
template = "demo/demo.html"
return render(request, template, context)
def UIST_Demo(request):
return demo(request, "March20_the_ville_n25_UIST_RUN-step-1-141", 2160, play_speed="3")
def home(request):
f_curr_sim_code = "temp_storage/curr_sim_code.json"
f_curr_step = "temp_storage/curr_step.json"
if not check_if_file_exists(f_curr_step):
context = {}
template = "home/error_start_backend.html"
return render(request, template, context)
with open(f_curr_sim_code) as json_file:
sim_code = json.load(json_file)["sim_code"]
with open(f_curr_step) as json_file:
step = json.load(json_file)["step"]
os.remove(f_curr_step)
persona_names = []
persona_names_set = set()
for i in find_filenames(f"storage/{sim_code}/personas", ""):
x = i.split("/")[-1].strip()
if x[0] != ".":
persona_names += [[x, x.replace(" ", "_")]]
persona_names_set.add(x)
persona_init_pos = []
file_count = []
for i in find_filenames(f"storage/{sim_code}/environment", ".json"):
x = i.split("/")[-1].strip()
if x[0] != ".":
file_count += [int(x.split(".")[0])]
curr_json = f'storage/{sim_code}/environment/{str(max(file_count))}.json'
with open(curr_json) as json_file:
persona_init_pos_dict = json.load(json_file)
for key, val in persona_init_pos_dict.items():
if key in persona_names_set:
persona_init_pos += [[key, val["x"], val["y"]]]
context = {"sim_code": sim_code,
"step": step,
"persona_names": persona_names,
"persona_init_pos": persona_init_pos,
"mode": "simulate"}
template = "home/home.html"
return render(request, template, context)
def replay(request, sim_code, step):
sim_code = sim_code
step = int(step)
persona_names = []
persona_names_set = set()
for i in find_filenames(f"storage/{sim_code}/personas", ""):
x = i.split("/")[-1].strip()
if x[0] != ".":
persona_names += [[x, x.replace(" ", "_")]]
persona_names_set.add(x)
persona_init_pos = []
file_count = []
for i in find_filenames(f"storage/{sim_code}/environment", ".json"):
x = i.split("/")[-1].strip()
if x[0] != ".":
file_count += [int(x.split(".")[0])]
curr_json = f'storage/{sim_code}/environment/{str(max(file_count))}.json'
with open(curr_json) as json_file:
persona_init_pos_dict = json.load(json_file)
for key, val in persona_init_pos_dict.items():
if key in persona_names_set:
persona_init_pos += [[key, val["x"], val["y"]]]
context = {"sim_code": sim_code,
"step": step,
"persona_names": persona_names,
"persona_init_pos": persona_init_pos,
"mode": "replay"}
template = "home/home.html"
return render(request, template, context)
def replay_persona_state(request, sim_code, step, persona_name):
sim_code = sim_code
step = int(step)
persona_name_underscore = persona_name
persona_name = " ".join(persona_name.split("_"))
memory = f"storage/{sim_code}/personas/{persona_name}/bootstrap_memory"
if not os.path.exists(memory):
memory = f"compressed_storage/{sim_code}/personas/{persona_name}/bootstrap_memory"
with open(memory + "/scratch.json") as json_file:
scratch = json.load(json_file)
with open(memory + "/spatial_memory.json") as json_file:
spatial = json.load(json_file)
with open(memory + "/associative_memory/nodes.json") as json_file:
associative = json.load(json_file)
a_mem_event = []
a_mem_chat = []
a_mem_thought = []
for count in range(len(associative.keys()), 0, -1):
node_id = f"node_{str(count)}"
node_details = associative[node_id]
if node_details["type"] == "event":
a_mem_event += [node_details]
elif node_details["type"] == "chat":
a_mem_chat += [node_details]
elif node_details["type"] == "thought":
a_mem_thought += [node_details]
context = {"sim_code": sim_code,
"step": step,
"persona_name": persona_name,
"persona_name_underscore": persona_name_underscore,
"scratch": scratch,
"spatial": spatial,
"a_mem_event": a_mem_event,
"a_mem_chat": a_mem_chat,
"a_mem_thought": a_mem_thought}
template = "persona_state/persona_state.html"
return render(request, template, context)
def path_tester(request):
context = {}
template = "path_tester/path_tester.html"
return render(request, template, context)
def process_environment(request):
"""
<FRONTEND to BACKEND>
This sends the frontend visual world information to the backend server.
It does this by writing the current environment representation to
the "storage/{sim_code}/environment/{step}.json" file.
ARGS:
request: Django request
RETURNS:
HttpResponse: string confirmation message.
"""
# f_curr_sim_code = "temp_storage/curr_sim_code.json"
# with open(f_curr_sim_code) as json_file:
# sim_code = json.load(json_file)["sim_code"]
data = json.loads(request.body)
step = data["step"]
sim_code = data["sim_code"]
environment = data["environment"]
with open(f"storage/{sim_code}/environment/{step}.json", "w") as outfile:
outfile.write(json.dumps(environment, indent=2))
return HttpResponse("received")
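# A minimal sketch of the expected request body (editor's addition; fields
# inferred from the code above, values are placeholders):
# {"step": 10, "sim_code": "<sim_code>",
#  "environment": {"<persona name>": {"x": 25, "y": 30}}}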
def update_environment(request):
"""
<BACKEND to FRONTEND>
This sends the backend computation of the persona behavior to the frontend
visual server.
It does this by reading the new movement information from
the "storage/{sim_code}/movement/{step}.json" file.
ARGS:
request: Django request
RETURNS:
HttpResponse
"""
# f_curr_sim_code = "temp_storage/curr_sim_code.json"
# with open(f_curr_sim_code) as json_file:
# sim_code = json.load(json_file)["sim_code"]
data = json.loads(request.body)
step = data["step"]
sim_code = data["sim_code"]
response_data = {"<step>": -1}
if (check_if_file_exists(f"storage/{sim_code}/movement/{step}.json")):
with open(f"storage/{sim_code}/movement/{step}.json") as json_file:
response_data = json.load(json_file)
response_data["<step>"] = step
return JsonResponse(response_data)
def path_tester_update(request):
"""
Processing the path and saving it to path_tester_env.json temp storage for
conducting the path tester.
ARGS:
request: Django request
RETURNS:
HttpResponse: string confirmation message.
"""
data = json.loads(request.body)
camera = data["camera"]
with open(f"temp_storage/path_tester_env.json", "w") as outfile:
outfile.write(json.dumps(camera, indent=2))
return HttpResponse("received")
| generative_agents-main | environment/frontend_server/translator/views.py |
# Generated by Django 2.2 on 2023-03-27 08:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translator', '0002_evaldata_target_agent'),
]
operations = [
migrations.AlterField(
model_name='evaldata',
name='target_agent',
field=models.CharField(blank=True, max_length=256, null=True),
),
]
| generative_agents-main | environment/frontend_server/translator/migrations/0003_auto_20230327_0851.py |
generative_agents-main | environment/frontend_server/translator/migrations/__init__.py |
|
# Generated by Django 2.2 on 2023-03-30 02:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translator', '0003_auto_20230327_0851'),
]
operations = [
migrations.AddField(
model_name='evaldata',
name='q1_v5',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='evaldata',
name='q2_v5',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='evaldata',
name='q3_v5',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='evaldata',
name='q4_v5',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='evaldata',
name='q5_v5',
field=models.IntegerField(default=1),
preserve_default=False,
),
]
| generative_agents-main | environment/frontend_server/translator/migrations/0004_auto_20230330_0204.py |
# Generated by Django 2.2 on 2023-03-27 06:21
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='EvalData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prolific_id', models.CharField(max_length=127)),
('attention_check', models.CharField(max_length=256)),
('q1_v1', models.IntegerField()),
('q1_v2', models.IntegerField()),
('q1_v3', models.IntegerField()),
('q1_v4', models.IntegerField()),
('q2_v1', models.IntegerField()),
('q2_v2', models.IntegerField()),
('q2_v3', models.IntegerField()),
('q2_v4', models.IntegerField()),
('q3_v1', models.IntegerField()),
('q3_v2', models.IntegerField()),
('q3_v3', models.IntegerField()),
('q3_v4', models.IntegerField()),
('q4_v1', models.IntegerField()),
('q4_v2', models.IntegerField()),
('q4_v3', models.IntegerField()),
('q4_v4', models.IntegerField()),
('q5_v1', models.IntegerField()),
('q5_v2', models.IntegerField()),
('q5_v3', models.IntegerField()),
('q5_v4', models.IntegerField()),
('rank', models.CharField(max_length=256)),
('justification', models.CharField(max_length=2055)),
],
),
]
| generative_agents-main | environment/frontend_server/translator/migrations/0001_initial.py |
# Generated by Django 2.2 on 2023-03-27 08:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translator', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='evaldata',
name='target_agent',
field=models.IntegerField(blank=True, null=True),
),
]
| generative_agents-main | environment/frontend_server/translator/migrations/0002_evaldata_target_agent.py |
# Generated by Django 2.2 on 2023-07-02 06:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('translator', '0004_auto_20230330_0204'),
]
operations = [
migrations.DeleteModel(
name='EvalData',
),
]
| generative_agents-main | environment/frontend_server/translator/migrations/0005_delete_evaldata.py |
from collections import Counter
from typing import Any, Dict, List, Optional, Tuple
def sample_outputs(prompt: str, question: str, language_model, num_samples: int) -> List[Tuple[str, str]]:
sampled_outputs = []
for _ in range(num_samples):
output = language_model.generate(prompt, question)
reasoning_path, answer = parse_output(output)
sampled_outputs.append((reasoning_path, answer))
return sampled_outputs
def get_rejection_reasons(sampled_outputs: List[Tuple[str, str]]) -> Dict[str, Any]:
    rejection_reasons = {}
    for output in sampled_outputs:
        reasoning_path, answer = output
        reason = check_rejection_reason(reasoning_path, answer)
        if reason:
            rejection_reasons[reasoning_path] = reason
    return rejection_reasons
def check_rejection_reason(reasoning_path: str, answer: str) -> Optional[Any]:
    # Implement a function to check the reasons for rejection; return None to accept the path.
    pass
def adjust_outputs(sampled_outputs: List[Tuple[str, str]], rejection_reasons: Dict[str, Any]) -> List[Tuple[str, str]]:
    adjusted_outputs = []
    for output in sampled_outputs:
        reasoning_path, answer = output
        if reasoning_path not in rejection_reasons:
            adjusted_outputs.append(output)
        else:
            adjusted_reasoning_path = adjust_reasoning_path(reasoning_path, rejection_reasons)
            adjusted_outputs.append((adjusted_reasoning_path, answer))
    return adjusted_outputs
def adjust_reasoning_path(reasoning_path: str, rejection_reasons: Dict[str, Any]) -> str:
    # Implement a function to adjust the reasoning path based on the rejection reasons.
    pass
def parse_output(output: str) -> Tuple[str, str]:
# Implement a function to parse the output into reasoning_path and answer
pass
def aggregate_answers(sampled_outputs: List[Tuple[str, str]]) -> List[str]:
answers = [output[1] for output in sampled_outputs]
return answers
def find_most_consistent_answer(aggregated_answers: List[str]) -> str:
counter = Counter(aggregated_answers)
most_consistent_answer, _ = counter.most_common(1)[0]
return most_consistent_answer
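# Example (editor's addition): find_most_consistent_answer(["42", "17", "42"])
# returns "42", since Counter-based majority voting picks the most frequent answer.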
def self_consistency(prompt: str, question: str, language_model, num_samples: int) -> str:
sampled_outputs = sample_outputs(prompt, question, language_model, num_samples)
rejection_reasons = get_rejection_reasons(sampled_outputs)
adjusted_outputs = adjust_outputs(sampled_outputs, rejection_reasons)
aggregated_answers = aggregate_answers(adjusted_outputs)
most_consistent_answer = find_most_consistent_answer(aggregated_answers)
return most_consistent_answer | COT-SC-main | sc.py |
import torch
from medpalm.model import MedPalm
#usage
img = torch.randn(1, 3, 256, 256)
caption = torch.randint(0, 20000, (1, 1024))
model = MedPalm()
output = model(img, caption)
print(output.shape) # (1, 1024, 20000)
| Med-PaLM-main | example.py |
from medpalm.model import MedPalm | Med-PaLM-main | medpalm/__init__.py |
import torch
import torch.nn as nn
from transformers import AutoTokenizer, CLIPProcessor
from medpalm.transformer import (
AutoregressiveWrapper,
Decoder,
Encoder,
Transformer,
ViTransformerWrapper,
)
class MedPalmTokenizer:
def __init__(self):
try:
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
additional_special_tokens=["<image>", "</image>"],
eos_token ="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"])
except Exception as e:
print(f"Error init tokenizer: {e}")
def tokenize_texts(self, texts):
try:
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
except Exception as e:
print(f"Error tokenizing texts: {e}")
def tokenize_images(self, images):
try:
tokenized_images = self.processor(images=images, return_tensors="pt").pixel_values
print(f"Tokenized image: {tokenized_images.shape}")
return tokenized_images
except Exception as e:
print(f"Error tokenizing images: {e}")
def tokenize(self, sample):
try:
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
except Exception as e:
print(f"Error during tokenization {e}")
# class MedPalm(nn.Module):
# """
# MedPalm is a transformer-based model architecture. It initializes with
# a Transformer and AutoregressiveWrapper with default or user-specified parameters.
# Initialize the model with specified or default parameters.
# Args:
# - num_tokens: Number of tokens in the vocabulary
# - max_seq_len: Maximum sequence length
# - dim: Dimension of the model
# - depth: Depth of the model
# - dim_head: Dimension of the model head
# - heads: Number of heads
# - use_abs_pos_emb: Whether to use absolute position embedding
# - alibi_pos_bias: Alibi position bias
# - alibi_num_heads: Number of alibi heads
# - rotary_xpos: Rotary position
# - attn_flash: Attention flash
# - deepnorm: Deep normalization
# - shift_tokens: Number of tokens to shift
# - attn_one_kv_head: Attention one key/value head
# - qk_norm: Query-key normalization
# - attn_qk_norm: Attention query-key normalization
# - attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
# - embedding_provider: Embedding provider module
# """
# def __init__(self,
# num_tokens=20000,
# max_seq_len=4096,
# dim=2560,
# depth=32,
# dim_head=128,
# heads=24,
# use_abs_pos_emb=False,
# alibi_pos_bias=True,
# alibi_num_heads=12,
# rotary_xpos=True,
# attn_flash=True,
# image_size=256,
# patch_size=32,
# attn_one_kv_head=False, # multiquery attention
# qk_norm=True,
# attn_qk_norm=False,
# attn_qk_norm_dim_scale=False,
# ):
# super(MedPalm, self).__init__()
# self.encoder = ViTransformerWrapper(
# image_size=image_size,
# patch_size=patch_size,
# attn_layers=Encoder(
# dim=dim,
# depth=depth,
# dim_head=dim_head,
# heads=heads
# )
# )
# self.decoder = Transformer(
# num_tokens=num_tokens,
# max_seq_len=max_seq_len,
# use_abs_pos_emb=use_abs_pos_emb,
# attn_layers=Decoder(
# dim=dim,
# depth=depth,
# dim_head=dim_head,
# heads=heads,
# alibi_pos_bias=alibi_pos_bias,
# alibi_num_heads=alibi_num_heads,
# rotary_xpos=rotary_xpos,
# attn_flash=attn_flash,
# attn_one_kv_head=False,
# qk_norm=qk_norm,
# attn_qk_norm=False,
# attn_qk_norm_dim_scale=False,
# cross_attend=True
# )
# )
# # self.decoder = AutoregressiveWrapper(self.decoder)
# def forward(self, text_tokens, img, **kwargs):
# """
# Forward pass through the model. It expects the input text_tokens.
# Args:
# - text_tokens: Input tokens
# - kwargs: Other arguments
# Returns:
# - output from the decoder
# """
# try:
# print(f"Text tokens shape: {text_tokens.shape}")
# encoded = self.encoder(img, return_embeddings=True)
# print(encoded.shape)
# return self.decoder(text_tokens, context=encoded)
# except Exception as error:
# print(f"Failed in forward method: {error}")
# raise
class MedPalm(torch.nn.Module):
def __init__(self,
image_size=256,
patch_size=32,
encoder_dim=512,
encoder_depth=6,
encoder_heads=8,
num_tokens=20000,
max_seq_len=1024,
decoder_dim=512,
decoder_depth=6,
decoder_heads=8,
alibi_num_heads=4,
use_abs_pos_emb=False,
cross_attend=True,
alibi_pos_bias=True,
rotary_xpos=True,
attn_flash=True,
qk_norm=True):
super(MedPalm, self).__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads
)
)
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=decoder_dim,
depth=decoder_depth,
heads=decoder_heads,
cross_attend=cross_attend,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
qk_norm=qk_norm,
)
)
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
raise
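# --- Hedged usage sketch (added for illustration; not part of the original repository file) ---
# A minimal smoke test of the MedPalm wrapper above, assuming its default constructor
# arguments; the image follows ViTransformerWrapper's (batch, channels, height, width)
# layout and the text is a batch of token ids below num_tokens.
if __name__ == "__main__":
    model = MedPalm()
    img = torch.randn(1, 3, 256, 256)         # image_size=256, 3 channels
    text = torch.randint(0, 20000, (1, 128))  # seq_len <= max_seq_len=1024
    logits = model(img, text)
    print(logits.shape)                       # expected: torch.Size([1, 128, 20000])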
| Med-PaLM-main | medpalm/model.py |
from functools import partial
from typing import Optional
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Optional[Tensor] = None
pre_softmax_attn: Optional[Tensor] = None
post_softmax_attn: Optional[Tensor] = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
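# Illustrative note (not in the original source): for i = j = 4, create_causal_mask
# returns a boolean matrix where True marks positions that get masked out (future tokens):
#   [[False,  True,  True,  True],
#    [False, False,  True,  True],
#    [False, False, False,  True],
#    [False, False, False, False]]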
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
        # in case this helps control outliers, as proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
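# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The Attend module consumes multi-headed q/k/v of shape (batch, heads, seq, dim_head)
# and returns the attended values plus an Intermediates record, e.g.:
#
#   attend = Attend(causal = True, dropout = 0.1, flash = False)
#   q = torch.randn(2, 8, 128, 64)
#   k = torch.randn(2, 8, 128, 64)
#   v = torch.randn(2, 8, 128, 64)
#   out, intermediates = attend(q, k, v)   # out: (2, 8, 128, 64)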
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
        # now loop through each head, with the output of the previous head summed into the next head's query
        # thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
return all_outs, aggregated_intermediates | Med-PaLM-main | medpalm/attend.py |
import math
from dataclasses import dataclass
from functools import partial, wraps
from inspect import isfunction
# constants
from math import ceil
from random import random
from typing import Callable, List, Optional
import torch
import torch.nn.functional as F
from einops import pack, rearrange, reduce, repeat, unpack
from torch import Tensor, einsum, nn
from medpalm.attend import Attend, Intermediates
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
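# Illustrative example (not in the original source): the filtering helpers above are
# applied to a (batch, vocab) logits tensor before sampling, e.g.
#
#   logits = torch.randn(1, 20000)
#   filtered = top_k(logits, thres = 0.9)          # keeps roughly the top 10% of logits
#   probs = F.softmax(filtered / 0.8, dim = -1)    # temperature 0.8
#   sample = torch.multinomial(probs, 1)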
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, return_loss=True, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
if return_loss:
return logits, loss
return logits
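# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# AutoregressiveWrapper expects a `net` exposing `max_seq_len` and returning logits of
# shape (batch, seq, vocab); with the Transformer / Decoder classes defined further
# below in this module, it could be used roughly as:
#
#   net = Transformer(num_tokens = 20000, max_seq_len = 1024,
#                     attn_layers = Decoder(dim = 512, depth = 6, heads = 8))
#   model = AutoregressiveWrapper(net)
#   tokens = torch.randint(0, 20000, (1, 1024))
#   logits, loss = model(tokens)                   # teacher-forced loss on shifted tokens
#   generated = model.generate(tokens[:, :1], 64)  # sample 64 new tokens from a 1-token prompt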
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: Optional[List[Tensor]] = None
attn_intermediates: Optional[List[Intermediates]] = None
layer_hiddens: Optional[List[Tensor]] = None
attn_z_loss: Optional[Tensor] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# auxiliary loss helpers
def calc_z_loss(
pre_softmax_attns: List[Tensor],
mask = None,
weight = 1.
):
# the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906
# in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects
# also used in PaLM as one of the measures
lse = 0.
for attn in pre_softmax_attns:
lse = lse + attn.logsumexp(dim = -1)
loss = torch.square(lse)
loss = reduce(loss, 'b h n -> b n', 'sum')
if not exists(mask):
return loss.mean() * weight
loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5)
return loss * weight
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
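# Illustrative example (not in the original source): dropout_seq randomly keeps a
# fraction of the sequence positions (the structured dropout used for cross-attention
# tokens), e.g.
#
#   seq = torch.randn(2, 100, 512)
#   mask = torch.ones(2, 100).bool()
#   seq, mask = dropout_seq(seq, mask, 0.25)   # seq: (2, 75, 512), mask: (2, 75)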
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert (dim % 2) == 0
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
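# Illustrative example (not in the original source): the ALiBi bias is a per-head linear
# penalty on key distance, broadcast over the attention logits, e.g.
#
#   alibi = AlibiPositionalBias(heads = 4, total_heads = 8)
#   bias = alibi(128, 128)   # shape (8, 128, 128); the last 4 heads are zero-padded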
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
interpolation_factor = 1.,
base = 10000,
base_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
base *= base_rescale_factor ** (dim / (dim - 2))
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
assert interpolation_factor >= 1.
self.interpolation_factor = interpolation_factor
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
t = t / self.interpolation_factor
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
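# Illustrative example (not in the original source): rotary frequencies are generated
# once per forward pass and applied to the per-head queries / keys, e.g.
#
#   rotary = RotaryEmbedding(32)
#   freqs, xpos_scale = rotary(128, torch.device('cpu'))   # xpos_scale is 1. when use_xpos=False
#   q = torch.randn(2, 8, 128, 32)
#   q = apply_rotary_pos_emb(q, freqs, scale = 1)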
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
scale_fn = lambda t: t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.g
class SimpleRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
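# Illustrative example (not in the original source): shift pads and crops along the
# sequence dimension, so shift(t, 1) moves every token one step forward in time, e.g.
#
#   t = torch.arange(4).float().view(1, 4, 1)   # sequence [0, 1, 2, 3]
#   shift(t, 1).flatten()                       # tensor([0., 0., 1., 2.])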
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(
self,
dim_in,
dim_out,
activation: Callable,
mult_bias = False
):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1.
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate) * self.mult_bias
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
glu_mult_bias = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
if glu:
project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias)
else:
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
)
self.ff = Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else None,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
cascading_heads = False,
add_zero_kv = False, # same as add_zero_attn in pytorch
onnxable = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
value_dim_head = default(value_dim_head, dim_head)
q_dim = k_dim = dim_head * heads
v_dim = out_dim = value_dim_head * heads
self.one_kv_head = one_kv_head
if one_kv_head:
k_dim = dim_head
v_dim = value_dim_head
out_dim = v_dim * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
sparse_topk = sparse_topk,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv = add_zero_kv,
flash = flash,
onnxable = onnxable
)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if not self.one_kv_head:
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
scale = self.qk_norm_scale
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = context_mask if has_context else mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
mask_value = max_neg_value(q)
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
use_simple_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
rotary_base_rescale_factor = 1.,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm'
if use_scalenorm:
norm_class = ScaleNorm
elif use_rmsnorm:
norm_class = RMSNorm
elif use_simple_rmsnorm:
norm_class = SimpleRMSNorm
else:
norm_class = nn.LayerNorm
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
layer_hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x * self.resi_dual_scale
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
is_last = ind == (len(self.layers) - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
if return_hiddens:
layer_hiddens.append(x)
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm):
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = outer_residual + out * self.resi_dual_scale
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if return_hiddens:
layer_hiddens.append(x)
if self.resi_dual:
x = x + self.final_norm(outer_residual)
else:
x = self.final_norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates,
layer_hiddens = layer_hiddens
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
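# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# With return_embeddings=True the wrapper acts as a patch-level image encoder, as it is
# used by the MedPalm model in medpalm/model.py:
#
#   vit = ViTransformerWrapper(image_size = 256, patch_size = 32,
#                              attn_layers = Encoder(dim = 512, depth = 6, heads = 8))
#   img = torch.randn(1, 3, 256, 256)
#   embeddings = vit(img, return_embeddings = True)   # (1, 64, 512): 8 x 8 patches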
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1
attn_z_loss_weight = 1e-4
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
return_attn_z_loss = False,
attn_z_loss_weight = 1e-4,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
        # whether to prepend embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
# auto-handle masking after appending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_attn_z_loss:
pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates))
intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight)
return_intermediates = True
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out | Med-PaLM-main | medpalm/transformer.py |
from setuptools import setup, find_packages
setup(
name = 'Sophia-Optimizer',
packages = find_packages(exclude=[]),
version = '0.2.1',
license='APACHE',
description = 'Sophia Optimizer ULTRA FAST',
author = 'Kye Gomez',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/Sophia',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers',
"Prompt Engineering"
],
install_requires=[
'torch',
'datasets',
'transformers',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | Sophia-main | setup.py |
import torch
from torch import nn
from sophia.sophia import SophiaG
#define super simple model
model = nn.Sequential(
nn.Linear(10, 5),
nn.ReLU(),
nn.Linear(5, 2)
)
#define a loss function
loss_fn = nn.CrossEntropyLoss()
#optimize
optimizer = SophiaG(model.parameters(), lr=0.01, betas=(0.9, 0.999), rho=0.04,
weight_decay=0.01, maximize=False, capturable=False, dynamic=True)
#generate some random data
inputs = torch.randn(1, 10)
targets = torch.randint(0, 2, (1,))
#forward pass
outputs = model(inputs)
loss = loss_fn(outputs, targets)
#backward pass and optimization
loss.backward()
optimizer.step()
#clear the gradients for the next iteration
optimizer.zero_grad() | Sophia-main | example.py |
import torch
import multiprocessing
from itertools import chain
from datasets import load_dataset
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config
from transformers import DataCollatorForLanguageModeling
from transformers import Trainer, TrainingArguments
# from Sophia.decoupled_sophia.decoupled_sophia import DecoupledSophia, HutchinsonEstimator
from sophia.sophia import DecoupledSophia
from transformers import AutoTokenizer
# Load and preprocess the OpenWebText dataset
class CFG:
SEQ_LEN: int = 1024
NUM_CPU: int = multiprocessing.cpu_count()
TOKENIZER: str = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(CFG.TOKENIZER)
dataset = load_dataset("openwebtext")
def tokenize_function(example):
    # with batched=True, example["text"] is a list of strings
    return tokenizer([text + tokenizer.eos_token for text in example["text"]])
tokenized_dataset = dataset.map(
tokenize_function,
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
def group_texts(examples):
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=CFG.NUM_CPU,
)
# Initialize the GPT-2 model and tokenizer
config = GPT2Config.from_pretrained("gpt2", n_ctx=1024)
model = GPT2LMHeadModel.from_pretrained("gpt2", config=config)
# Initialize the DecoupledSophia optimizer
optimizer = DecoupledSophia(model.parameters(), lr=1e-3, betas=(0.9, 0.999), rho=0.04, weight_decay=1e-1, k=10, estimator="Hutchinson")
# Set up the training arguments
training_args = TrainingArguments(
output_dir="output",
overwrite_output_dir=True,
num_train_epochs=3,
per_device_train_batch_size=480,
save_steps=10_000,
save_total_limit=2,
prediction_loss_only=True,
gradient_accumulation_steps=1,
    max_grad_norm=1.0,
    lr_scheduler_type="cosine",
warmup_steps=2000,
report_to="none",
)
# Create the Trainer
trainer = Trainer(
model=model,
args=training_args,
data_collator=DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False),
    train_dataset=train_dataset["train"],  # select the train split from the DatasetDict
optimizers=(optimizer, None),
)
# Train the model
trainer.train()
# Evaluate the model
eval_results = trainer.evaluate()
print(f"Perplexity: {torch.exp(torch.tensor(eval_results['eval_loss']))}") | Sophia-main | experiments/training.py |
import multiprocessing
from itertools import chain
from datasets import load_dataset
from transformers import AutoTokenizer
class CFG:
SEQ_LEN: int = 1024
NUM_CPU: int = multiprocessing.cpu_count()
TOKENIZER: str = "gpt2"
def build_dataset():
tokenizer = AutoTokenizer.from_pretrained(CFG.TOKENIZER)
dataset = load_dataset("openwebtext")
    def tokenize_function(example):
        # with batched=True, example["text"] is a list of strings
        return tokenizer([text + tokenizer.eos_token for text in example["text"]])
tokenized_dataset = dataset.map(
tokenize_function,
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
def group_texts(examples):
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
processed_dataset = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=CFG.NUM_CPU,
)
# Save the preprocessed dataset
processed_dataset.save_to_disk("dataset")
if __name__ == '__main__':
build_dataset() | Sophia-main | experiments/preprocssing/build_dataset.py |
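A hedged sketch of loading the preprocessed data saved by build_dataset() back from disk, assuming the "dataset" directory written above.
from datasets import load_from_disk

processed_dataset = load_from_disk("dataset")
print(processed_dataset)  # DatasetDict keyed by split, e.g. "train"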
from sophia.sophia import SophiaG | Sophia-main | Sophia/__init__.py |
import math
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
from typing import List, Optional
class SophiaG(Optimizer):
"""
SophiaG optimizer class.
"""
def __init__(self, params, lr=1e-4, betas=(0.965, 0.99), rho = 0.04,
weight_decay=1e-1, *, maximize: bool = False,
capturable: bool = False, dynamic: bool = False):
"""
Initialize the optimizer.
"""
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= rho:
raise ValueError("Invalid rho parameter at index 1: {}".format(rho))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, rho=rho,
weight_decay=weight_decay,
maximize=maximize, capturable=capturable, dynamic=dynamic)
super(SophiaG, self).__init__(params, defaults)
def __setstate__(self, state):
"""
Set the state of the optimizer.
"""
super().__setstate__(state)
for group in self.param_groups:
group.setdefault('maximize', False)
group.setdefault('capturable', False)
group.setdefault('dynamic', False)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
if not step_is_tensor:
for s in state_values:
s['step'] = torch.tensor(float(s['step']))
@torch.no_grad()
def update_hessian(self):
"""
Update the hessian.
"""
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \
if self.defaults['capturable'] else torch.tensor(0.)
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if 'hessian' not in state.keys():
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['hessian'].mul_(beta2).addcmul_(p.grad, p.grad, value=1 - beta2)
@torch.no_grad()
def update_exp_avg(self):
"""
Update the exponential average.
"""
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['exp_avg'].mul_(beta1).add_(p.grad, alpha=1 - beta1)
@torch.no_grad()
def step(self, closure=None, bs=5120):
"""
Perform a step of the optimizer.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
self.update_hessian()
self.update_exp_avg()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
state_steps = []
hessian = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
                    raise RuntimeError('SophiaG does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \
if self.defaults['capturable'] else torch.tensor(0.)
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if 'hessian' not in state.keys():
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
state_steps.append(state['step'])
hessian.append(state['hessian'])
if self.defaults['capturable']:
bs = torch.ones((1,), dtype=torch.float, device=p.device) * bs
self._sophiag(params_with_grad,
grads,
exp_avgs,
hessian,
state_steps,
bs=bs,
beta1=beta1,
beta2=beta2,
rho=group['rho'],
lr=group['lr'],
weight_decay=group['weight_decay'],
maximize=group['maximize'],
capturable=group['capturable'])
return loss
def _sophiag(self, params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
hessian: List[Tensor],
state_steps: List[Tensor],
capturable: bool = False,
*,
bs: int,
beta1: float,
beta2: float,
rho: float,
lr: float,
weight_decay: float,
maximize: bool):
"""
SophiaG function.
"""
if not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
self._single_tensor_sophiag(params,
grads,
exp_avgs,
hessian,
state_steps,
bs=bs,
beta1=beta1,
beta2=beta2,
rho=rho,
lr=lr,
weight_decay=weight_decay,
maximize=maximize,
capturable=capturable)
def _single_tensor_sophiag(self, params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
hessian: List[Tensor],
state_steps: List[Tensor],
*,
bs: int,
beta1: float,
beta2: float,
rho: float,
lr: float,
weight_decay: float,
maximize: bool,
capturable: bool):
"""
SophiaG function for single tensor.
"""
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
exp_avg = exp_avgs[i]
hess = hessian[i]
step_t = state_steps[i]
if capturable:
assert param.is_cuda and step_t.is_cuda and bs.is_cuda
if torch.is_complex(param):
grad = torch.view_as_real(grad)
exp_avg = torch.view_as_real(exp_avg)
hess = torch.view_as_real(hess)
param = torch.view_as_real(param)
# update step
step_t += 1
# Perform stepweight decay
param.mul_(1 - lr * weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
if capturable:
step = step_t
step_size = lr
step_size_neg = step_size.neg()
ratio = (exp_avg.abs() / (rho * bs * hess + 1e-15)).clamp(None,1)
param.addcmul_(exp_avg.sign(), ratio, value=step_size_neg)
else:
step = step_t.item()
step_size_neg = - lr
ratio = (exp_avg.abs() / (rho * bs * hess + 1e-15)).clamp(None,1)
param.addcmul_(exp_avg.sign(), ratio, value=step_size_neg) | Sophia-main | Sophia/Sophia.py |
MIT-main | MIT/model.py |
|
import torch
from nevax.model import Neva
#usage
img = torch.randn(1, 3, 256, 256)
caption = torch.randint(0, 20000, (1, 1024))
model = Neva()
output = model(img, caption)
print(output.shape) # (1, 1024, 20000)
| NeVA-main | example.py |
from nevax.model import Neva
| NeVA-main | nevax/__init__.py |
import torch
import torch.nn as nn
from transformers import AutoTokenizer, CLIPProcessor
from nevax.transformer import (
Decoder,
Encoder,
Transformer,
ViTransformerWrapper,
)
class NevaTokenizer:
def __init__(self):
try:
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
additional_special_tokens=["<image>", "</image>"],
eos_token ="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"])
except Exception as e:
print(f"Error init tokenizer: {e}")
def tokenize_texts(self, texts):
try:
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
except Exception as e:
print(f"Error tokenizing texts: {e}")
def tokenize_images(self, images):
try:
tokenized_images = self.processor(images=images, return_tensors="pt").pixel_values
print(f"Tokenized image: {tokenized_images.shape}")
return tokenized_images
except Exception as e:
print(f"Error tokenizing texts: {e}")
def tokenize(self, sample):
try:
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
except Exception as e:
print(f"Error during tokenization {e}")
class Neva(torch.nn.Module):
def __init__(self,
image_size=256,
patch_size=32,
encoder_dim=512,
encoder_depth=6,
encoder_heads=8,
num_tokens=20000,
max_seq_len=1024,
decoder_dim=512,
decoder_depth=6,
decoder_heads=8,
alibi_num_heads=4,
use_abs_pos_emb=False,
cross_attend=True,
alibi_pos_bias=True,
rotary_xpos=True,
attn_flash=True,
qk_norm=True):
super(Neva, self).__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads
)
)
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=decoder_dim,
depth=decoder_depth,
heads=decoder_heads,
cross_attend=cross_attend,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
qk_norm=qk_norm,
)
)
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
raise
| NeVA-main | nevax/model.py |
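A hedged usage sketch for the NevaTokenizer defined above; the sample dict keys and the blank PIL image are hypothetical, and building the tokenizer downloads the pretrained CLIP processor and GPT-NeoX tokenizer.
from PIL import Image
from nevax.model import NevaTokenizer

tokenizer = NevaTokenizer()
sample = {
    "target_text": ["a photo of a cat"],
    "image": Image.new("RGB", (224, 224)),
}
batch = tokenizer.tokenize(sample)
print(batch["text_tokens"].shape, batch["images"].shape, batch["attention_mask"].shape)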
from functools import partial
from typing import Optional
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Optional[Tensor] = None
pre_softmax_attn: Optional[Tensor] = None
post_softmax_attn: Optional[Tensor] = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
# in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
# now loop through each head, without output of previous head summed with the next head
# thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
return all_outs, aggregated_intermediates | NeVA-main | nevax/attend.py |
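A minimal sketch exercising the Attend module above with random tensors; shapes are illustrative, and flash attention stays disabled so it runs on CPU.
import torch
from nevax.attend import Attend

attend = Attend(causal=True, dropout=0.0)

q = torch.randn(1, 8, 16, 64)   # (batch, heads, seq_len, dim_head)
k = torch.randn(1, 8, 16, 64)
v = torch.randn(1, 8, 16, 64)

out, intermediates = attend(q, k, v)
print(out.shape)                # torch.Size([1, 8, 16, 64])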
import math
from dataclasses import dataclass
from functools import partial, wraps
from inspect import isfunction
# constants
from math import ceil
from random import random
from typing import Callable, List, Optional
import torch
import torch.nn.functional as F
from einops import pack, rearrange, reduce, repeat, unpack
from torch import Tensor, einsum, nn
from nevax.attend import Attend, Intermediates
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, return_loss=True, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
if return_loss:
return logits, loss
return logits
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: Optional[List[Tensor]] = None
attn_intermediates: Optional[List[Intermediates]] = None
layer_hiddens: Optional[List[Tensor]] = None
attn_z_loss: Optional[Tensor] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# auxiliary loss helpers
def calc_z_loss(
pre_softmax_attns: List[Tensor],
mask = None,
weight = 1.
):
# the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906
# in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects
# also used in PaLM as one of the measures
lse = 0.
for attn in pre_softmax_attns:
lse = lse + attn.logsumexp(dim = -1)
loss = torch.square(lse)
loss = reduce(loss, 'b h n -> b n', 'sum')
if not exists(mask):
return loss.mean() * weight
loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5)
return loss * weight
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert (dim % 2) == 0
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
interpolation_factor = 1.,
base = 10000,
base_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
base *= base_rescale_factor ** (dim / (dim - 2))
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
assert interpolation_factor >= 1.
self.interpolation_factor = interpolation_factor
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
t = t / self.interpolation_factor
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
scale_fn = lambda t: t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.g
class SimpleRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(
self,
dim_in,
dim_out,
activation: Callable,
mult_bias = False
):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1.
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate) * self.mult_bias
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
glu_mult_bias = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
if glu:
project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias)
else:
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
)
self.ff = Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else None,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
cascading_heads = False,
add_zero_kv = False, # same as add_zero_attn in pytorch
onnxable = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
value_dim_head = default(value_dim_head, dim_head)
q_dim = k_dim = dim_head * heads
v_dim = out_dim = value_dim_head * heads
self.one_kv_head = one_kv_head
if one_kv_head:
k_dim = dim_head
v_dim = value_dim_head
out_dim = v_dim * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
sparse_topk = sparse_topk,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv = add_zero_kv,
flash = flash,
onnxable = onnxable
)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if not self.one_kv_head:
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
scale = self.qk_norm_scale
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = context_mask if has_context else mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
mask_value = max_neg_value(q)
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
use_simple_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
rotary_base_rescale_factor = 1.,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm'
if use_scalenorm:
norm_class = ScaleNorm
elif use_rmsnorm:
norm_class = RMSNorm
elif use_simple_rmsnorm:
norm_class = SimpleRMSNorm
else:
norm_class = nn.LayerNorm
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
layer_hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x * self.resi_dual_scale
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
is_last = ind == (len(self.layers) - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
if return_hiddens:
layer_hiddens.append(x)
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm):
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = outer_residual + out * self.resi_dual_scale
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if return_hiddens:
layer_hiddens.append(x)
if self.resi_dual:
x = x + self.final_norm(outer_residual)
else:
x = self.final_norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates,
layer_hiddens = layer_hiddens
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
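def _encoder_decoder_usage_sketch():
    # Minimal usage sketch of the wrapper classes above (illustrative only; the sizes
    # and tensors below are arbitrary assumptions, not values used elsewhere in this file).
    enc = Encoder(dim = 64, depth = 2, heads = 4)                       # bidirectional stack
    dec = Decoder(dim = 64, depth = 2, heads = 4, cross_attend = True)  # causal stack with cross-attention
    src = torch.randn(1, 10, 64)
    tgt = torch.randn(1, 7, 64)
    context = enc(src)                 # (1, 10, 64) encoded representations
    out = dec(tgt, context = context)  # decoder self-attends causally and cross-attends to context
    return out.shape                   # torch.Size([1, 7, 64])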
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
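def _vit_wrapper_usage_sketch():
    # Minimal usage sketch for ViTransformerWrapper (illustrative only; image size,
    # patch size, and class count below are arbitrary assumptions).
    vit = ViTransformerWrapper(
        image_size = 32,
        patch_size = 8,
        num_classes = 10,
        attn_layers = Encoder(dim = 64, depth = 2, heads = 4),
    )
    img = torch.randn(2, 3, 32, 32)
    logits = vit(img)                            # (2, 10) class logits after mean pooling
    tokens = vit(img, return_embeddings = True)  # (2, 16, 64) per-patch embeddings
    return logits.shape, tokens.shape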
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1
attn_z_loss_weight = 1e-4
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
return_attn_z_loss = False,
attn_z_loss_weight = 1e-4,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
        # whether to prepend embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
            # auto-handle masking after prepending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_attn_z_loss:
pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates))
intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight)
return_intermediates = True
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out | NeVA-main | nevax/transformer.py |
Gemini-main | geminix/__init__.py |
|
class Gemini:
def __init__(self):
pass
| Gemini-main | geminix/model.py |
import math
import torch
from functools import partial
from torch import nn, einsum
from torch.autograd.function import Function
from einops import rearrange
from torch.jit import fork, wait
from torch.cuda.amp import autocast, GradScaler
from torch.nn import DataParallel
# constants
EPSILON = 1e-10
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# flash attention forwards and backwards
# flash attention v1 - https://arxiv.org/abs/2205.14135
# flash attention v2 - https://tridao.me/publications/flash2/flash2.pdf
class FlashAttentionFunction(Function):
@staticmethod
@torch.no_grad()
def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):
""" Algorithm 1 in the v2 paper """
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
o = torch.zeros_like(q)
all_row_sums = torch.zeros((*q.shape[:-1], 1), device = device)
all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, device = device)
scale = (q.shape[-1] ** -0.5)
num_row_tiles = math.ceil(q.shape[-2] / q_bucket_size)
num_col_tiles = math.ceil(k.shape[-2] / k_bucket_size)
if exists(mask) and mask.ndim == 2:
mask = rearrange(mask, 'b n -> b 1 1 n')
if not exists(mask):
col_masks = (None,) * num_col_tiles
mask = (col_masks,) * num_row_tiles
else:
mask = ((mask,) * num_row_tiles) if mask.shape[-2] == 1 else mask.split(q_bucket_size, dim = -2)
mask = tuple(((row_mask,) * num_col_tiles) if row_mask.shape[-1] == 1 else row_mask.split(k_bucket_size, dim = -1) for row_mask in mask)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
mask,
all_row_sums.split(q_bucket_size, dim = -2),
all_row_maxes.split(q_bucket_size, dim = -2),
)
for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if exists(col_mask):
attn_weights.masked_fill_(~col_mask, max_neg_value)
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
block_row_maxes = attn_weights.amax(dim = -1, keepdims = True)
new_row_maxes = torch.maximum(block_row_maxes, row_maxes)
exp_weights = torch.exp(attn_weights - new_row_maxes)
if exists(col_mask):
exp_weights.masked_fill_(~col_mask, 0.)
block_row_sums = exp_weights.sum(dim = -1, keepdims = True).clamp(min = EPSILON)
exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)
exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)
new_row_sums = exp_row_max_diff * row_sums + block_row_sums
oc.mul_(exp_row_max_diff).add_(exp_values)
row_maxes.copy_(new_row_maxes)
row_sums.copy_(new_row_sums)
oc.div_(row_sums)
lse = all_row_sums.log() + all_row_maxes
ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)
ctx.save_for_backward(q, k, v, o, lse)
return o
@staticmethod
@torch.no_grad()
def backward(ctx, do):
""" Algorithm 2 in the v2 paper """
causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args
q, k, v, o, lse = ctx.saved_tensors
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
dq = torch.zeros_like(q)
dk = torch.zeros_like(k)
dv = torch.zeros_like(v)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
do.split(q_bucket_size, dim = -2),
mask,
lse.split(q_bucket_size, dim = -2),
dq.split(q_bucket_size, dim = -2)
)
for ind, (qc, oc, doc, row_mask, lsec, dqc) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
dk.split(k_bucket_size, dim = -2),
dv.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, dkc, dvc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
p = torch.exp(attn_weights - lsec)
if exists(col_mask):
p.masked_fill_(~col_mask, 0.)
dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)
dp = einsum('... i d, ... j d -> ... i j', doc, vc)
D = (doc * oc).sum(dim = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)
dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)
dqc.add_(dq_chunk)
dkc.add_(dk_chunk)
dvc.add_(dv_chunk)
return dq, dk, dv, None, None, None, None
# main class
# just flash attention in plain pytorch
# it will be way slower than implementing it in CUDA
# for tinkering and educational purposes
class FlashAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
causal = False,
q_bucket_size = 512,
k_bucket_size = 1024,
parallel = False,
mixed_precision = False
):
super().__init__()
self.heads = heads
self.causal = causal
self.parallel = parallel
self.mixed_precision = mixed_precision
inner_dim = heads * dim_head
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
# memory efficient attention related parameters
# can be overriden on forward
self.q_bucket_size = q_bucket_size
self.k_bucket_size = k_bucket_size
if self.parallel:
self.model = DataParallel(self)
if self.mixed_precision:
self.scaler = GradScaler()
def forward(
self,
x,
context = None,
mask = None,
q_bucket_size = None,
k_bucket_size = None,
):
q_bucket_size = default(q_bucket_size, self.q_bucket_size)
k_bucket_size = default(k_bucket_size, self.k_bucket_size)
h = self.heads
context = default(context, x)
q = self.to_q(x)
k, v = self.to_kv(context).chunk(2, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
        if self.parallel:
            # Chunk the batch across the available GPUs. A full data-parallel path would
            # run each chunk through the attention kernel on its own device and gather
            # the results; the q, k, v computed above are used for the call below.
            num_gpus = torch.cuda.device_count()
            x_chunks = x.split(x.size(0) // num_gpus)
            x_chunks = [chunk.to(f'cuda:{i}') for i, chunk in enumerate(x_chunks)]
if self.mixed_precision:
# Use autocast to allow operations to run in lower precision
with autocast():
out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size)
else:
out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
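def _flash_vs_reference_sketch():
    # Small correctness sketch (illustrative only; shapes and bucket sizes are arbitrary
    # assumptions): the tiled FlashAttentionFunction above should agree with a plain
    # softmax-attention reference up to floating point error.
    q = torch.randn(1, 2, 64, 32)   # (batch, heads, seq_len, dim_head)
    k = torch.randn(1, 2, 64, 32)
    v = torch.randn(1, 2, 64, 32)
    scale = q.shape[-1] ** -0.5
    ref = torch.softmax(einsum('... i d, ... j d -> ... i j', q, k) * scale, dim = -1) @ v
    out = FlashAttentionFunction.apply(q, k, v, None, False, 16, 16)
    return torch.allclose(out, ref, atol = 1e-5)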
| FlashAttention20-main | attention.py |
import torch
import torch.cuda
import time
import psutil
from attention import FlashAttention
def test_memory_usage():
attention = FlashAttention(dim=512, heads=8, dim_head=64).cuda()
x = torch.randn(1, 1000, 512).cuda()
torch.cuda.synchronize()
start_mem = torch.cuda.memory_allocated()
out = attention(x)
torch.cuda.synchronize()
end_mem = torch.cuda.memory_allocated()
print(f'Memory usage: {end_mem - start_mem} bytes')
def test_speed():
attention = FlashAttention(dim=512, heads=8, dim_head=64).cuda()
x = torch.randn(1, 1000, 512).cuda()
start_time = time.time()
out = attention(x)
torch.cuda.synchronize()
end_time = time.time()
print(f'Execution time: {end_time - start_time} seconds')
def test_scalability():
attention = FlashAttention(dim=512, heads=8, dim_head=64).cuda()
for n in [1000, 2000, 4000, 8000, 16000, 32000]:
x = torch.randn(1, n, 512).cuda()
start_time = time.time()
out = attention(x)
torch.cuda.synchronize()
end_time = time.time()
print(f'Input size: {n}, Execution time: {end_time - start_time} seconds')
def test_error_rate():
attention = FlashAttention(dim=512, heads=8, dim_head=64).cuda()
x = torch.randn(1, 1000, 512).cuda()
y = torch.randn(1, 1000, 512).cuda()
out_x = attention(x)
out_y = attention(y)
error_rate = (out_x != out_y).float().mean().item()
print(f'Error rate: {error_rate}')
def test_forward():
attention = FlashAttention(dim=512, heads=8, dim_head=64)
x = torch.randn(1, 1000, 512)
out = attention(x)
assert out.shape == (1, 1000, 512), f'Unexpected output shape: {out.shape}'
def test_backward():
attention = FlashAttention(dim=512, heads=8, dim_head=64)
x = torch.randn(1, 1000, 512, requires_grad=True)
out = attention(x)
out.sum().backward()
assert x.grad is not None, 'No gradient computed'
test_memory_usage()
test_speed()
test_scalability()
test_error_rate()
test_forward()
test_backward() | FlashAttention20-main | test.py |
Math-Arxviv-main | example.py |
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import setuptools
def parse_requirements(filename):
logging.warning(f"Reading requirements from {filename}")
with open(filename, "r") as file:
lines = [line.strip() for line in file]
return [line for line in lines if line and not line.startswith("#")]
here = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(here, "src", "alpaca_farm", "__init__.py")) as f:
meta_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", f.read(), re.M)
if meta_match:
version = meta_match.group(1)
else:
raise RuntimeError("Unable to find `__version__`.")
# Must use absolute path here to ensure `python3 -m build` runs when porting package to pypi.
install_requires = parse_requirements(os.path.join(os.path.dirname(__file__), "requirements.txt"))
setuptools.setup(
name="alpaca_farm",
version=version,
package_dir={"": "src"},
packages=setuptools.find_packages("src"),
include_package_data=True,
install_requires=install_requires,
extras_require={
"full": [
# Training efficiency.
"flash-attn",
"apex",
"deepspeed",
# Plotting and visualization.
"benepar",
"spacy",
"spacy_fastlang",
"plotly",
"mapply",
],
"dev": {
"pre-commit>=3.2.0",
"black>=23.1.0",
"isort",
},
},
python_requires=">=3.10",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
)
| alpaca_farm-main | setup.py |
import argparse
import json
import logging
import os
import numpy as np
import torch
import transformers
from huggingface_hub import HfApi, hf_hub_download
from packaging import version
from alpaca_farm.models.reward_model import RewardConfig, RewardModel
from alpaca_farm.utils import stable_resize_token_embeddings_and_tokenizer
min_transformers_version = "4.29.2"
def get_alpaca_farm_model_names():
api = HfApi()
models = api.list_models(author="tatsu-lab", search="alpaca-farm")
model_names = [model.modelId for model in models]
model_names = [name.replace("tatsu-lab/alpaca-farm-", "").replace("-wdiff", "") for name in model_names]
return model_names
def build_argparse(model_names):
parser = argparse.ArgumentParser("Download AlpacaFarm models")
parser.add_argument("--llama-7b-hf-dir", type=str, required=True)
parser.add_argument("--alpaca-farm-model-name", choices=model_names + ["all"], default="all", required=True)
parser.add_argument("--models-save-dir", default="./pretrained_models", type=str)
parser.add_argument("--device", default="cpu", type=str)
parser.add_argument("--path-to-sft10k", type=str, help="Necessary for reconstructing reward models.")
args = parser.parse_args()
if args.path_to_sft10k is None:
args.path_to_sft10k = os.path.join(args.models_save_dir, "sft10k")
return args
def load_weight_diff(hf_hub_name, is_reward_model=False, device="cpu", path_to_sft10k=None):
if is_reward_model:
model_tuned = RewardModel.from_pretrained(
hf_hub_name,
device_map={"": torch.device(device)},
torch_dtype=torch.float32,
flash_attn=False,
config=RewardConfig(backbone_model_name_or_path=path_to_sft10k),
)
else:
model_tuned = transformers.AutoModelForCausalLM.from_pretrained(
hf_hub_name, device_map={"": torch.device(device)}, torch_dtype=torch.float32
)
tokenizer_tuned = transformers.AutoTokenizer.from_pretrained(hf_hub_name)
return model_tuned.eval(), tokenizer_tuned
def load_raw_model(model_dir, device="cpu"):
config_path = os.path.join(model_dir, "config.json")
config = json.load(open(config_path, "r"))
transformers_version = config["transformers_version"]
    if version.parse(transformers_version) < version.parse(min_transformers_version):
logging.warning(
f"Your base LLaMA checkpoint is converted with transformers=={transformers_version}, "
f"but transformers>={min_transformers_version} is expected. "
f"This may produce a corrupted checkpoint and lead to unexpected behavior. "
f"Please regenerate your base LLaMA checkpoint with transformers>={min_transformers_version}."
)
model_raw = transformers.AutoModelForCausalLM.from_pretrained(
model_dir, device_map={"": torch.device(device)}, torch_dtype=torch.float32
)
tokenizer_raw = transformers.AutoTokenizer.from_pretrained(model_dir)
if tokenizer_raw.pad_token is None:
stable_resize_token_embeddings_and_tokenizer(
model=model_raw, tokenizer=tokenizer_raw, special_tokens_dict=dict(pad_token="[PAD]")
)
return model_raw.eval(), tokenizer_raw
def reconstruct_tuned_model(model_tuned, model_raw, is_reward_model=False):
# modifies model_tuned in-place
state_dict_diff = model_tuned.state_dict()
state_dict_raw = model_raw.state_dict()
if is_reward_model:
# reward model adds nesting to main transformer
state_dict_raw = {f"backbone_model.{k}": v for k, v in state_dict_raw.items()}
for key in state_dict_raw:
if state_dict_raw[key].size() != state_dict_diff[key].size():
# weights with a size mismatch are not diff'd in the upload
continue
state_dict_diff[key].add_(state_dict_raw[key])
def integrity_check(model_tuned, hf_hub_name):
model_sum = sum(param.sum() for param in model_tuned.state_dict().values()).item()
model_sum_file = hf_hub_download(repo_id=hf_hub_name, filename="model_sum.txt")
with open(model_sum_file, "r") as f:
model_sum_hf_hub = float(f.read())
return np.isclose(model_sum_hf_hub, model_sum)
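def _weight_diff_recovery_sketch():
    # Illustrative sketch of the recovery arithmetic used above (the tensors are made-up
    # stand-ins for a single state-dict entry, not real model weights): the released
    # "-wdiff" checkpoint stores tuned - raw, and recovery adds the raw weights back in.
    raw = torch.tensor([0.10, -0.20, 0.30])    # base LLaMA weight
    tuned = torch.tensor([0.15, -0.25, 0.05])  # fine-tuned weight (never released directly)
    diff = tuned - raw                         # what the weight-diff checkpoint would hold
    recovered = diff.clone().add_(raw)         # mirrors state_dict_diff[key].add_(state_dict_raw[key])
    return torch.allclose(recovered, tuned)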
if __name__ == "__main__":
model_names = get_alpaca_farm_model_names()
args = build_argparse(model_names)
model_names = model_names if args.alpaca_farm_model_name == "all" else [args.alpaca_farm_model_name]
for model_name in model_names:
print("Downloading", model_name)
hf_hub_name = f"tatsu-lab/alpaca-farm-{model_name}-wdiff"
is_reward_model = "reward-model" in model_name
save_dir = os.path.join(args.models_save_dir, model_name)
model_tuned, tokenizer_tuned = load_weight_diff(hf_hub_name, is_reward_model, args.device, args.path_to_sft10k)
model_raw, tokenizer_raw = load_raw_model(args.llama_7b_hf_dir, args.device)
reconstruct_tuned_model(model_tuned, model_raw, is_reward_model)
if not integrity_check(model_tuned, hf_hub_name):
print("Model weights integrity check failed. Did you use the latest llama-7b HF weights?")
model_tuned.save_pretrained(save_dir)
tokenizer_tuned.save_pretrained(save_dir)
print("Downloaded to", save_dir)
| alpaca_farm-main | pretrained_models/recover_model_weights.py |
import torch
from alpaca_farm import torch_ops
def test_batch_select():
input = torch.tensor(
[
[0, 1, 2],
[3, 0, 9],
[6, 7, 8],
]
)
index = torch.tensor([[0, 1], [1, 0], [0, 0]])
actual = torch_ops.batch_select(input, index)
expected = torch.tensor([[0, 1], [0, 3], [6, 6]])
assert actual.eq(expected).all()
def test_pad_sequence_from_left():
sequences = [
torch.tensor([0.0, 1.0, 2.0]),
torch.tensor(
[
3.0,
]
),
torch.tensor(
[
6.0,
7.0,
]
),
]
expected = torch.tensor([[0.0, 1.0, 2.0], [-1.0, -1.0, 3.0], [-1.0, 6.0, 7.0]])
actual = torch_ops.pad_sequence_from_left(sequences, batch_first=True, padding_value=-1)
torch.testing.assert_close(actual, expected)
| alpaca_farm-main | tests/test_torch_ops.py |
import transformers
from alpaca_farm import utils
def test_stable_resize_token_embeddings():
model_name_or_paths = (
"gpt2", # Tied weights.
"/juice5/scr5/nlp/llama_model/llama_hf_latest/llama-teeny", # Untied weights.
)
for model_name_or_path in model_name_or_paths:
model: transformers.PreTrainedModel = transformers.AutoModelForCausalLM.from_pretrained(model_name_or_path)
utils.stable_resize_token_embeddings(
model, target_size=model.get_input_embeddings().weight.size(0) + 10, jitter_new_embeddings=True
)
| alpaca_farm-main | tests/test_utils.py |
import fire
import pytest
import torch
import tqdm
import transformers
from ml_swissknife import utils
from torch import nn
from transformers.models.opt import modeling_opt
from transformers.utils import logging
from alpaca_farm import constants
from alpaca_farm.flash_models import flash_opt
logger = logging.get_logger(__name__)
# --- Include standard models to compare activations against and help debugging ---
class OPTDecoderLayerNF(modeling_opt.OPTDecoderLayer):
pass
class OPTDecoderNF(modeling_opt.OPTDecoder):
def __init__(self, config: modeling_opt.OPTConfig):
super().__init__(config)
self.layers = nn.ModuleList([OPTDecoderLayerNF(config) for _ in range(config.num_hidden_layers)])
self.post_init()
def forward(
self,
*args,
**kwargs,
):
out = super(OPTDecoderNF, self).forward(*args, **kwargs)
# print(out.past_key_values[0][0][:, :, -1].sum())
return out
class OPTModelNF(modeling_opt.OPTModel):
def __init__(self, config: modeling_opt.OPTConfig):
super().__init__(config)
self.decoder = OPTDecoderNF(config)
self.post_init()
class OPTForCausalLMNF(modeling_opt.OPTForCausalLM):
def __init__(self, config):
super().__init__(config)
self.model = OPTModelNF(config)
self.post_init()
# --- End of reckless repetition ---
@pytest.mark.parametrize("padding_side", ("left", "right"))
@pytest.mark.parametrize("dtype", (torch.float16, torch.bfloat16))
@torch.inference_mode()
def test_forward(dtype, padding_side):
    # For some reason, the intermediate checks pass (within each Transformer block, the attention outputs are asserted to be similar),
    # but the final logit check doesn't pass.
model_name = "facebook/opt-125m"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
tokenizer.padding_side = padding_side
tensors = tokenizer(
["i have a good ", "this is a very long sentence that is very long and "],
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
print(f'input size: {tensors["input_ids"].shape}')
model1 = flash_opt.OPTForCausalLM.from_pretrained(model_name).to(device).eval()
model2 = modeling_opt.OPTForCausalLM.from_pretrained(model_name).to(device).eval()
with torch.autocast(device_type="cuda", dtype=dtype, enabled=True):
out1 = model1(**tensors, output_hidden_states=True)
out2 = model2(**tensors, output_hidden_states=True)
# Outputs are only guaranteed to match at non-padding locations. Clear irrelevant values.
def clear_padded(tensor):
tensor = tensor.masked_fill(~tensors["attention_mask"][..., None].bool(), 0.0)
return tensor
# Error accumulates! The diff for later hidden states is much larger.
atol = 1e-2 if dtype == torch.float16 else 1e-1
rtol = 0
for h1, h2 in utils.zip_(out1.hidden_states, out2.hidden_states):
h1, h2 = tuple(clear_padded(tensor) for tensor in (h1, h2))
torch.testing.assert_close(h1, h2, atol=atol, rtol=rtol)
def all_test_forward(): # This function is not called by pytest.
for dtype in (torch.float16, torch.bfloat16):
for padding_side in ("left", "right"):
test_forward(dtype, padding_side)
@torch.inference_mode()
def test_decoding():
model_name = "facebook/opt-125m"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    # Batch decoding requires left pad, because with right pad, your next-token logits could be based on the embedding of
    # a pad token, which is wrong (even though the OPT model increments the position id correctly).
# In general, any decoder-only HF transformer requires left pad for batch decoding.
tokenizer.padding_side = "left"
tensors = tokenizer(
["i have a good ", "this is a very long sentence that is very long and "],
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
print(f'input size: {tensors["input_ids"].shape}')
model1: transformers.OPTForCausalLM = flash_opt.OPTForCausalLM.from_pretrained(model_name).to(device).eval()
model2: transformers.OPTForCausalLM = OPTForCausalLMNF.from_pretrained(model_name).to(device).eval()
with torch.autocast(device_type="cuda", dtype=torch.float16, enabled=True):
# greedy
out1 = model1.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=100,
do_sample=False,
num_beams=1,
)
text = tokenizer.batch_decode(out1, skip_special_tokens=True)
print(text)
out2 = model2.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=100,
do_sample=False,
num_beams=1,
)
text = tokenizer.batch_decode(out2, skip_special_tokens=True)
print(text)
assert torch.eq(out1, out2).all().item()
# temperature
out = model1.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=20,
do_sample=True,
temperature=0.7,
top_p=0.9,
num_return_sequences=3,
)
text = tokenizer.batch_decode(out, skip_special_tokens=True)
print(text)
out = model2.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=20,
do_sample=True,
temperature=0.7,
top_p=0.9,
num_return_sequences=3,
)
text = tokenizer.batch_decode(out, skip_special_tokens=True)
print(text)
@torch.inference_mode()
def profile_decoding():
# For short sequences, the mixed flash/non-flash approach is still slower.
model_name = "facebook/opt-1.3b"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, cache_dir=constants.DEFAULT_CACHE_DIR)
tokenizer.padding_side = "left"
text = [
"i have a good ",
"this is a very long sentence that is very long and ",
"this is a very long sentence ",
"this is a very",
] * 16
tensors = tokenizer(
text,
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
print(f'input size: {tensors["input_ids"].shape}')
model1: transformers.OPTForCausalLM = flash_opt.OPTForCausalLM.from_pretrained(
model_name, cache_dir=constants.DEFAULT_CACHE_DIR
)
model2: transformers.OPTForCausalLM = OPTForCausalLMNF.from_pretrained(
model_name, cache_dir=constants.DEFAULT_CACHE_DIR
)
nbatches = 4
with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=True):
for model, msg in (
(model2, "native"),
(model1, "flash"),
):
torch.cuda.empty_cache()
model.to(device).eval()
model.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=500,
do_sample=False,
num_beams=1,
)
torch.cuda.synchronize()
with utils.Timer(msg):
for _ in tqdm.tqdm(range(nbatches)):
model.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=500,
do_sample=False,
num_beams=1,
)
torch.cuda.synchronize()
def main(task, *args, **kwargs):
globals()[task](*args, **kwargs)
if __name__ == "__main__":
# Plain python run for hacking.
# python -m tests.test_flash_opt --task all_test_forward
# pytest for systematic testing.
# pytest -xs tests/test_flash_opt.py
fire.Fire(main)
| alpaca_farm-main | tests/test_flash_opt.py |
import copy
from typing import Optional
import fire
import torch
import transformers
from flash_attn.bert_padding import unpad_input
from torch import nn
from transformers.models.llama import modeling_llama
from alpaca_farm import utils
from alpaca_farm.flash_models import apex_patch, flash_llama
class LLaMADecoderLayerNF(modeling_llama.LlamaDecoderLayer):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config)
def forward(self, *args, **kwargs):
return super().forward(*args, **kwargs)
class LLaMAModelNF(transformers.LlamaModel):
def __init__(self, config):
super().__init__(config)
self.layers = nn.ModuleList([LLaMADecoderLayerNF(config) for _ in range(config.num_hidden_layers)])
def forward(self, *args, **kwargs):
outputs = super().forward(*args, **kwargs)
print(outputs.past_key_values[0][0].sum())
return outputs
class LLaMAForCausalLMNF(transformers.LlamaForCausalLM):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config)
self.model = LLaMAModelNF(config)
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
    Make causal mask used for causal (autoregressive) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
def _prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(inputs_embeds.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
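def _decoder_mask_shapes_sketch():
    # Small illustration of the helpers above (values are arbitrary assumptions): a
    # [bsz, seq_len] padding mask plus the causal mask become one additive mask of shape
    # [bsz, 1, tgt_len, src_len], with large negative values at masked positions.
    attention_mask = torch.tensor([[1, 1, 0]])  # last position is padding
    dummy_embeds = torch.zeros(1, 3, 4)
    combined = _prepare_decoder_attention_mask(attention_mask, (1, 3), dummy_embeds, 0)
    padded_key_masked = bool((combined[0, 0, :, 2] < -1e30).all())
    return combined.shape, padded_key_masked    # torch.Size([1, 1, 3, 3]), True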
@torch.inference_mode()
def test_llama_attention(dtype=torch.float16):
# Test flash and standard attention produce comparable results.
# Right pad only.
device = torch.device("cuda")
batch_size, original_seqlen, num_heads, head_dim = 4, 13, 8, 32
hidden_size = num_heads * head_dim
seqlens = torch.randint(low=1, high=original_seqlen, size=(batch_size,), device=device)
attention_mask = torch.arange(original_seqlen, device=device)[None, :] < seqlens[:, None]
# TODO(lxuechen): Test with past_key_values_length.
position_ids = attention_mask.long().cumsum(-1) - 1
is_selected = attention_mask == 1
flash_position_ids = torch.cat(
[
this_position_ids[this_is_selected]
for this_position_ids, this_is_selected in utils.zip_(position_ids, is_selected)
]
)
nonflash_position_ids = position_ids.masked_fill_(attention_mask == 0, 1)
hidden_states = torch.randn(batch_size, original_seqlen, hidden_size, device=device, dtype=dtype)
hidden_states_unpad, indices, cu_seqlens, max_s = unpad_input(hidden_states, attention_mask)
expanded_attention_mask = _prepare_decoder_attention_mask(
attention_mask, (batch_size, original_seqlen), hidden_states, 0
)
config = modeling_llama.LlamaConfig(
hidden_size=hidden_size,
intermediate_size=hidden_size * 4,
num_hidden_layers=1,
num_attention_heads=num_heads,
)
block = flash_llama.LlamaAttention(config=config).to(device)
# Create a small dummy model just for creating rotary tensors.
dummy_model = flash_llama.LlamaModel(config).to(device)
rotary_tensors = dummy_model._make_rotary_tensors(flash_position_ids)
with torch.cuda.amp.autocast(dtype=dtype):
out1, _, _ = block.forward(
hidden_states=hidden_states_unpad,
seqlens=seqlens,
cu_seqlens=cu_seqlens,
rotary_tensors=rotary_tensors,
)
out2, _, _ = super(flash_llama.LlamaAttention, block).forward(
hidden_states=hidden_states,
attention_mask=expanded_attention_mask,
position_ids=nonflash_position_ids,
)
out2, _, _, _ = unpad_input(out2, attention_mask)
torch.testing.assert_close(out1, out2, atol=1e-3, rtol=0.0)
print(".")
@torch.inference_mode()
def test_decoding():
# model_name = "/juice5/scr5/nlp/crfm/human-feedback/models/selfinstruct/llama-teeny"
model_name = "/self/nlp/scr-sync/nlp/crfm/human-feedback/models/selfinstruct/sft_v5_llama_7b_regen_v7_3ep/"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    # Batch decoding requires left pad, because with right pad, your next-token logits could be based on the embedding of
    # a pad token, which is wrong (even though the model increments the position ids correctly).
# In general, any decoder-only HF transformer requires left pad for batch decoding.
tokenizer.padding_side = "left"
clone_tokenizer = copy.deepcopy(tokenizer)
model1 = flash_llama.LlamaForCausalLM.from_pretrained(
model_name, device_map={"": device}, low_cpu_mem_usage=True
).eval()
model2 = transformers.LlamaForCausalLM.from_pretrained(
model_name, device_map={"": device}, low_cpu_mem_usage=True
).eval()
if tokenizer.pad_token is None:
utils.stable_resize_token_embeddings_and_tokenizer(
special_tokens_dict=dict(pad_token="[PAD]"),
tokenizer=tokenizer,
model=model1,
)
utils.stable_resize_token_embeddings_and_tokenizer(
special_tokens_dict=dict(pad_token="[PAD]"),
tokenizer=clone_tokenizer,
model=model2,
)
tensors = tokenizer(
["i have a good ", "this is a very long sentence that is very long and "],
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
print(f'input size: {tensors["input_ids"].shape}')
with torch.autocast(device_type="cuda", dtype=torch.float16, enabled=True):
# greedy
out1 = model1.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=100,
do_sample=False,
num_beams=1,
)
text = tokenizer.batch_decode(out1, skip_special_tokens=True)
print(text)
out2 = model2.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=100,
do_sample=False,
num_beams=1,
)
text = tokenizer.batch_decode(out2, skip_special_tokens=True)
print(text)
print(torch.ne(out1, out2))
print(out1 - out2)
assert torch.eq(out1, out2).all().item()
# temperature
out = model1.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=20,
do_sample=True,
temperature=0.7,
top_p=0.9,
num_return_sequences=3,
)
text = tokenizer.batch_decode(out, skip_special_tokens=True)
print(text)
out = model2.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=20,
do_sample=True,
temperature=0.7,
top_p=0.9,
num_return_sequences=3,
)
text = tokenizer.batch_decode(out, skip_special_tokens=True)
print(text)
@torch.inference_mode()
def test_forward(dtype=torch.bfloat16, padding_side="left"):
model_name = "/self/nlp/scr-sync/nlp/huggingface_hub_llms/llama-7b/"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
tokenizer.padding_side = padding_side
clone_tokenizer = copy.deepcopy(tokenizer)
model1 = flash_llama.LlamaForCausalLM.from_pretrained(
model_name, device_map={"": device}, low_cpu_mem_usage=True
).eval()
model2 = transformers.LlamaForCausalLM.from_pretrained(
model_name, device_map={"": device}, low_cpu_mem_usage=True
).eval()
if tokenizer.pad_token is None:
utils.smart_tokenizer_and_embedding_resize(
special_tokens_dict=dict(pad_token="[PAD]"),
tokenizer=tokenizer,
model=model1,
)
utils.smart_tokenizer_and_embedding_resize(
special_tokens_dict=dict(pad_token="[PAD]"),
tokenizer=clone_tokenizer,
model=model2,
)
tensors = tokenizer(
["i have a good ", "this is a very long sentence that is very long and ", "what type of food do you like?"],
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
with torch.cuda.amp.autocast(dtype=dtype):
out1 = model1(**tensors, output_hidden_states=True, return_dict=True)
out2 = model2(**tensors, output_hidden_states=True, return_dict=True)
def clear_padded(tensor):
tensor.masked_fill_(~tensors["attention_mask"][..., None].bool(), 0.0)
# tensor[:2, ...] = 0.
return tensor
# Error accumulates! The diff for later hidden states is much larger.
atol = 1e-2 if dtype == torch.float16 else 1e-1
rtol = 0
for layer_idx, (h1, h2) in enumerate(utils.zip_(out1.hidden_states, out2.hidden_states)):
h1, h2 = tuple(clear_padded(tensor) for tensor in (h1, h2))
if not torch.allclose(h1, h2, atol=atol, rtol=rtol):
print(
f"found large error for hidden states at layer: {layer_idx}. "
f"maximum diff: {(h1 - h2).abs().max().item()}. "
f"num entries with large diff: {((h1 - h2).abs() > 3).sum()}. "
f"norm of diff: {(h1 - h2).norm().item()}. "
)
def all_test_forward(): # This function is not called by pytest.
for dtype in (torch.float16, torch.bfloat16):
for padding_side in ("left", "right"):
test_forward(dtype, padding_side)
def test_fused_rms_norm():
device = torch.device("cuda")
norm = transformers.models.llama.modeling_llama.LlamaRMSNorm(256).to(device=device)
x = torch.randn(16, 128, 256, device=device)
y1 = norm(x)
y2 = apex_patch.apex_rmsnorm(norm, x)
torch.testing.assert_close(y2, y1)
def main(task, **kwargs):
# python -m models.flash_llama test_llama_attention
# CUDA_VISIBLE_DEVICES=0 python -m tests.test_flash_llama test_llama_attention
# CUDA_VISIBLE_DEVICES=0 python -m tests.test_flash_llama test_decoding
# CUDA_VISIBLE_DEVICES=0 python -m tests.test_flash_llama test_forward
# CUDA_VISIBLE_DEVICES=0 python -m tests.test_flash_llama test_fused_rms_norm
globals()[task](**kwargs)
if __name__ == "__main__":
fire.Fire(main)
| alpaca_farm-main | tests/test_flash_llama.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import alpaca_eval.utils as eval_utils
import datasets
import fire
import pandas as pd
from alpaca_farm import constants, data_preprocessor, logging, openai_utils, types, utils
logger = logging.get_logger(__name__)
MODEL_TO_PROMPTS = {
"text-davinci-003": "examples/prompts/v0_inputs_noinputs.json",
"text-davinci-001": "examples/prompts/v0_inputs_noinputs.json",
"gpt-3.5-turbo-0301": "examples/prompts/chatml_v0_char1k_inputs_noinputs.json",
"gpt-4-0314": "examples/prompts/chatml_v0_char500_inputs_noinputs.json",
}
# TODO: all of this could just use alpaca_eval
def main_oai_baselines(
all_instructions: Optional[types.AnyData] = None,
model_name: str = "text-davinci-003",
prompt_path: Optional[str] = None,
save_path: Optional[str] = "examples/data/all_outputs/eval_{model_name}.json",
decoding_args: Optional[openai_utils.OpenAIDecodingArguments] = None,
batch_size: Optional[int] = None,
num_procs: Optional[int] = None,
**kwargs,
) -> pd.DataFrame:
"""Run the OAI baselines.
Parameters
----------
all_instructions : list of dict or DataFrame or Dataset, optional
The instructions to evaluate on. If None uses Farm's eval data
model_name : str, optional
OpenAI model to use for completion.
prompt_path : str, optional
Path to the prompt dictionary. If None, uses the default prompt for the model.
save_path : str, optional
Path to save the outputs to. {model_name} will be formatted. If None, does not save.
kwargs:
Additional arguments to pass to `openai_utils.openai_completion`.
"""
prompt_path = prompt_path or MODEL_TO_PROMPTS[model_name]
if all_instructions is None:
all_instructions = datasets.load_dataset(
"tatsu-lab/alpaca_farm",
"alpaca_farm_evaluation",
cache_dir=constants.DEFAULT_CACHE_DIR,
)["eval"]
prompts, list_dict_data, _ = data_preprocessor.format_prompt_with_data_frame(
df=eval_utils.convert_to_dataframe(all_instructions),
prompt_dict=utils.jload(prompt_path),
)
if openai_utils.requires_chatml(model_name):
decoding_args = decoding_args or openai_utils.OpenAIDecodingArgumentsChat(temperature=0.7, max_tokens=300)
num_procs = num_procs or 5
batch_size = batch_size or 1
else:
decoding_args = decoding_args or openai_utils.OpenAIDecodingArguments(temperature=0.7, max_tokens=300)
num_procs = num_procs or 1
batch_size = batch_size or 10
completions = openai_utils.openai_completion(
prompts=prompts,
decoding_args=decoding_args, # not useful, openai_completion should initialize this if None
return_text=True,
batch_size=batch_size,
model_name=model_name,
num_procs=num_procs,
**kwargs,
)
df_data = eval_utils.convert_to_dataframe(list_dict_data)
df_data["output"] = completions
df_data["generator"] = model_name
columns_to_keep = [
"instruction",
"input",
"output",
"generator",
"dataset",
"datasplit",
]
if save_path is not None:
logger.info(f"Saving to {save_path.format(model_name=model_name)}")
df_data[columns_to_keep].to_json(save_path.format(model_name=model_name), orient="records", indent=2)
return df_data[columns_to_keep]
if __name__ == "__main__":
fire.Fire(main_oai_baselines)
| alpaca_farm-main | examples/oai_baselines.py |
import os
import transformers
from accelerate import DistributedDataParallelKwargs
from alpaca_farm import accelerate_patch, data_utils, logging
from alpaca_farm.rl.quark_trainer import QuarkTrainer, make_models, make_tokenizer
from alpaca_farm.rl.quark_utils import DataArguments, TrainingArguments
logger = logging.get_logger(__name__)
def main():
os.environ["TOKENIZERS_PARALLELISM"] = "false"
parser = transformers.HfArgumentParser((DataArguments, TrainingArguments))
data_args, training_args = parser.parse_args_into_dataclasses()
accelerator = accelerate_patch.MyAccelerator(
gradient_accumulation_steps=training_args.gradient_accumulation_steps,
log_with=["wandb"],
even_batches=True, # Make sure the batch size on each device is the same.
split_batches=False, # Don't break a batch into smaller chunks.
step_scheduler_with_optimizer=False, # Untie optimizer and scheduler step.
# Value model might not use all parameters (e.g., lm-head) in the forward pass.
kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=True)],
)
accelerator.init_trackers(
training_args.wandb_project,
init_kwargs={"wandb": {"name": training_args.run_name}},
config=training_args.__dict__,
)
logger.warning(accelerator.state, main_process_only=False) # Each process log their own state.
tokenizer: transformers.PreTrainedTokenizer = make_tokenizer(args=training_args)
model_module: dict = make_models(tokenizer=tokenizer, args=training_args, accelerator=accelerator)
data_module: dict = data_utils.make_rl_data_module(
tokenizer=tokenizer, data_args=data_args, training_args=training_args
)
trainer = QuarkTrainer(
args=training_args,
accelerator=accelerator,
**data_module,
**model_module,
tokenizer=tokenizer,
)
trainer.train()
if __name__ == "__main__":
main()
| alpaca_farm-main | examples/rlhf_quark.py |
alpaca_farm-main | examples/__init__.py |
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import pathlib
from dataclasses import dataclass, field
from typing import List, Literal
import transformers
from alpaca_farm import common, constants, data_utils, logging
from alpaca_farm.models import reward_model
from alpaca_farm.reward_modeling_trainer import Trainer, compute_reward_modeling_metrics
logger = logging.get_logger(__name__)
@dataclass
class ModelArguments:
model_name_or_path: str = field(
default=None,
metadata={"help": "Name of or path to the base generative LM."},
)
@dataclass
class DataArguments:
dataset_path: str = field(default="tatsu-lab/alpaca_farm")
dataset_name: Literal["alpaca_human_preference", "alpaca_gpt4_preference", "alpaca_noisy_multi_preference"] = field(
default="alpaca_noisy_multi_preference",
metadata={"help": "Name of the dataset. Fetches the human or GPT-4 preference data."},
)
eval_size: int = field(
default=500,
metadata={"help": "Number of examples to split out from training to use for evaluation."},
)
prompt_dict_path: str = field(
default=pathlib.Path(__file__).parent / "prompts" / "v0_inputs_noinputs.json",
metadata={"help": "Path to the dictionary for the prompt to format examples."},
)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
pad_token: str = field(default=constants.DEFAULT_PAD_TOKEN)
cache_dir: str = field(default=constants.DEFAULT_CACHE_DIR)
wandb_project: str = field(default=constants.WANDB_PROJECT)
flash_attn: bool = field(default=False)
optim: str = field(default="adamw_torch")
model_max_length: int = field(
default=512,
metadata={
"help": "Maximum sequence length. Sequences will be left padded to this length always during training."
},
)
label_names: List[str] = field(
default_factory=lambda: ["index_0", "index_1", "choice"],
metadata={
"help": "Names of the labels in the dataset. "
"This is needed to get transformers.Trainer to not throw those tensors away before `compute_loss`."
"By default, the trainer throws away columns it doesn't recognize when creating the "
"`train_dataloader` (see `_remove_unused_columns`). "
},
)
padding: Literal["max_length", "longest"] = field(
default="longest",
metadata={
"help": "Padding strategy. If 'max_length', pads to `model_max_length` always; this might lead to some "
"redundant compute. If 'longest', pads to the longest sequence in the batch, capped by `model_max_length`."
},
)
initialize_model_on_cpu: bool = field(
default=False,
metadata={
"help": "Whether to initialize the model on CPU. "
"If True, models on all processes will be first initialized on CPU; this is RAM-costly but faster."
},
)
end_sequence_with_eos: bool = field(
default=False,
metadata={
"help": "Whether to end sequences with EOS. "
"Ending with EOS might help the reward model realize it's time to predict."
},
)
    resume_from_checkpoint: bool = field(default=False, metadata={"help": "If True, loads from the last checkpoint."})
use_fast_tokenizer: bool = field(
default=False,
metadata={
"help": "Use fast tokenizer if True. "
"Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. "
"Use fast tokenizer only if you can live with that."
},
)
def main():
parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
os.environ["WANDB_PROJECT"] = training_args.wandb_project
if training_args.deepspeed is not None:
ctx_mgr = contextlib.nullcontext()
device_map = None
low_cpu_mem_usage = None
elif training_args.initialize_model_on_cpu:
ctx_mgr = contextlib.nullcontext()
device_map = None
low_cpu_mem_usage = True
else:
ctx_mgr = common.staggered_object_creation(
local_rank=training_args.local_rank, world_size=training_args.world_size
)
device_map = {"": training_args.device.index}
low_cpu_mem_usage = True
with ctx_mgr:
config = reward_model.RewardConfig(backbone_model_name_or_path=model_args.model_name_or_path)
model = reward_model.RewardModel(
flash_attn=training_args.flash_attn,
fp16=training_args.fp16,
bf16=training_args.bf16,
low_cpu_mem_usage=low_cpu_mem_usage,
device_map=device_map,
config=config,
)
common.let_model_save_mem_when_zero_grad(model)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="left", # Ensure reward is always extracted at the last token embedding.
use_fast=training_args.use_fast_tokenizer,
)
tokenizer.padding = training_args.padding
data_module = data_utils.make_binary_reward_modeling_data_module(
tokenizer=tokenizer,
data_args=data_args,
training_args=training_args,
)
trainer = Trainer(
model=model,
tokenizer=tokenizer,
args=training_args,
compute_metrics=compute_reward_modeling_metrics,
**data_module,
)
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
logger.warning("hooray! training finished successfully! now on to model saving.", main_process_only=True)
trainer.evaluate()
trainer.save_state()
common.safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
logger.warning("hooray again! model saving worked.", main_process_only=True)
if __name__ == "__main__":
main()
| alpaca_farm-main | examples/reward_modeling.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import pathlib
from dataclasses import dataclass, field
from typing import List, Literal, Optional
import transformers
from transformers import Trainer
from alpaca_farm import common, constants, data_utils, logging, utils
logger = logging.get_logger(__name__)
@dataclass
class ModelArguments:
model_name_or_path: str = field(
default=None, metadata={"help": "Name to a huggingface native pretrained model or path to a model on disk."}
)
@dataclass
class DataArguments:
dataset_path: str = field(
default="tatsu-lab/alpaca_farm",
metadata={
"help": "Path to the dataset. Either points to a location on Hugging Face hub or a local folder. "
"If the path points to a local folder, the folder must be structured properly "
"(see documentation for datasets.load_dataset)."
},
)
dataset_name: Optional[str] = field(
default="alpaca_instructions",
metadata={"help": "Name of the dataset to load -- the argument `name` passed to `datasets.load_dataset`."},
)
train_splits: List[str] = field(
default_factory=lambda: ["sft"],
metadata={"help": "Splits to use for training. This must not be an empty list."},
)
eval_splits: Optional[List[str]] = field(
default_factory=lambda: ["val"],
metadata={
"help": "Splits to use for evaluation. "
"If None, empty, or the splits are not found in the dataset, no evaluation is performed."
},
)
prompt_dict_path: str = field(
default=pathlib.Path(__file__).parent / "prompts" / "v0_inputs_noinputs.json",
metadata={"help": "Path to the dictionary for the prompt to format examples."},
)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
pad_token: str = field(default=constants.DEFAULT_PAD_TOKEN)
cache_dir: str = field(default=constants.DEFAULT_CACHE_DIR)
wandb_project: str = field(default=constants.WANDB_PROJECT)
flash_attn: bool = field(default=False)
optim: str = field(default="adamw_torch")
model_max_length: int = field(
default=512,
metadata={
"help": "Maximum sequence length. Sequences will be right padded to this length (and possibly truncated)."
"Enforcing a consistent max length ensures memory usage is constant and predictable."
},
)
padding: Literal["max_length", "longest"] = field(
default="longest",
metadata={
"help": "Padding strategy. If 'max_length', pads to `model_max_length` always; this might lead to some "
"redundant compute. If 'longest', pads to the longest sequence in the batch, capped by `model_max_length`."
},
)
initialize_model_on_cpu: bool = field(
default=False,
metadata={
"help": "Whether to initialize the model on CPU. "
"If True, models on all processes will be first initialized on CPU; this is RAM-costly but faster."
},
)
resume_from_checkpoint: bool = field(default=False, metadata={"help": "If True, loads from the last checkpoint."})
use_fast_tokenizer: bool = field(
default=False,
metadata={
"help": "Use fast tokenizer if True. "
"Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. "
"Use fast tokenizer only if you can live with that."
},
)
def main():
parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
os.environ["WANDB_PROJECT"] = training_args.wandb_project
if training_args.deepspeed is not None:
ctx_mgr = contextlib.nullcontext()
device_map = None
low_cpu_mem_usage = None
elif training_args.initialize_model_on_cpu:
ctx_mgr = contextlib.nullcontext()
device_map = None
low_cpu_mem_usage = True
else:
ctx_mgr = common.staggered_object_creation(
local_rank=training_args.local_rank, world_size=training_args.world_size
)
device_map = {"": training_args.device.index}
low_cpu_mem_usage = True
with ctx_mgr:
model: transformers.PreTrainedModel = common.make_generative_lm(
model_name_or_path=model_args.model_name_or_path,
flash_attn=training_args.flash_attn,
fp16=training_args.fp16,
bf16=training_args.bf16,
config=transformers.AutoConfig.from_pretrained(model_args.model_name_or_path),
cache_dir=training_args.cache_dir,
low_cpu_mem_usage=low_cpu_mem_usage,
device_map=device_map,
)
common.let_model_save_mem_when_zero_grad(model)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right", # Ensures properly masking out the source tokens.
use_fast=training_args.use_fast_tokenizer,
)
tokenizer.padding = training_args.padding
# Collect special tokens. Only add if non-existent.
special_tokens_dict = dict(additional_special_tokens=[])
if tokenizer.pad_token is None:
special_tokens_dict["pad_token"] = training_args.pad_token
if tokenizer.eos_token is None:
special_tokens_dict["eos_token"] = constants.DEFAULT_EOS_TOKEN
if tokenizer.bos_token is None:
special_tokens_dict["bos_token"] = constants.DEFAULT_BOS_TOKEN
if tokenizer.unk_token is None:
special_tokens_dict["unk_token"] = constants.DEFAULT_UNK_TOKEN
utils.stable_resize_token_embeddings_and_tokenizer(model, tokenizer, special_tokens_dict)
data_module: dict = data_utils.make_supervised_data_module(
tokenizer=tokenizer,
data_args=data_args,
training_args=training_args,
)
# Tokenizer is only supplied so that it gets saved; this makes loading easier.
trainer = Trainer(
model=model,
tokenizer=tokenizer,
args=training_args,
**data_module,
)
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
logger.warning("hooray! training finished successfully! now on to model saving.", main_process_only=True)
trainer.save_state()
common.safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
logger.warning("hooray again! model saving worked.", main_process_only=True)
if __name__ == "__main__":
main()
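# Illustrative launch command (a sketch, not the official recipe): flag names map to the dataclass
# fields above and standard transformers.TrainingArguments; paths/values are placeholders.
#   torchrun --nproc_per_node=4 examples/supervised.py \
#     --model_name_or_path <path_to_base_model> \
#     --output_dir <save_dir> \
#     --bf16 True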
| alpaca_farm-main | examples/supervised.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import transformers
from accelerate import DistributedDataParallelKwargs
from alpaca_farm import accelerate_patch, data_utils, logging
from alpaca_farm.rl.ppo_trainer import PPOTrainer, make_models, make_tokenizer
from alpaca_farm.rl.ppo_utils import DataArguments, TrainingArguments
logger = logging.get_logger(__name__)
def main():
os.environ["TOKENIZERS_PARALLELISM"] = "false"
parser = transformers.HfArgumentParser((DataArguments, TrainingArguments))
data_args, training_args = parser.parse_args_into_dataclasses()
accelerator = accelerate_patch.MyAccelerator(
gradient_accumulation_steps=training_args.gradient_accumulation_steps,
log_with=["wandb"],
even_batches=True, # Make sure the batch size on each device is the same.
split_batches=False, # Don't break a batch into smaller chunks.
step_scheduler_with_optimizer=False, # Untie optimizer and scheduler step.
# Value model might not use all parameters (e.g., lm-head) in the forward pass.
kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=True)],
)
accelerator.init_trackers(
training_args.wandb_project,
init_kwargs={"wandb": {"name": training_args.run_name}},
config=training_args.__dict__,
)
logger.warning(accelerator.state, main_process_only=False) # Each process log their own state.
tokenizer: transformers.PreTrainedTokenizer = make_tokenizer(args=training_args)
model_module: dict = make_models(tokenizer=tokenizer, args=training_args, accelerator=accelerator)
data_module: dict = data_utils.make_rl_data_module(
tokenizer=tokenizer, data_args=data_args, training_args=training_args
)
trainer = PPOTrainer(
args=training_args,
accelerator=accelerator,
**data_module,
**model_module,
tokenizer=tokenizer,
)
trainer.train()
if __name__ == "__main__":
main()
| alpaca_farm-main | examples/rlhf_ppo.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import sys
from typing import Dict, Optional, Sequence, Union
import datasets
import fire
import pandas as pd
from alpaca_farm import data_preprocessor, distributed_utils, utils
from alpaca_farm.inference import decode, score
from alpaca_farm.types import AnyPath, AnyPathOrNone
sample_mode_formatter = "temperature={temperature},max_new_tokens={max_new_tokens},seed={seed}"
def run_decode(
decoder_name_or_path: AnyPath,
dataset_path="tatsu-lab/alpaca_farm",
dataset_name: Optional[str] = "alpaca_farm_evaluation",
split="eval",
prompt_dict_path=pathlib.Path(__file__).parent / "prompts" / "v0_inputs_noinputs.json",
output_path: AnyPathOrNone = None,
max_instances=sys.maxsize,
per_device_batch_size=4,
temperature=1.0,
max_new_tokens=300,
num_return_sequences=4,
mixed_precision=None,
tf32=False,
seed: Optional[int] = None,
):
"""Decode samples from the policy language model.
Args:
decoder_name_or_path: Name or path of the policy language model.
dataset_path: Path to the dataset for datasets.load_dataset.
dataset_name: Name of the dataset for datasets.load_dataset.
prompt_dict_path: Path to the prompt dictionary for formatting the instruction and input into a string.
output_path: Optional path to save the decoding results.
split: Split of the dataset to decode.
max_instances: Maximum number of instances to decode.
per_device_batch_size: Batch size for decoding on each device.
temperature: Temperature for decoding.
max_new_tokens: Maximum number of new tokens to generate.
seed: Random seed for decoding.
num_return_sequences: Number of sequences to return per each prompt.
mixed_precision: Mixed precision mode for the reward model.
tf32: Whether to use tensorfloat32 for matrix multiplication.
Returns:
List of dict data with keys 'instruction', 'input', 'output', 'prompt', 'decoder_name_or_path', and 'sample_mode'.
If num_return_sequences > 1, each 'output' is a list of strings. Otherwise, it is a string.
"""
dataset = datasets.load_dataset(dataset_path, dataset_name)
prompts, list_dict_data, metadata = data_preprocessor.format_prompt_with_data_frame(
df=pd.DataFrame(dataset[split]),
prompt_dict=utils.jload(prompt_dict_path),
)
prompts, list_dict_data = prompts[:max_instances], list_dict_data[:max_instances]
outputs = decode.decode_prompts_with_huggingface(
model_name_or_path=decoder_name_or_path,
prompts=prompts,
decoding_args=decode.HFDecodingArguments(
temperature=temperature, max_new_tokens=max_new_tokens, num_return_sequences=num_return_sequences
),
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
tf32=tf32,
seed=seed,
)
sample_mode = sample_mode_formatter.format(temperature=temperature, max_new_tokens=max_new_tokens, seed=seed)
return_list_dict_data = [
{
"instruction": dict_data["instruction"],
"input": dict_data["input"],
"output": output,
"prompt": prompt,
"decoder_name_or_path": decoder_name_or_path,
"sample_mode": sample_mode,
}
for dict_data, prompt, output in utils.zip_(list_dict_data, prompts, outputs)
]
if output_path is not None and distributed_utils.is_main_process():
utils.jdump(return_list_dict_data, output_path)
return return_list_dict_data
def run_rerank(
list_dict_data_or_path: Union[Sequence[Dict], AnyPath],
scorer_name_or_path: AnyPath,
output_path: AnyPathOrNone = None,
per_device_batch_size=4,
rerank_top_k=1,
mixed_precision=None,
tf32=False,
flash_attn=False,
):
"""Rerank sequences with reward model.
Args:
list_dict_data_or_path: Sequence of dict data or a path to it.
Each dict should have the key 'prompt' (a string) and the key 'output' (a list of completion strings); each prompt is concatenated with its completions before scoring.
scorer_name_or_path: Name or path of the reward model.
output_path: Optional path to save the rerank results.
per_device_batch_size: Batch size for reranking for each device.
rerank_top_k: Keep top k among the reranked sequences.
mixed_precision: Mixed precision mode for the reward model.
tf32: Whether to use tensorfloat32 for matrix multiplication.
flash_attn: Turns on flash_attn for the reward model if True.
Returns:
Rerank results as a list of dict data.
"""
if isinstance(list_dict_data_or_path, (str, pathlib.Path)):
list_dict_data_or_path = utils.jload(list_dict_data_or_path)
sequences = [
[dict_data["prompt"] + output for output in dict_data["output"]] for dict_data in list_dict_data_or_path
]
# TODO(lxuechen): FlashAttention reward model is not correctly loaded.
top_sequences, top_indices = score.rerank_sequences_with_huggingface(
sequences=sequences,
model_name_or_path=scorer_name_or_path,
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
tf32=tf32,
flash_attn=flash_attn,
rerank_top_k=rerank_top_k,
)
return_list_dict_data = [
{
"instruction": dict_data["instruction"],
"input": dict_data["input"],
"output": dict_data["output"],
"top_sequence": top_sequence,
"top_index": top_index,
"scorer_name_or_path": scorer_name_or_path,
}
for top_sequence, top_index, dict_data in utils.zip_(top_sequences, top_indices, list_dict_data_or_path)
]
if output_path is not None and distributed_utils.is_main_process():
utils.jdump(return_list_dict_data, output_path)
return return_list_dict_data
def run_best_of_n(
decoder_name_or_path: AnyPath,
scorer_name_or_path: AnyPath,
output_path: AnyPathOrNone = None,
prompt_dict_path=pathlib.Path(__file__).parent / "prompts" / "v0_inputs_noinputs.json",
split="val",
per_device_batch_size=4,
max_instances=sys.maxsize,
temperature=1.0,
num_return_sequences=4,
max_new_tokens=300,
mixed_precision=None,
tf32=False,
flash_attn=False,
):
"""Chain together decoding and rerank."""
decode_return_list_dict_data = run_decode(
decoder_name_or_path=decoder_name_or_path,
prompt_dict_path=prompt_dict_path,
split=split,
max_instances=max_instances,
per_device_batch_size=per_device_batch_size,
temperature=temperature,
num_return_sequences=num_return_sequences,
max_new_tokens=max_new_tokens,
mixed_precision=mixed_precision,
tf32=tf32,
)
rerank_return_list_dict_data = run_rerank(
list_dict_data_or_path=decode_return_list_dict_data,
scorer_name_or_path=scorer_name_or_path,
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
tf32=tf32,
flash_attn=flash_attn,
)
# Convert best-k-of-n into best-of-n.
return_list_dict_data = [
{
"instruction": rerank_dict_data["instruction"],
"input": rerank_dict_data["input"],
"output": rerank_dict_data["output"][rerank_dict_data["top_index"][0]],
"decoder_name_or_path": decoder_name_or_path,
"scorer_name_or_path": scorer_name_or_path,
"sample_mode": f"best_of_n_{decode_data_dict['sample_mode']}",
}
for decode_data_dict, rerank_dict_data in utils.zip_(decode_return_list_dict_data, rerank_return_list_dict_data)
]
if output_path is not None and distributed_utils.is_main_process():
utils.jdump(return_list_dict_data, output_path)
return return_list_dict_data
def main(task, **kwargs):
globals()[task](**kwargs)
if __name__ == "__main__":
fire.Fire(main)
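# Illustrative CLI usage via fire (a sketch; model paths and the output path are placeholders):
#   python examples/best_of_n.py --task run_best_of_n \
#     --decoder_name_or_path <policy_checkpoint> \
#     --scorer_name_or_path <reward_model_checkpoint> \
#     --output_path <results.json>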
| alpaca_farm-main | examples/best_of_n.py |
import re
import sys
def update_version(file_path, new_version):
# Read in the file
with open(file_path, "r") as file:
filedata = file.read()
# Replace the target string
version_regex = r"__version__ = ['\"]([^'\"]*)['\"]"
filedata = re.sub(version_regex, f"__version__ = '{new_version}'", filedata)
# Write the file out again
with open(file_path, "w") as file:
file.write(filedata)
if __name__ == "__main__":
# Get the version from command line arguments
new_version = sys.argv[1]
# Update the version
update_version("src/alpaca_farm/__init__.py", new_version)
| alpaca_farm-main | .github/workflows/set_version.py |
# Copyright 2023 The Alpaca Team
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .distributed_utils import is_main_process
class MultiProcessAdapter(logging.LoggerAdapter):
"""
An adapter to assist with logging in multiprocess settings.
`log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
or only on the main process. Default is `main_process_only=True`.
This is almost like the logger in accelerate, but avoids the accelerate dependency.
"""
@staticmethod
def _should_log(main_process_only):
process_index_flag = is_main_process()
return not main_process_only or (main_process_only and process_index_flag)
def log(self, level, msg, *args, **kwargs):
"""
Delegates logger call after checking if we should log.
Accepts a new kwarg `main_process_only`, which dictates whether the message is logged on all processes
or only on the main process. Defaults to `True` if not passed.
"""
main_process_only = kwargs.pop("main_process_only", True)
if self.isEnabledFor(level) and self._should_log(main_process_only):
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def get_logger(name: str, log_level: str = None):
"""
Returns a `logging.Logger` for `name` that can handle multiprocessing.
**By default, the logger only logs on the main process -- the process with env var LOCAL_RANK=0.**
If a log should be called on all processes, pass `main_process_only=False`
Args:
name (`str`):
The name for the logger, such as `__file__`
log_level (`str`, *optional*):
The log level to use. If not passed, the logger's existing level is kept.
Example:
```python
>>> from alpaca_farm.logging import get_logger
>>> logger = get_logger(__name__)
>>> logger.info("My log", main_process_only=False)
>>> logger.debug("My log", main_process_only=True)
>>> logger = get_logger(__name__, log_level="DEBUG")
>>> logger.info("My log")
>>> logger.debug("My second log")
```
"""
logger = logging.getLogger(name)
if log_level is not None:
logger.setLevel(log_level.upper())
return MultiProcessAdapter(logger, {})
class disable_logging(object):
def __enter__(self, *args, **kwargs):
logging.disable(logging.CRITICAL)
return self
def __exit__(self, *args, **kwargs):
logging.disable(logging.NOTSET)
def __call__(self, func):
def decorator(*args, **kwargs):
with self:
return func(*args, **kwargs)
return decorator
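# Illustrative usage of disable_logging as a context manager or a decorator
# (`noisy_call` and `quiet_fn` are placeholders):
#   with disable_logging():
#       noisy_call()
#
#   @disable_logging()
#   def quiet_fn():
#       ...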
| alpaca_farm-main | src/alpaca_farm/logging.py |
# Copyright 2023 The Alpaca Team
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import nn, optim
from transformers import Trainer
from transformers.optimization import get_scheduler
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.trainer_pt_utils import get_parameter_names
from . import logging
logger = logging.get_logger(__name__)
def create_optimizer(args, model: nn.Module, optimizer: Optional[optim.Optimizer] = None):
"""Create optimizer for trainer.
This is detached version of the `Trainer.create_optimizer` method.
We don't support sagemaker and fairscale for simplicity.
Reference:
https://github.com/huggingface/transformers/blob/main/src/transformers/trainer.py
"""
opt_model = model
if optimizer is None:
decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)],
"weight_decay": args.weight_decay,
},
{
"params": [
p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(args)
optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if optimizer_cls.__name__ == "Adam8bit":
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
skipped = 0
for module in opt_model.modules():
if isinstance(module, nn.Embedding):
skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
print(f"skipped {module}: {skipped / 2 ** 20}M params")
manager.register_module_override(module, "weight", {"optim_bits": 32})
logger.debug(f"bitsandbytes: will optimize {module} in fp32")
print(f"skipped: {skipped / 2 ** 20}M params")
return optimizer
def create_scheduler(args, optimizer, lr_scheduler, num_training_steps):
"""Create scheduler for trainer.
This is detached version of the `Trainer.create_scheduler` method.
Reference:
https://github.com/huggingface/transformers/blob/main/src/transformers/trainer.py
"""
if lr_scheduler is None:
lr_scheduler = get_scheduler(
args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
return lr_scheduler
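# Illustrative usage (a sketch: `args` is a transformers.TrainingArguments instance,
# `model` and the step count are placeholders):
#   optimizer = create_optimizer(args, model)
#   lr_scheduler = create_scheduler(args, optimizer, lr_scheduler=None, num_training_steps=1000)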
| alpaca_farm-main | src/alpaca_farm/trainer_utils.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IGNORE_INDEX = -100
DEFAULT_PAD_TOKEN = "[PAD]"
DEFAULT_EOS_TOKEN = "</s>"
DEFAULT_BOS_TOKEN = "<s>"
DEFAULT_UNK_TOKEN = "<unk>"
DEFAULT_CACHE_DIR = None
WANDB_PROJECT = "alpaca_farm"
MODEL_NAME_TO_CONFIG = {
"llama-7b": {"model_type": "llama", "num_hidden_layers": 32, "hidden_size": 4096},
"llama-13b": {"model_type": "llama", "num_hidden_layers": 40, "hidden_size": 5120},
"llama-30b": {"model_type": "llama", "num_hidden_layers": 60, "hidden_size": 6656},
"llama-65b": {"model_type": "llama", "num_hidden_layers": 80, "hidden_size": 8192},
}
MODEL_NAME_TO_FAMILY = {
"distilgpt2": "gpt2",
"gpt2": "gpt2",
"gpt2-medium": "gpt2",
"gpt2-large": "gpt2",
"gpt2-xl": "gpt2",
"facebook/opt-iml-max-1.3b": "opt",
"facebook/opt-125m": "opt",
"facebook/opt-350m": "opt",
"facebook/opt-1.3b": "opt",
"facebook/opt-2.7b": "opt",
"facebook/opt-6.7b": "opt",
"facebook/opt-13b": "opt",
"facebook/opt-30b": "opt",
"llama-teeny": "llama",
"llama-7b": "llama",
"llama-13b": "llama",
"llama-30b": "llama",
"llama-65b": "llama",
"EleutherAI/pythia-2.8b-deduped": "pythia",
"EleutherAI/pythia-6.9b-deduped": "pythia",
"EleutherAI/pythia-12b-deduped": "pythia",
}
# Huggingface model naming convention.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
TRAINING_ARGS_NAME = "training_args.bin"
TRAINER_STATE_NAME = "trainer_state.json"
OPTIMIZER_NAME = "optimizer.pt"
SCHEDULER_NAME = "scheduler.pt"
SCALER_NAME = "scaler.pt"
| alpaca_farm-main | src/alpaca_farm/constants.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.1.8'
| alpaca_farm-main | src/alpaca_farm/__init__.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
from typing import Any, List, Optional, Sequence, Union
import datasets
import pandas as pd
import torch
from torch import Tensor
AnyPath = Union[str, os.PathLike, pathlib.Path]
AnyPathOrNone = Optional[AnyPath]
AnyData = Union[Sequence[dict[str, Any]], pd.DataFrame, datasets.Dataset]
Numeric = Union[int, float]
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
TensorList = List[Tensor]
StrOrStrs = Union[str, Sequence[str]]
if torch.__version__ < "2.0.0":
LRScheduler = torch.optim.lr_scheduler._LRScheduler # noqa
else:
LRScheduler = torch.optim.lr_scheduler.LRScheduler
| alpaca_farm-main | src/alpaca_farm/types.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import einops
import torch
import torch.nn.functional as F
import transformers
from transformers.trainer_utils import EvalPrediction
from alpaca_farm import common, torch_ops
class Trainer(transformers.Trainer):
def compute_loss(self, model, inputs, return_outputs=False):
# input_ids, attention_mask each of size (bsz, num_candidates, seq_len).
# index_0, index_1 each of size (bsz, num_pairs); indexes into input_ids.
# choice of size (bsz, num_pairs); 1 if index_1's seq is chosen, 0 otherwise.
input_ids, attention_mask, index_0, index_1, choice = common.unpack_dict(
inputs, keys=("input_ids", "attention_mask", "index_0", "index_1", "choice")
)
num_candidates, num_pairs = input_ids.size(1), choice.size(1)
input_ids_flat, attention_mask_flat = tuple(
einops.rearrange(x, "b c l -> (b c) l") for x in (input_ids, attention_mask)
)
outputs = model(input_ids=input_ids_flat, attention_mask=attention_mask_flat)
rewards_flat = outputs.rewards
rewards = einops.rearrange(rewards_flat, "(b c) -> b c", c=num_candidates) # Size: (bsz, num_candidates).
rewards_0, rewards_1 = tuple(
torch_ops.batch_select(rewards, index) for index in (index_0, index_1)
) # Size: (bsz, num_pairs).
logits = rewards_1 - rewards_0 # Size: (bsz, num_pairs).
# Type casting of `choice` is due to amp.autocast context manager.
loss = F.binary_cross_entropy_with_logits(logits, choice.to(logits.dtype), reduction="mean")
return (loss, dict(logits=logits)) if return_outputs else loss
def compute_reward_modeling_metrics(eval_prediction: EvalPrediction) -> Dict:
# eval_prediction.label_ids is a tuple that matches up with `training_args.label_names`.
logits = torch.tensor(eval_prediction.predictions).squeeze(-1)
labels = torch.tensor(eval_prediction.label_ids[-1]).squeeze(-1)
predictions = (logits >= 0.0).long()
accuracy = predictions.eq(labels).float().mean().item()
label_positive_rate = (labels == 1).float().mean().item()
positive_rate = (predictions == 1).float().mean().item()
true_positive_rate = (predictions * labels).float().sum().item() / labels.sum().item()
false_positive_rate = (predictions * (1 - labels)).float().sum().item() / (1 - labels).sum().item()
return dict(
accuracy=accuracy,
label_positive_rate=label_positive_rate,
positive_rate=positive_rate,
true_positive_rate=true_positive_rate,
false_positive_rate=false_positive_rate,
)
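# Minimal sketch of the pairwise loss used in Trainer.compute_loss above, on toy reward values.
# Illustrative only; torch.gather stands in for torch_ops.batch_select here.
if __name__ == "__main__":
    _rewards = torch.tensor([[0.2, 1.5, -0.3]])  # (bsz=1, num_candidates=3)
    _index_0 = torch.tensor([[0, 2]])  # (bsz, num_pairs)
    _index_1 = torch.tensor([[1, 1]])
    _choice = torch.tensor([[1, 1]])  # 1 => the sequence indexed by index_1 is preferred.
    _r0 = torch.gather(_rewards, 1, _index_0)
    _r1 = torch.gather(_rewards, 1, _index_1)
    _logits = _r1 - _r0
    _loss = F.binary_cross_entropy_with_logits(_logits, _choice.to(_logits.dtype), reduction="mean")
    print(_loss)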
| alpaca_farm-main | src/alpaca_farm/reward_modeling_trainer.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datasets
import pandas as pd
import transformers
from . import logging, utils
from .data_postprocessor import RewardConditioningPromptPostprocessor
from .data_preprocessor import (
BinaryRewardModelingDataset,
DataCollatorForBinaryRewardModelingDataset,
DataCollatorForSFTDataset,
DataCollatorForStackableDataset,
QueryDataset,
SFTDataset,
split_train_into_train_and_eval,
)
logger = logging.get_logger(__name__)
def make_supervised_data_module(
tokenizer: transformers.PreTrainedTokenizer,
data_args,
training_args,
):
prompt_dict = utils.jload(data_args.prompt_dict_path)
alpaca_instructions = datasets.load_dataset(data_args.dataset_path, data_args.dataset_name)
train_df = pd.concat([pd.DataFrame(alpaca_instructions[split]) for split in data_args.train_splits])
train_dataset = SFTDataset(
df=train_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
)
eval_dataset = None
if data_args.eval_splits is not None:
found_splits = [
pd.DataFrame(alpaca_instructions[split]) for split in data_args.eval_splits if split in alpaca_instructions
]
if len(found_splits) > 0:
eval_df = pd.concat(found_splits)
eval_dataset = SFTDataset(
df=eval_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
)
if eval_dataset is None:
logger.warning("Didn't find evaluation dataset. Disabling evaluation.")
training_args.do_eval = False
data_collator = DataCollatorForSFTDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator)
def make_binary_reward_modeling_data_module(
tokenizer: transformers.PreTrainedTokenizer,
data_args,
training_args,
):
prompt_dict = utils.jload(data_args.prompt_dict_path)
alpaca_human_preference = datasets.load_dataset(data_args.dataset_path, data_args.dataset_name)
train_df = pd.DataFrame(alpaca_human_preference["preference"])
train_dataset = BinaryRewardModelingDataset(
df=train_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
end_sequence_with_eos=training_args.end_sequence_with_eos,
)
train_dataset, eval_dataset = split_train_into_train_and_eval(
train_dataset=train_dataset,
eval_size=data_args.eval_size,
seed=training_args.seed,
)
data_collator = DataCollatorForBinaryRewardModelingDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator)
def make_rl_data_module(
tokenizer: transformers.PreTrainedTokenizer,
data_args,
training_args,
):
prompt_dict = utils.jload(data_args.prompt_dict_path)
alpaca_instructions = datasets.load_dataset(data_args.dataset_path, data_args.dataset_name)
train_df = pd.concat([pd.DataFrame(alpaca_instructions[split]) for split in data_args.train_splits])
eval_df = pd.concat([pd.DataFrame(alpaca_instructions[split]) for split in data_args.eval_splits])
if getattr(training_args, "num_reward_tokens", 0) > 0 and not getattr(
training_args, "train_on_best_quantile", True
):
prompt_postprocessor = RewardConditioningPromptPostprocessor()
else:
prompt_postprocessor = None
train_dataset = QueryDataset(
df=train_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
query_len=training_args.query_len,
prompt_postprocessor=prompt_postprocessor,
)
eval_dataset = QueryDataset(
df=eval_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
query_len=training_args.query_len,
prompt_postprocessor=prompt_postprocessor,
)
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=DataCollatorForStackableDataset())
| alpaca_farm-main | src/alpaca_farm/data_utils.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light wrapper for OpenAI API.
Reference API:
https://beta.openai.com/docs/api-reference/completions/create
Internal map:
https://github.com/lxuechen/ml-swissknife/blob/main/ml_swissknife/openai_utils.py
"""
import copy
import dataclasses
import functools
import logging
import math
import multiprocessing
import os
import random
import sys
import time
from typing import Optional, Sequence, Union
import openai
import tqdm
from openai import openai_object
StrOrOpenAIObject = Union[str, openai_object.OpenAIObject]
openai_org = os.getenv("OPENAI_ORG")
if openai_org is not None:
openai.organization = openai_org
logging.warning(f"Switching to organization: {openai_org} for OAI API key.")
@dataclasses.dataclass
class OpenAIDecodingArgumentsBase(object):
max_tokens: int = 1800
temperature: float = 0.2
top_p: float = 1.0
n: int = 1
stream: bool = False
stop: Optional[Sequence[str]] = None
# Heuristic stop when about to generate next function.
# stop: Optional[Tuple[str, ...]] = ("}\n\nstatic", "}\n\n/*")
presence_penalty: float = 0.0
frequency_penalty: float = 0.0
# If you need these, pass them in as decoding_kwargs.
# best_of: int = 1
# logit_bias: dict = None
@dataclasses.dataclass
class OpenAIDecodingArguments(OpenAIDecodingArgumentsBase):
suffix: Optional[str] = None
logprobs: Optional[int] = None
echo: bool = False
@dataclasses.dataclass
class OpenAIDecodingArgumentsChat(OpenAIDecodingArgumentsBase):
# Currently there are no arguments that differ from the non-chat version.
pass
def requires_chatml(model: str) -> bool:
"""Whether a model requires the ChatML format."""
# TODO: this should ideally be an OpenAI function... Maybe it already exists?
return "turbo" in model or "gpt-4" in model
def convert_dict_to_openai_object(data: dict) -> openai_object.OpenAIObject:
return_data = openai_object.OpenAIObject()
return_data.update(data)
return return_data
def _openai_completion_helper(
prompt_batch: Sequence[StrOrOpenAIObject],
is_chat: bool,
sleep_time: int,
openai_organization_ids: Optional[Sequence[str]] = None,
openai_api_key: Optional[str] = os.environ.get("OPENAI_API_KEY", None),
**shared_kwargs,
):
if openai_api_key is not None:
openai.api_key = openai_api_key
# randomly select orgs
if openai_organization_ids is not None:
openai.organization = random.choice(openai_organization_ids)
# copy shared_kwargs to avoid modifying it
shared_kwargs = copy.deepcopy(shared_kwargs)
while True:
try:
if is_chat:
completion_batch = openai.ChatCompletion.create(messages=prompt_batch[0], **shared_kwargs)
choices = completion_batch.choices
for choice in choices:
assert choice.message.role == "assistant"
if choice.message.content == "":
choice["text"] = " " # annoying doesn't allow empty string
else:
choice["text"] = choice.message.content
else:
completion_batch = openai.Completion.create(prompt=prompt_batch, **shared_kwargs)
choices = completion_batch.choices
for choice in choices:
choice["total_tokens"] = completion_batch.usage.total_tokens / len(prompt_batch)
break
except openai.error.OpenAIError as e:
logging.warning(f"OpenAIError: {e}.")
if "Please reduce your prompt" in str(e):
shared_kwargs["max_tokens"] = int(shared_kwargs["max_tokens"] * 0.8)
logging.warning(f"Reducing target length to {shared_kwargs['max_tokens']}, Retrying...")
else:
logging.warning("Hit request rate limit; retrying...")
if openai_organization_ids is not None and len(openai_organization_ids) > 1:
openai.organization = random.choice(
[o for o in openai_organization_ids if o != openai.organization]
)
logging.warning(f"Switching to organization: {openai.organization} for OAI API key.")
time.sleep(sleep_time) # Annoying rate limit on requests.
return choices
def _openai_completion(
prompts: Union[str, Sequence[str], Sequence[dict[str, str]], dict[str, str]],
decoding_args: OpenAIDecodingArguments,
model_name="text-davinci-003",
sleep_time=2,
batch_size=1,
max_instances=sys.maxsize,
max_batches=sys.maxsize,
return_text=False,
num_procs=1,
**decoding_kwargs,
) -> Union[Union[StrOrOpenAIObject], Sequence[StrOrOpenAIObject], Sequence[Sequence[StrOrOpenAIObject]],]:
"""Decode with OpenAI API.
Args:
prompts: A string or a list of strings to complete. If it is a chat model the strings should be formatted
as explained here: https://github.com/openai/openai-python/blob/main/chatml.md. If it is a chat model
it can also be a dictionary (or list thereof) as explained here:
https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
decoding_args: Decoding arguments.
model_name: Model name. Can be either in the format of "org/model" or just "model".
sleep_time: Time to sleep once the rate-limit is hit.
batch_size: Number of prompts to send in a single request. Only for non chat model.
max_instances: Maximum number of prompts to decode.
max_batches: Maximum number of batches to decode. This argument will be deprecated in the future.
return_text: If True, return text instead of full completion object (which contains things like logprob).
decoding_kwargs: Additional decoding arguments. Pass in `best_of` and `logit_bias` if you need them.
Returns:
A completion or a list of completions.
Depending on return_text, return_openai_object, and decoding_args.n, the completion type can be one of
- a string (if return_text is True)
- an openai_object.OpenAIObject object (if return_text is False)
- a list of objects of the above types (if decoding_args.n > 1)
"""
logging.info(f"Decoding with OpenAI API model {model_name} and numproc == {num_procs}.")
is_single_prompt = isinstance(prompts, (str, dict))
if is_single_prompt:
prompts = [prompts]
# convert prompts to chat format
is_chat = requires_chatml(model_name)
is_chat_format = isinstance(prompts[0], dict)
if is_chat:
if batch_size > 1:
logging.warning("batch_size > 1 is not supported yet for chat models. Setting to 1")
batch_size = 1
if not is_chat_format:
prompts = [prompt_to_chatml(prompt) for prompt in prompts]
if max_batches < sys.maxsize:
logging.warning(
"`max_batches` will be deprecated in the future, please use `max_instances` instead."
"Setting `max_instances` to `max_batches * batch_size` for now."
)
max_instances = max_batches * batch_size
prompts = prompts[:max_instances]
num_prompts = len(prompts)
prompt_batches = [
prompts[batch_id * batch_size : (batch_id + 1) * batch_size]
for batch_id in range(int(math.ceil(num_prompts / batch_size)))
]
shared_kwargs = dict(
model=model_name,
**decoding_args.__dict__,
)
shared_kwargs.update(decoding_kwargs) # override default arguments if specified
with multiprocessing.Pool(num_procs) as p:
partial_completion_helper = functools.partial(
_openai_completion_helper, sleep_time=sleep_time, is_chat=is_chat, **shared_kwargs
)
completions = list(
tqdm.tqdm(
p.imap(partial_completion_helper, prompt_batches),
desc="prompt_batches",
total=len(prompt_batches),
)
)
# flatten the list
completions = [completion for completion_batch in completions for completion in completion_batch]
if return_text:
completions = [completion.text for completion in completions]
if decoding_args.n > 1:
# make completions a nested list, where each entry is a consecutive decoding_args.n of original entries.
completions = [completions[i : i + decoding_args.n] for i in range(0, len(completions), decoding_args.n)]
if is_single_prompt:
# Return non-tuple if only 1 input and 1 generation.
(completions,) = completions
return completions
def string_to_dict(to_convert):
"""Converts a string with equal signs to dictionary. E.g.
>>> string_to_dict(" name=user university=stanford")
{'name': 'user', 'university': 'stanford'}
"""
return {s.split("=", 1)[0]: s.split("=", 1)[1] for s in to_convert.split(" ") if len(s) > 0}
def prompt_to_chatml(prompt: str, start_token: str = "<|im_start|>", end_token: str = "<|im_end|>"):
"""Convert a text prompt to ChatML formal
Examples
--------
>>> prompt = "<|im_start|>system\nYou are a helpful assistant.\n<|im_end|>\n<|im_start|>system
name=example_user\nKnock knock.\n<|im_end|>\n<|im_start|>system name=example_assistant\nWho's
there?\n<|im_end|>\n<|im_start|>user\nOrange.\n<|im_end|>"
>>> print(prompt)
<|im_start|>system
You are a helpful assistant.
<|im_end|>
<|im_start|>system name=example_user
Knock knock.
<|im_end|>
<|im_start|>system name=example_assistant
Who's there?
<|im_end|>
<|im_start|>user
Orange.
<|im_end|>
>>> prompt_to_chatml(prompt)
[{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': 'Knock knock.'},
{'role': 'assistant', 'content': "Who's there?"},
{'role': 'user', 'content': 'Orange.'}]
"""
prompt = prompt.strip()
assert prompt.startswith(start_token)
assert prompt.endswith(end_token)
message = []
for p in prompt.split("<|im_start|>")[1:]:
newline_splitted = p.split("\n", 1)
role = newline_splitted[0].strip()
content = newline_splitted[1].split(end_token, 1)[0].strip()
if role.startswith("system") and role != "system":
# based on https://github.com/openai/openai-cookbook/blob/main/examples
# /How_to_format_inputs_to_ChatGPT_models.ipynb
# and https://github.com/openai/openai-python/blob/main/chatml.md it seems that system can specify a
# dictionary of other args
other_params = string_to_dict(role.split("system", 1)[-1])
role = "system"
else:
other_params = dict()
message.append(dict(content=content, role=role, **other_params))
return message
# Keep the private function for backwards compat.
openai_completion = _openai_completion
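# Illustrative call (a sketch; requires OPENAI_API_KEY, and the model name and prompt are placeholders):
#   completions = openai_completion(
#       prompts=["Write a haiku about llamas."],
#       decoding_args=OpenAIDecodingArguments(max_tokens=64),
#       model_name="text-davinci-003",
#       return_text=True,
#   )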
| alpaca_farm-main | src/alpaca_farm/openai_utils.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import time
import types
import warnings
from pathlib import Path
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Union
import accelerate
import torch
import torch.distributed as dist
import transformers
from accelerate.utils import convert_outputs_to_fp32, is_torch_version
from torch import nn
from torch.distributed.fsdp import FullStateDictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import StateDictType
from transformers.trainer import WEIGHTS_NAME, is_deepspeed_zero3_enabled
from . import constants, logging, utils
from .types import AnyPath, AnyPathOrNone
logger = logging.get_logger(__name__)
def apex_is_installed():
try:
import apex
return True
except ImportError as _:
return False
def flash_attn_is_installed():
try:
import flash_attn
return True
except ImportError as _:
return False
class staggered_object_creation(object):
"""
Object creation in a distributed setting can be very RAM-intensive.
This function staggers the creation of objects on odd and even ranks, so that not all objects
are created at once.
Assumes local_rank == -1 means no distributed training.
"""
def __init__(self, local_rank: int, world_size: int):
super().__init__()
self.local_rank = local_rank
self.world_size = world_size
def __enter__(self, *args, **kwargs):
del args, kwargs
if self.world_size > 1 and self.local_rank % 2 == 0:
dist.barrier()
return self
def __exit__(self, *args, **kwargs):
del args, kwargs
if self.world_size > 1:
if self.local_rank % 2 == 1:
dist.barrier()
dist.barrier() # Final safety barrier.
def __call__(self, func):
def decorator(*args, **kwargs):
with self:
return func(*args, **kwargs)
return decorator
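# Illustrative usage, mirroring how the example training scripts wrap model creation
# (`build_model` is a placeholder):
#   with staggered_object_creation(local_rank=local_rank, world_size=world_size):
#       model = build_model()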
def make_generative_lm(
model_name_or_path: str,
flash_attn: bool,
fp16: Optional[bool] = None,
bf16: Optional[bool] = None,
mixed_precision: Optional[str] = None,
**kwargs,
):
if fp16 is None:
fp16 = mixed_precision == "fp16"
if bf16 is None:
bf16 = mixed_precision == "bf16"
if flash_attn and not fp16 and not bf16:
logger.warning(
"Flash attention does not support fp32. Reverting to standard attention.", main_process_only=True
)
flash_attn = False
if flash_attn and flash_attn_is_installed():
from .flash_models import flash_llama
model_cls = flash_llama.LlamaForCausalLM
else:
model_cls = transformers.LlamaForCausalLM
return model_cls.from_pretrained(model_name_or_path, **kwargs)
def let_model_save_mem_when_zero_grad(model: nn.Module):
def new_zero_grad(self, set_to_none: bool = True) -> None:
r"""Sets gradients of all model parameters to zero. See similar function
under :class:`torch.optim.Optimizer` for more context.
Args:
set_to_none (bool): instead of setting to zero, set the grads to None.
See :meth:`torch.optim.Optimizer.zero_grad` for details.
"""
if getattr(self, "_is_replica", False):
warnings.warn(
"Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "
"The parameters are copied (in a differentiable manner) from the original module. "
"This means they are not leaf nodes in autograd and so don't accumulate gradients. "
"If you need gradients in your forward method, consider using autograd.grad instead."
)
for p in self.parameters():
if p.grad is not None:
if set_to_none:
p.grad = None
else:
if p.grad.grad_fn is not None:
p.grad.detach_()
else:
p.grad.requires_grad_(False)
p.grad.zero_()
# Make zero_grad `set_to_none=True` by default.
# Need this runtime method patching, since self is used within zero_grad.
model.zero_grad = types.MethodType(new_zero_grad, model)
return model
def safe_save_model_for_hf_trainer(
trainer: transformers.Trainer, output_dir: str, give_rw_access=True, rank0_only=True
):
"""Collects the state dict and dump to disk."""
now = time.perf_counter()
if trainer.fsdp is not None:
# NOTE(rtaori): technically should be rank0_only=True (otherwise duplicates model in RAM),
# but currently there seems to be a bug in FSDP that causes it to hang.
# Migration to Pytorch 2 should fix this.
# Once we migrate, we can also implement more efficient loading:
# https://github.com/pytorch/pytorch/blob/master/torch/distributed/fsdp/api.py#L286-L295
# NOTE(tianyi): tested on sphinx6, seems to work fine with rank0_only=False
cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=rank0_only)
with FSDP.state_dict_type(trainer.model, StateDictType.FULL_STATE_DICT, cfg):
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
trainer._save(output_dir, state_dict=state_dict) # noqa
elif trainer.deepspeed is not None:
# --- The stuff below is almost a copy from transformers.trainer.Trainer.save_model (transformers==4.27.3) ---
# this takes care of everything as long as we aren't under zero3
if trainer.args.should_save:
trainer._save(output_dir)
if is_deepspeed_zero3_enabled():
# It's too complicated to try to override different places where the weights dump gets
# saved, so since under zero3 the file is bogus, simply delete it. The user should
# either use deepspeed checkpoint to resume or to recover full weights use
# zero_to_fp32.py stored in the checkpoint.
if trainer.args.should_save:
file = os.path.join(output_dir, WEIGHTS_NAME)
if os.path.isfile(file):
logger.warning(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
os.remove(file)
# now save the real model if stage3_gather_16bit_weights_on_model_save=True
# if false it will not be saved.
# This must be called on all ranks
if not trainer.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME):
logger.warning(
"deepspeed.save_16bit_model didn't save the model, since"
" stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use"
" zero_to_fp32.py to recover weights"
)
trainer.deepspeed.save_checkpoint(output_dir)
# --- End of shameless copy ---
# Auto-convert the checkpoint to fp32 for easier downstream use.
# Only rank0 shall do the checkpoint conversion to prevent race conditions.
if trainer.args.should_save:
try:
os.system(
f"python {output_dir}/zero_to_fp32.py '{output_dir}' '{output_dir}/pytorch_model.bin'"
)
except Exception as e:
logger.fatal(f"Failed to convert zero3 checkpoint to fp32: {e}")
else: # Also support saving for non-FSDP models.
# NOTE(lxuechen): Saving and loading T5 has weird pickle issues due to device map.
# Wasn't able to exactly pinpoint. But saving to and loading from CPU seems to work.
# In principle, trainer.save_model() should do the same thing, but breaks in practice.
# We drop T5 support.
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
if trainer.args.should_save:
if give_rw_access:
try:
os.system(f"chmod -R a+xwr {output_dir}")
except Exception as e:
logger.fatal(f"Failed to give read-write access to {output_dir}: {e}")
logger.warning(f"Saving model took {time.perf_counter() - now:.2f} seconds.")
def flatten_dict(nested, sep=".", postprocess_fn=lambda *args: args):
def rec(nest, prefix, into):
for k, v in nest.items():
if sep in k:
raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'")
if isinstance(v, dict): # collections.Mapping fails in py3.10.
rec(v, prefix + k + sep, into)
else:
v = postprocess_fn(v)
into[prefix + k] = v
flat = {}
rec(nested, "", flat)
return flat
def unpack_dict(d: Dict, keys: Sequence[str], return_type: type = tuple) -> Union[Sequence, Dict]:
if return_type in (tuple, list):
return return_type(d[key] for key in keys)
elif return_type == dict:
return {key: d[key] for key in keys}
else:
raise ValueError(f"Unknown return_type: {return_type}")
def merge_dict(dicts: Sequence[dict], merge_fn: Callable = lambda *args: args) -> dict:
"""Merge a sequence of dicts (with the same set of keys) into a single dict."""
if len(dicts) == 0:
return dict()
return {key: merge_fn([dict_[key] for dict_ in dicts]) for key in dicts[0].keys()}
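# Illustrative examples of the dict helpers above:
# >>> unpack_dict({"a": 1, "b": 2, "c": 3}, keys=("a", "c"))
# (1, 3)
# >>> merge_dict([{"x": 1}, {"x": 2}], merge_fn=sum)
# {'x': 3}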
def model_name_or_path_exists(model_name_or_path: AnyPath) -> bool:
try:
transformers.PretrainedConfig.get_config_dict(model_name_or_path)
except OSError:
return os.path.exists(Path(model_name_or_path) / "trainer_state.json")
return True
def get_transformer_hidden_size(model: transformers.PreTrainedModel):
if isinstance(model, transformers.GPT2LMHeadModel):
hidden_size_attr_name = "n_embd"
elif isinstance(model, transformers.OPTForCausalLM):
hidden_size_attr_name = "word_embed_proj_dim"
elif isinstance(model, transformers.T5ForConditionalGeneration):
hidden_size_attr_name = "d_model"
else:
# Hack to deal with the fact that transformers library changed the LLaMA model name.
llama_cls = getattr(
transformers, "LLaMAForCausalLM" if hasattr(transformers, "LLaMAForCausalLM") else "LlamaForCausalLM"
)
if isinstance(model, llama_cls):
hidden_size_attr_name = "hidden_size"
else:
raise ValueError(f"Unknown base_model type: {type(model)}")
from typing import Any, Mapping
return getattr(model.config, hidden_size_attr_name)
def prepare_inputs(data: Union[torch.Tensor, Any], device: Union[str, int, torch.device]) -> Union[torch.Tensor, Any]:
if isinstance(data, Mapping):
return type(data)({k: prepare_inputs(v, device) for k, v in data.items()}) # noqa
elif isinstance(data, (tuple, list)):
return type(data)(prepare_inputs(v, device) for v in data)
elif isinstance(data, torch.Tensor):
return data.to(device) # This can break with deepspeed.
return data
def cast_with_native_amp(func: Callable, mixed_precision: Optional[str] = None) -> Callable:
"""Almost like how huggingface accelerate cast `model.forward`."""
if mixed_precision not in ("fp16", "bf16"):
logger.warning(f"Unknown mixed precision mode: {mixed_precision}, falling back to fp32.")
return func
if mixed_precision == "fp16" and is_torch_version(">=", "1.10"):
output_func = torch.cuda.amp.autocast(dtype=torch.float16)(func)
else:
device_type = "cuda" if torch.cuda.is_available() else "cpu"
output_func = torch.autocast(device_type=device_type, dtype=torch.bfloat16)(func)
output_func = convert_outputs_to_fp32(output_func)
return output_func
def prepare_model_for_custom_fn(model: nn.Module, fn_name: str, accelerator: accelerate.Accelerator) -> nn.Module:
"""Wrap a custom function of a model with the right mixed precision context.
This function should be run on *raw* model, i.e., before wrapped into DDP or FSDP.
"""
if accelerator.native_amp:
# Store original function.
original_fn_name = f"_original_{fn_name}"
original_fn = getattr(model, fn_name)
setattr(model, original_fn_name, original_fn)
# New set function.
wrapped_fn = cast_with_native_amp(original_fn, mixed_precision=accelerator.mixed_precision)
setattr(model, fn_name, wrapped_fn)
return model
| alpaca_farm-main | src/alpaca_farm/common.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utility functions.
Internal map:
https://github.com/lxuechen/ml-swissknife/blob/main/ml_swissknife/utils.py
"""
import argparse
import functools
import io
import json
import os
import random
from typing import Callable, Optional, Sequence, Union
import numpy as np
import torch
import transformers
from torch.utils.data import DataLoader
from . import logging
from .types import Numeric
logger = logging.get_logger(__name__)
home = os.path.expanduser("~")
home_data = os.path.join(home, "data")
join = os.path.join
pathexists = os.path.exists
makedirs = functools.partial(os.makedirs, exist_ok=True)
dirname = os.path.dirname
basename = os.path.basename
def alleq(l: Sequence, f: Optional[Callable] = lambda x, y: x == y):
"""Check all arguments in a sequence are equal according to a given criterion.
Args:
f: A bi-variate boolean function.
l: A list/tuple.
Returns:
True if everything is equal; otherwise False.
"""
return all(f(l[0], li) for li in l[1:])
def zip_(*args: Sequence):
"""Assert sequences of same length before zipping."""
if len(args) == 0:
return []
assert alleq(args, lambda x, y: len(x) == len(y))
return zip(*args)
def _make_w_io_base(f, mode: str):
if not isinstance(f, io.IOBase):
f_dirname = os.path.dirname(f)
if f_dirname != "":
makedirs(f_dirname)
f = open(f, mode=mode)
return f
def _make_r_io_base(f, mode: str):
if not isinstance(f, io.IOBase):
f = open(f, mode=mode)
return f
def jload(f, mode="r"):
"""Load a .json file into a dictionary."""
f = _make_r_io_base(f, mode)
jdict = json.load(f)
f.close()
return jdict
def jdump(obj: Union[str, dict, list], f, mode="w", indent=4, default=str):
"""Dump a str or dictionary to a file in json format.
Args:
obj: An object to be written.
f: A string path to the location on disk.
mode: Mode for opening the file.
indent: Indent for storing json dictionaries.
default: A function to handle non-serializable entries; defaults to `str`.
"""
f = _make_w_io_base(f, mode)
if isinstance(obj, (dict, list)):
json.dump(obj, f, indent=indent, default=default)
elif isinstance(obj, str):
f.write(obj)
else:
raise ValueError(f"Unexpected type: {type(obj)}")
f.close()
def jdumps(obj, indent=4, default=str):
return json.dumps(obj, indent=indent, default=default)
def mean(*seqs: Sequence[Numeric]) -> Union[Numeric, Sequence[Numeric]]:
singleton = len(seqs) == 1
means = [float(np.mean(seq)) for seq in seqs]
return means[0] if singleton else means
def stable_resize_token_embeddings_and_tokenizer(
model: transformers.PreTrainedModel,
tokenizer: transformers.PreTrainedTokenizer,
special_tokens_dict: dict,
):
"""Resize tokenizer and embedding together.
For new tokens, the embedding value is the average of all old embedding vectors.
"""
tokenizer.add_special_tokens(special_tokens_dict)
stable_resize_token_embeddings(model, len(tokenizer))
def stable_resize_token_embeddings(model: transformers.PreTrainedModel, target_size: int, jitter_new_embeddings=False):
num_new_tokens = target_size - model.get_input_embeddings().weight.size(0)
model.resize_token_embeddings(target_size)
if num_new_tokens > 0:
@torch.inference_mode()
def stable_init(embedding):
embedding_data = embedding.weight.data
embedding_avg = embedding_data[:-num_new_tokens].mean(dim=0, keepdim=True)
embedding_data[-num_new_tokens:] = embedding_avg
if jitter_new_embeddings:
embedding_std = embedding_data[:-num_new_tokens].std(dim=0, keepdim=True)
# The random tensor must be of the same shape as the new embeddings.
embedding_data[-num_new_tokens:] += torch.randn_like(embedding_data[-num_new_tokens:]) * embedding_std
input_embeddings = model.get_input_embeddings() # Must grab this again after resize.
output_embeddings = model.get_output_embeddings()
# It doesn't matter if there's weight sharing or not; with sharing, the second init will overwrite the first.
for embeddings in (input_embeddings, output_embeddings):
stable_init(embeddings)
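# Illustrative usage sketch for the stable-resize helpers; the checkpoint name and pad token
# below are placeholders for illustration, not values mandated by this module.
def _demo_stable_resize():
    model = transformers.AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
    tokenizer = transformers.AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
    stable_resize_token_embeddings_and_tokenizer(
        model=model, tokenizer=tokenizer, special_tokens_dict=dict(pad_token="[PAD]")
    )
    # New embedding rows are initialized to the mean of the pre-existing rows.
    assert model.get_input_embeddings().weight.size(0) == len(tokenizer)
    return model, tokenizer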
def convert_str_dtype_to_torch_dtype(str_dtype: Optional[str]):
if str_dtype in ("single", "float32", "float", "fp32", None):
return torch.float
elif str_dtype in ("half", "float16", "fp16"):
return torch.float16
elif str_dtype in ("bfloat16", "bf16"):
return torch.bfloat16
elif str_dtype in ("double", "float64"):
return torch.float64
else:
raise ValueError(f"Unknown dtype: {str_dtype}")
def manual_seed(args_or_seed: Union[int, argparse.Namespace], fix_cudnn=False):
if hasattr(args_or_seed, "seed"):
args_or_seed = args_or_seed.seed
random.seed(args_or_seed)
np.random.seed(args_or_seed)
torch.manual_seed(args_or_seed)
torch.cuda.manual_seed_all(args_or_seed)
os.environ["PYTHONHASHSEED"] = str(args_or_seed)
if fix_cudnn:
torch.backends.cudnn.deterministic = True # noqa
torch.backends.cudnn.benchmark = False # noqa
class InfiniteLoader(object):
"""Wraps an existing loader so that it outputs stuff indefinitely; useful for semi-supervised learning."""
def __init__(self, loader: DataLoader):
super(InfiniteLoader, self).__init__()
self.loader = loader
self.iterator = iter(loader)
def __next__(self):
try:
return next(self.iterator)
except StopIteration:
self.iterator = iter(self.loader)
return next(self.iterator)
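# Illustrative usage sketch: draw more batches than a single epoch contains.
def _demo_infinite_loader():
    loader = DataLoader(list(range(10)), batch_size=4)
    infinite = InfiniteLoader(loader)
    # The underlying loader yields ceil(10 / 4) = 3 batches per epoch; this keeps going.
    return [next(infinite) for _ in range(5)]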
def parallel_sort(*args: Sequence, key=None, reverse=False):
"""Parallel sort of multiple lists."""
if key is None:
# Parallel sort based on the order of the first list.
key = lambda inputs: inputs[0] # noqa
ret = sorted(zip_(*args), key=key, reverse=reverse)
return tuple([ret_i[j] for ret_i in ret] for j in range(len(args)))
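# Illustrative usage sketch: sort scores and names jointly by score, descending.
def _demo_parallel_sort():
    scores, names = parallel_sort([0.2, 0.9, 0.5], ["a", "b", "c"], reverse=True)
    assert scores == [0.9, 0.5, 0.2] and names == ["b", "c", "a"]
    return scores, names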
| alpaca_farm-main | src/alpaca_farm/utils.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for PyTorch's distributed training.
Compatible with torchrun / elastic.
Internal map:
https://github.com/lxuechen/ml-swissknife/blob/main/ml_swissknife/distributed_utils.py
"""
import os
import sys
from typing import Optional
import torch
import torch.distributed as dist
from .types import Tensor
def setup(rank: Optional[int] = None, world_size: Optional[int] = None):
if rank is None:
rank = get_local_rank()
if world_size is None:
world_size = get_world_size()
if world_size <= 1:
return rank, world_size
if not dist.is_initialized():
if sys.platform == "win32":
# Distributed package only covers collective communications with Gloo
# backend and FileStore on Windows platform. Set init_method parameter
# in init_process_group to a local file.
# Example init_method="file:///f:/libtmp/some_file"
init_method = "file:///f:/libtmp/dist-tmp"
dist.init_process_group(backend="gloo", init_method=init_method, rank=rank, world_size=world_size)
elif torch.cuda.is_available():
dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
else:
dist.init_process_group(backend="gloo", rank=rank, world_size=world_size)
return rank, world_size
def cleanup():
dist.destroy_process_group()
def get_local_rank():
return int(os.getenv("LOCAL_RANK", 0))
def get_world_size():
return int(os.getenv("WORLD_SIZE", 1))
def should_save():
"""Return True if the current process is the main process."""
return get_local_rank() <= 0
def all_gather_and_cat(tensor: Tensor, dim=0):
if get_world_size() > 1:
tensor_list = [torch.empty_like(tensor) for _ in range(get_world_size())]
dist.all_gather(tensor_list, tensor)
tensor = torch.cat(tensor_list, dim=dim)
return tensor
is_main_process = should_save
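# Illustrative usage sketch: with a single process (WORLD_SIZE unset or 1) `all_gather_and_cat`
# is a no-op; under torchrun with N processes the result is N times longer along `dim`.
def _demo_single_process_gather():
    local = torch.arange(4)
    gathered = all_gather_and_cat(local)
    assert torch.equal(gathered, local)
    return gathered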
| alpaca_farm-main | src/alpaca_farm/distributed_utils.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Postprocessors for prompts and data frames.
Internal map:
https://github.com/lxuechen/human-feedback/blob/main/instruction_following/postprocessor.py
"""
from dataclasses import dataclass
from typing import Callable, Sequence, Union
import pandas as pd
@dataclass
class SequentialPostProcessor(object):
operations: Sequence[Callable]
def __post_init__(self):
special_tokens = []
for operation in self.operations:
if hasattr(operation, "special_tokens"):
special_tokens.extend(operation.special_tokens)
self.special_tokens = special_tokens
def __call__(self, df: Union[pd.DataFrame, dict]) -> Union[pd.DataFrame, dict]:
for operation in self.operations:
df = operation(df)
return df
@dataclass
class RewardConditioningPromptPostprocessor(object):
injected_token = "<reward_0>"
def __call__(self, prompt: str, **kwargs):
return f"{self.injected_token}{prompt}"
| alpaca_farm-main | src/alpaca_farm/data_postprocessor.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Union
import torch
import torch.nn.functional as F
from . import utils
from .types import Tensor
def batch_select(input: Tensor, index: Tensor):
"""Select elements from a batched tensor with a batched index tensor.
Example:
input = torch.tensor([
[0, 1, 2],
[3, 0, 9],
[6, 7, 8],
])
index = torch.tensor([[0, 1], [1, 0], [0, 0]])
batch_select(input, index) = tensor([
[0, 1],
[0, 3],
[6, 6]
])
"""
dummy_index = torch.arange(input.size(0), device=input.device).unsqueeze(-1)
return input[dummy_index, index]
def pad_sequence_from_left(
sequences: Sequence[Tensor],
batch_first: bool = False,
padding_value: float = 0.0,
):
"""Mirror of `torch.nn.utils.rnn.pad_sequence`, but pad from left."""
sequences = tuple(sequence.flip(0) for sequence in sequences)
padded_sequence = torch._C._nn.pad_sequence(sequences, batch_first, padding_value) # noqa
padded_sequence = padded_sequence.flip(int(batch_first))
return padded_sequence
def compute_logprobs(logits: Tensor, labels: Tensor, ignore_index: int) -> Tensor:
"""Compute per-token logprobs, zeroing out places with ignore_index (padding)."""
return -F.cross_entropy(logits.permute(0, 2, 1), labels, reduction="none", ignore_index=ignore_index)
def whiten(values: Tensor, shift_mean=True, epsilon=1e-8) -> Tensor:
assert values.size(0) >= 8, f"Internal error: Minibatch size {values.size(0)} is insufficient for whitening."
mean, std = values.mean(), values.std(unbiased=False) # noqa
whitened = (values - mean) / (std + epsilon)
if not shift_mean:
whitened = whitened + mean
return whitened
def pad(inputs: Tensor, target_size: Union[torch.Size, Sequence[int]], value=0.0, left=True):
current_size = inputs.size()
diffs = tuple(ti - ci for ti, ci in utils.zip_(target_size, current_size))
pad_params = []
for diff in diffs:
pad_params = ([diff, 0] if left else [0, diff]) + pad_params
res = F.pad(inputs, pad=pad_params, value=value)
return res
def left_pad(inputs: Tensor, target_size: Union[torch.Size, Sequence[int]], value=0.0):
return pad(inputs=inputs, target_size=target_size, value=value, left=True)
def right_pad(inputs: Tensor, target_size: Union[torch.Size, Sequence[int]], value=0.0):
return pad(inputs=inputs, target_size=target_size, value=value, left=False)
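# Illustrative usage sketch: left-pad a length-3 id sequence to length 5; the pad id of 0 is a placeholder.
def _demo_left_pad():
    ids = torch.tensor([7, 8, 9])
    padded = left_pad(ids, target_size=(5,), value=0)
    assert padded.tolist() == [0, 0, 7, 8, 9]
    return padded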
| alpaca_farm-main | src/alpaca_farm/torch_ops.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import accelerate
class MyAccelerator(accelerate.Accelerator):
"""Thin wrapper for accelerate.Accelerator."""
def __repr__(self):
return (
f"Accelerator(\n"
f" state={self.state}, \n"
f" gradient_accumulation_steps={self.gradient_accumulation_steps:.6f}, \n"
f" split_batches={self.split_batches}, \n"
f" step_scheduler_with_optimizer={self.step_scheduler_with_optimizer},\n"
f")"
)
def unwrap_optimizer(self, optimizer: accelerate.accelerator.AcceleratedOptimizer):
return optimizer.optimizer
| alpaca_farm-main | src/alpaca_farm/accelerate_patch.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import dataclasses
from typing import Callable, Dict, Optional, Sequence, Union
import einops
import pandas as pd
import torch
import transformers
from torch.utils.data import Dataset
from . import constants, logging, torch_ops, utils
from .types import Tensor
logger = logging.get_logger(__name__)
def format_prompt(example: dict, prompt_dict: dict) -> str:
"""Formats a prompt with a prompt_dict formatter.
Args:
example: A dict-like object with required keys "instruction" and "input"
prompt_dict: Dictionary containing the keys "prompt_noinputs" and "prompt_inputs" which have
placeholders corresponding to the keys from `example`. E.g. "{instruction}".
Returns:
A formatted prompt string.
Examples
--------
    >>> format_prompt(dict(instruction="test", input=""), prompt_dict=dict(prompt_noinputs="prompt {instruction}"))
"prompt test"
"""
assert "instruction" in example and "input" in example, "Internal error: example missing required keys."
if example["input"] is None or len(example["input"]) == 0:
formatted_prompt = prompt_dict["prompt_noinputs"].format_map(example)
else:
formatted_prompt = prompt_dict["prompt_inputs"].format_map(example)
return formatted_prompt
def format_output(example: dict, eos_token: Optional[str] = None, output_key="output") -> str:
if eos_token is None:
eos_token = ""
return f"{example[output_key]}{eos_token}"
def format_prompt_with_data_frame(
df: pd.DataFrame,
prompt_dict: dict,
df_postprocessor: Optional[Callable] = None,
return_dict=False,
):
if df_postprocessor is not None:
df = df_postprocessor(df)
list_dict_data = df.to_dict(orient="records")
prompts = [format_prompt(example, prompt_dict) for example in list_dict_data]
metadata = {"prompt_dict": prompt_dict}
if return_dict:
return dict(prompts=prompts, list_dict_data=list_dict_data, metadata=metadata)
return prompts, list_dict_data, metadata
def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> dict:
"""Tokenize a list of strings and return the tokenized content as well metadata (e.g., truncation statistics)."""
padding = getattr(tokenizer, "padding", "max_length")
return_overflowing_tokens = transformers.__version__ <= "4.26.1"
# TODO(lxuechen): Until HF supports fast tokenizer for OPT, we can't make a joint call on the list of strings
# when `return_overflowing_tokens=True`.
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding=padding,
max_length=tokenizer.model_max_length,
truncation=True,
return_overflowing_tokens=return_overflowing_tokens,
)
for text in strings
]
if padding == "max_length":
input_ids = labels = torch.cat([tokenized.input_ids for tokenized in tokenized_list])
else: # "longest"
input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
if return_overflowing_tokens:
input_ids_lens = labels_lens = [
tokenizer.model_max_length + tokenized.num_truncated_tokens.item() for tokenized in tokenized_list
]
# `num_truncated_tokens` can be negative, if no truncation occurred.
num_truncated_tokens = sum(max(tokenized.num_truncated_tokens.item(), 0) for tokenized in tokenized_list)
num_truncated_examples = sum(tokenized.num_truncated_tokens.item() > 0 for tokenized in tokenized_list)
else:
logger.warning(
"You are using a `transformers` version that does not support `return_overflowing_tokens=True`. "
"The tokenization metadata will not be recorded."
"In order to see truncation statistics, please downgrade to `transformers<=4.26.1`."
)
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list
]
num_truncated_tokens = num_truncated_examples = -1
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
tokenization_metadata=dict(
num_examples=len(tokenized_list),
num_truncated_tokens=num_truncated_tokens,
num_truncated_examples=num_truncated_examples,
input_ids_avg_len=utils.mean(input_ids_lens),
input_ids_max_len=max(input_ids_lens),
input_ids_min_len=min(input_ids_lens),
labels_avg_len=utils.mean(labels_lens),
labels_max_len=max(labels_lens),
labels_min_len=min(labels_lens),
model_max_length=tokenizer.model_max_length,
),
)
def preprocess_for_sft(
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
df_postprocessor=None,
verbose=True,
) -> dict[str, Union[torch.Tensor, Sequence[torch.Tensor]]]:
"""Tokenize each example and create the labels.
Args:
df: DataFrame containing the data. Must have columns 'instruction', 'input', and 'output'.
prompt_dict: Dictionary for formatting prompts.
tokenizer: Tokenizer to use. If None, use the tokenizer for the given model.
df_postprocessor: Function to apply to the DataFrame before tokenization.
verbose: Whether to print tokenization metadata.
Returns:
A dictionary mapping str to torch.Tensor.
"""
if df_postprocessor is not None:
df = df_postprocessor(df)
list_dict_data = df.to_dict(orient="records")
sources = [format_prompt(dict_data, prompt_dict) for dict_data in list_dict_data]
targets = [format_output(dict_data, eos_token=tokenizer.eos_token) for dict_data in list_dict_data]
examples = [s + t for s, t in utils.zip_(sources, targets)]
examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)]
input_ids = examples_tokenized["input_ids"]
labels = copy.deepcopy(input_ids)
for label, source_len in utils.zip_(labels, sources_tokenized["input_ids_lens"]):
label[:source_len] = constants.IGNORE_INDEX # Input context should not contribute to loss.
packaged_data = dict(
input_ids=input_ids,
labels=labels,
metadata=dict(),
tokenization_metadata=examples_tokenized["tokenization_metadata"],
)
if verbose:
logger.warning(f"Tokenization metadata:\n{utils.jdumps(packaged_data['tokenization_metadata'])}")
return packaged_data
def preprocess_for_reward_modeling(
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
df_postprocessor: Optional[Callable] = None,
end_sequence_with_eos: bool = False,
verbose=True,
) -> dict[str, torch.Tensor]:
if df_postprocessor is not None:
df = df_postprocessor(df)
list_dict_data = df.to_dict(orient="records")
index_0, index_1 = tuple(
torch.full(size=(len(list_dict_data), 1), fill_value=fill_value, dtype=torch.long) for fill_value in (0, 1)
)
def _get_numeric_preference(example: dict):
# 1 vs 2 is stored in table, but for modeling we use 0 vs 1; remap here.
return {1: 0, 2: 1}[example["preference"]]
choice = torch.tensor([[_get_numeric_preference(dict_data)] for dict_data in list_dict_data])
def _get_text(example: dict, output_key: str):
source = format_prompt(example, prompt_dict=prompt_dict)
target = format_output(
example,
eos_token=tokenizer.eos_token if end_sequence_with_eos else None,
output_key=output_key,
)
return source + target
text_list_0, text_list_1 = tuple(
[_get_text(dict_data, key) for dict_data in list_dict_data] for key in ("output_1", "output_2")
)
def _merge_tokenization_metadata(metadata_list: Sequence[dict]) -> dict:
num_examples = sum(metadata["num_examples"] for metadata in metadata_list)
num_truncated_tokens = sum(metadata["num_truncated_tokens"] for metadata in metadata_list)
num_truncated_examples = sum(metadata["num_truncated_examples"] for metadata in metadata_list)
input_ids_avg_lens = (
sum([metadata["input_ids_avg_len"] * metadata["num_examples"] for metadata in metadata_list]) / num_examples
)
input_ids_max_len = max(metadata["input_ids_max_len"] for metadata in metadata_list)
input_ids_min_len = min(metadata["input_ids_min_len"] for metadata in metadata_list)
labels_avg_lens = (
sum([metadata["labels_avg_len"] * metadata["num_examples"] for metadata in metadata_list]) / num_examples
)
labels_max_len = max(metadata["labels_max_len"] for metadata in metadata_list)
labels_min_len = min(metadata["labels_min_len"] for metadata in metadata_list)
return dict(
num_examples=num_examples,
num_truncated_tokens=num_truncated_tokens,
num_truncated_examples=num_truncated_examples,
input_ids_avg_len=input_ids_avg_lens,
input_ids_max_len=input_ids_max_len,
input_ids_min_len=input_ids_min_len,
labels_avg_len=labels_avg_lens,
labels_max_len=labels_max_len,
labels_min_len=labels_min_len,
)
logger.warning(f"Tokenizing {len(list_dict_data)} pairs...")
tokenized_0, tokenized_1 = tuple(_tokenize_fn(text_list, tokenizer) for text_list in (text_list_0, text_list_1))
# "size" (bsz, 2, seq_len)
input_ids = [list(pair) for pair in utils.zip_(tokenized_0["input_ids"], tokenized_1["input_ids"])]
labels = [list(pair) for pair in utils.zip_(tokenized_0["labels"], tokenized_1["labels"])]
tokenization_metadata = _merge_tokenization_metadata(
[tokenized_0["tokenization_metadata"], tokenized_1["tokenization_metadata"]]
)
packaged_data = dict(
input_ids=input_ids,
labels=labels,
index_0=index_0,
index_1=index_1,
choice=choice,
tokenization_metadata=tokenization_metadata,
metadata=dict(mean_choice=choice.float().mean().item()),
)
if verbose:
logger.warning(f"Tokenization metadata:\n{utils.jdumps(packaged_data['tokenization_metadata'])}")
return packaged_data
def _get_generator(seed: int) -> torch.Generator:
rng = torch.Generator()
rng.manual_seed(seed)
return rng
def split_train_into_train_and_eval(train_dataset: Dataset, eval_size: int, seed: int) -> tuple[Dataset, Dataset]:
assert eval_size < len(
train_dataset # noqa
), "Requested eval_size cannot be equal/larger than original train data size."
new_train_size = len(train_dataset) - eval_size # noqa
train_dataset, eval_dataset = torch.utils.data.random_split(
train_dataset, [new_train_size, eval_size], generator=_get_generator(seed)
)
return train_dataset, eval_dataset
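# Illustrative usage sketch: split a toy dataset of 100 items into 90 train / 10 eval with a fixed seed.
def _demo_split_train_eval():
    full = torch.utils.data.TensorDataset(torch.arange(100))
    train, eval_ = split_train_into_train_and_eval(full, eval_size=10, seed=42)
    assert len(train) == 90 and len(eval_) == 10
    return train, eval_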
class SFTDataset(Dataset):
def __init__(
self,
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
df_postprocessor: Optional[Callable] = None,
):
super(SFTDataset, self).__init__()
data_dict = preprocess_for_sft(
df=df, prompt_dict=prompt_dict, tokenizer=tokenizer, df_postprocessor=df_postprocessor
)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
self.metadata = data_dict["metadata"]
self.tokenization_metadata = data_dict["tokenization_metadata"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, Tensor]:
return dict(input_ids=self.input_ids[i], labels=self.labels[i])
@dataclasses.dataclass
class DataCollatorForSFTDataset(object):
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, Tensor]:
input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id
)
labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=constants.IGNORE_INDEX)
# When sequences are right padded, `attention_mask` is only useful for T5 training.
attention_mask = input_ids.ne(self.tokenizer.pad_token_id).long()
return dict(
input_ids=input_ids,
labels=labels,
attention_mask=attention_mask,
)
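# Illustrative usage sketch for the SFT collator; the caller supplies a tokenizer whose
# `pad_token_id` is set. The toy ids below are placeholders.
def _demo_sft_collation(tokenizer: transformers.PreTrainedTokenizer):
    collator = DataCollatorForSFTDataset(tokenizer=tokenizer)
    instances = [
        dict(input_ids=torch.tensor([1, 2, 3]), labels=torch.tensor([constants.IGNORE_INDEX, 2, 3])),
        dict(input_ids=torch.tensor([4, 5]), labels=torch.tensor([constants.IGNORE_INDEX, 5])),
    ]
    batch = collator(instances)
    # Right padding: input_ids has shape (2, 3); attention_mask marks the non-pad positions.
    return batch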
class BinaryRewardModelingDataset(Dataset):
def __init__(
self,
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
df_postprocessor: Optional[Callable] = None,
end_sequence_with_eos: bool = False,
):
super(BinaryRewardModelingDataset, self).__init__()
data_dict = preprocess_for_reward_modeling(
df=df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
df_postprocessor=df_postprocessor,
end_sequence_with_eos=end_sequence_with_eos,
)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
self.index_0 = data_dict["index_0"]
self.index_1 = data_dict["index_1"]
self.choice = data_dict["choice"]
self.metadata = data_dict["metadata"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, Tensor]:
return dict(
input_ids=self.input_ids[i],
labels=self.labels[i],
index_0=self.index_0[i],
index_1=self.index_1[i],
choice=self.choice[i],
)
@dataclasses.dataclass
class DataCollatorForBinaryRewardModelingDataset(object):
"""
This collation assumes data preprocessing converts text into *padded* tensors of the same length.
For autoregressive models like OPT and GPT2, `input_ids` alone is sufficient to produce the rewards.
For enc-dec models like T5, we need `labels`.
`input_ids` and `labels` are tensors of size (bsz, num_candidates, max_seq_len), i.e., each batch instance has
`num_candidates` generations/completions.
`index_0` and `index_1` are tensors of size (bsz, num_pairs), and are used to index into `input_ids` and
`labels` to find the first and second sequences in the pair.
`choice` is a binary int/long tensor of size (bsz, num_pairs) indicating which sequence in the pair is better,
i.e., 0 means the first sequence is preferred, and 1 means otherwise.
"""
tokenizer: transformers.PreTrainedTokenizer
def _left_pad_helper(self, instances: Sequence[dict], key: str):
# TODO(lxuechen): Potentially replace with `transformers.PretrainedTokenizerBase.prepare_for_model`.
        # `instances` is a list of dicts; each dict maps `key` to a list of tensors, possibly of unequal length.
input_ids = [seq for instance in instances for seq in instance[key]] # Flatten.
input_ids = torch_ops.pad_sequence_from_left(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id,
)
input_ids = einops.rearrange(
input_ids,
"(bsz num_candidates) max_seq_len -> bsz num_candidates max_seq_len",
num_candidates=len(instances[0][key]),
)
return input_ids
def __call__(self, instances: Sequence[Dict]) -> Dict[str, Tensor]:
index_0, index_1, choice = tuple(
torch.stack([instance[key] for instance in instances]) for key in ("index_0", "index_1", "choice")
)
input_ids = self._left_pad_helper(instances, "input_ids")
attention_mask = input_ids.ne(self.tokenizer.pad_token_id).long()
return dict(
input_ids=input_ids,
attention_mask=attention_mask,
index_0=index_0,
index_1=index_1,
choice=choice,
)
class QueryDataset(Dataset):
"""Dataset that emits tokenized left-padded queries."""
def __init__(
self,
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
query_len: int,
df_postprocessor: Optional[Callable] = None,
prompt_postprocessor: Optional[Callable] = None,
):
super(QueryDataset, self).__init__()
if df_postprocessor is not None:
df = df_postprocessor(df)
list_dict_data = df.to_dict(orient="records")
prompts = [format_prompt(example=dict_data, prompt_dict=prompt_dict) for dict_data in list_dict_data]
if prompt_postprocessor is not None:
prompts = [prompt_postprocessor(prompt) for prompt in prompts]
queries = [tokenizer(prompt, return_tensors="pt", truncation=False).input_ids[0] for prompt in prompts]
filtered_queries = [query for query in queries if len(query) <= query_len]
logger.warning(
f"Filtered out {len(queries) - len(filtered_queries)} instances out of {len(queries)} that "
f"exceed length limit. These examples are not used for training, but will still be used in evaluation. "
)
queries = torch.stack(
[
torch_ops.left_pad(query, target_size=(query_len,), value=tokenizer.pad_token_id)
for query in filtered_queries
]
)
self.queries = queries
self.query_attn_masks = queries.ne(tokenizer.pad_token_id).long()
# Auxiliary data.
self.prompts = prompts
self.list_dict_data = list_dict_data
def __getitem__(self, i):
return dict(queries=self.queries[i], query_attn_masks=self.query_attn_masks[i])
def __len__(self):
return len(self.queries)
class QueryResponseDataset(Dataset):
def __init__(
self,
tokenizer: transformers.PreTrainedTokenizer,
queries: Sequence[str],
responses: Sequence[str],
query_len: int,
response_len: int,
):
super(QueryResponseDataset, self).__init__()
def tokenize_without_truncation(strings):
return [tokenizer(string, return_tensors="pt", truncation=False).input_ids[0] for string in strings]
sequences = [query + response for query, response in utils.zip_(queries, responses)]
queries = tokenize_without_truncation(queries)
sequences = tokenize_without_truncation(sequences)
responses = [sequence[len(query) :] for sequence, query in utils.zip_(sequences, queries)]
filtered_pairs = [
(query, response)
for query, response in utils.zip_(queries, responses)
if len(query) <= query_len and len(response) <= response_len
]
filtered_queries = [query for query, _ in filtered_pairs]
filtered_responses = [response for _, response in filtered_pairs]
logger.warning(
f"Filtered out {len(queries) - len(filtered_queries)} instances out of {len(queries)} that "
f"exceed length limit... "
f"These examples are not used for training. "
f"However they won't be ignored if this is eval set that is used in `RLTrainer.evaluate`."
)
def left_pad_and_stack(list_of_tensors: Sequence[torch.Tensor], target_len: int):
return torch.stack(
[
torch_ops.left_pad(tensor, target_size=(target_len,), value=tokenizer.pad_token_id)
for tensor in list_of_tensors
]
)
queries = left_pad_and_stack(filtered_queries, query_len)
responses = left_pad_and_stack(filtered_responses, response_len)
self.queries = queries
self.responses = responses
self.query_attn_masks = queries.ne(tokenizer.pad_token_id).long()
def __getitem__(self, i):
return dict(queries=self.queries[i], responses=self.responses[i], query_attn_masks=self.query_attn_masks[i])
def __len__(self):
return len(self.queries)
@dataclasses.dataclass
class DataCollatorForStackableDataset(object):
def __call__(self, instances: Sequence[Dict]) -> Dict[str, Tensor]:
return {key: torch.stack([instance[key] for instance in instances]) for key in instances[0].keys()}
| alpaca_farm-main | src/alpaca_farm/data_preprocessor.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes that are shared across different algorithms.
WARNING:
Do not tamper with the state_dict function for any of these classes.
If you tamper, make sure the keys are the same, otherwise FSDP will get confused.
"""
import abc
from typing import Dict, Optional
import torch
import transformers
from torch import Tensor, nn
from .. import common, logging, torch_ops
logger = logging.get_logger(__name__)
class Policy(nn.Module, abc.ABC):
def __init__(
self, args, base_model: transformers.PreTrainedModel, base_tokenizer: transformers.PreTrainedTokenizer
):
super().__init__()
self.args = args
self.base_model = base_model
self.base_tokenizer = base_tokenizer
@abc.abstractmethod
def forward(
self,
queries: Tensor,
query_attn_masks: Tensor,
responses: Tensor,
temperature: Optional[float] = None,
) -> Dict[str, Tensor]:
raise NotImplementedError
def respond(
self,
queries: Tensor,
query_attn_masks: Tensor,
temperature: Optional[float] = None,
num_return_sequences=1,
) -> Dict[str, Tensor]:
        assert not self.training, "Policy must be in eval mode for generation."
return self._post_respond(self._respond(queries, query_attn_masks, temperature, num_return_sequences))
@abc.abstractmethod
def _respond(
self, queries: Tensor, query_attn_masks: Tensor, temperature: Optional[float] = None, num_return_sequences=1
) -> Dict[str, Tensor]:
raise NotImplementedError
def _post_respond(self, respond_outputs: Dict[str, Tensor]) -> Dict[str, Tensor]:
return respond_outputs
class AutoregressivePolicy(Policy):
def forward(
self,
queries: Tensor,
query_attn_masks: Tensor,
responses: Tensor,
temperature: Optional[float] = None,
) -> Dict[str, Tensor]:
# TODO(lxuechen): Refactor attention mask. Here query_attn_masks overrides padding-based attention mask.
if temperature is None:
temperature = self.args.temperature
input_ids = torch.cat([queries, responses], dim=1)
attention_mask = input_ids.ne(self.base_tokenizer.pad_token_id)
attention_mask[:, : queries.size(1)] = query_attn_masks
# Fix position id issues and ensure consistency with `respond` for GPT and OPT.
inputs = self.base_model.prepare_inputs_for_generation(
input_ids=input_ids,
attention_mask=attention_mask,
use_cache=False,
)
outputs = self.base_model(**inputs, output_hidden_states=True)
original_logits = outputs.logits[:, -self.args.response_len - 1 : -1]
logits = original_logits / temperature
labels = input_ids[:, -self.args.response_len :]
logprobs = torch_ops.compute_logprobs(logits, labels, ignore_index=self.base_tokenizer.pad_token_id)
entropies = -(logits.softmax(dim=-1) * logits.log_softmax(dim=-1)).sum(dim=-1)
last_hidden_state = outputs.hidden_states[-1][:, -self.args.response_len - 1 : -1]
return dict(
original_logits=original_logits,
logits=logits,
logprobs=logprobs,
entropies=entropies,
last_hidden_state=last_hidden_state,
)
def _respond(
self,
queries: Tensor,
query_attn_masks: Tensor,
temperature: Optional[float] = None,
num_return_sequences=1,
) -> Dict[str, Tensor]:
if temperature is None:
temperature = self.args.temperature
sequences = self.base_model.generate(
inputs=queries,
attention_mask=query_attn_masks,
do_sample=True,
max_new_tokens=self.args.response_len,
pad_token_id=self.base_tokenizer.pad_token_id,
top_p=1.0,
top_k=0,
temperature=temperature,
num_return_sequences=num_return_sequences,
synced_gpus=True,
)
responses = torch_ops.right_pad(
sequences[:, queries.size(1) :],
target_size=(sequences.size(0), self.args.response_len),
value=self.base_tokenizer.pad_token_id,
)
return dict(responses=responses) # Size (bsz * num_return_sequences, response_len).
class Value(nn.Module, abc.ABC):
def __init__(
self, args, base_model: transformers.PreTrainedModel, base_tokenizer: transformers.PreTrainedTokenizer
):
super().__init__()
self.args = args
self.base_model = base_model
self.base_tokenizer = base_tokenizer
hidden_size = common.get_transformer_hidden_size(base_model)
value_head = torch.nn.Linear(hidden_size, 1)
value_head.weight.data.zero_()
value_head.bias.data.zero_()
self.value_head = value_head.to(next(base_model.parameters()).device)
@abc.abstractmethod
def forward(self, queries: Tensor, query_attn_masks: Tensor, responses: Tensor) -> Dict[str, Tensor]:
raise NotImplementedError
class AutoregressiveValue(Value):
def forward(self, queries: Tensor, query_attn_masks: Tensor, responses: Tensor) -> Dict[str, Tensor]:
sequences = torch.cat([queries, responses], dim=1)
sequence_attn_masks = sequences.ne(self.base_tokenizer.pad_token_id)
inputs = self.base_model.prepare_inputs_for_generation(
input_ids=sequences,
attention_mask=sequence_attn_masks,
use_cache=False,
)
outputs = self.base_model.model(**inputs, return_dict=True)
# value[t]: \hat{V}(sequences_{:t-1}); must align with `_estimate_advantage`.
last_hidden_state = outputs.last_hidden_state[:, queries.size(1) - 1 : -1]
values = self.value_head(last_hidden_state).squeeze(-1)
return dict(values=values)
class ActorCritic(nn.Module):
def __init__(self, policy: Policy, value_model: Value):
super(ActorCritic, self).__init__()
self.policy = policy
self.value_model = value_model
def forward(
self,
queries: Tensor,
query_attn_masks: Tensor,
responses: Tensor,
temperature: Optional[float] = None,
) -> Dict[str, Tensor]:
# Assume the policy and value model share the same tokenizer.
o1 = self.policy(queries, query_attn_masks, responses, temperature)
o2 = self.value_model(queries, query_attn_masks, responses)
return {**o1, **o2}
def respond(
self, queries: Tensor, query_attn_masks: Tensor, temperature: Optional[float] = None
) -> Dict[str, Tensor]:
return self.policy.respond(queries=queries, query_attn_masks=query_attn_masks, temperature=temperature)
def make_policy_with_base_model(
args, base_model: transformers.PreTrainedModel, base_tokenizer: transformers.PreTrainedTokenizer
) -> Policy:
if base_model.config.is_encoder_decoder:
raise NotImplementedError
else:
return AutoregressivePolicy(args, base_model, base_tokenizer)
def make_value_with_base_model(
args,
base_model: transformers.PreTrainedModel,
base_tokenizer: transformers.PreTrainedTokenizer,
) -> Value:
if base_model.config.is_encoder_decoder:
raise NotImplementedError
else:
return AutoregressiveValue(args, base_model, base_tokenizer)
| alpaca_farm-main | src/alpaca_farm/models/rl_models.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alpaca_farm-main | src/alpaca_farm/models/__init__.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import transformers
from torch import Tensor, nn
from transformers.utils.generic import ModelOutput
from .. import common
class RewardConfig(transformers.PretrainedConfig):
model_type = "reward_model"
# Huggingface doesn't allow non-kwargs for `__init__`.
def __init__(self, backbone_model_name_or_path=None, **kwargs):
super(RewardConfig, self).__init__(**kwargs)
self.backbone_model_name_or_path = backbone_model_name_or_path
self._name_or_path = backbone_model_name_or_path
class RewardModelOutput(ModelOutput):
rewards: Tensor = None
class RewardModel(transformers.PreTrainedModel):
config_class = RewardConfig
def __init__(self, config: RewardConfig, **kwargs):
super(RewardModel, self).__init__(config)
self.backbone_model = common.make_generative_lm(config.backbone_model_name_or_path, **kwargs)
hidden_size = common.get_transformer_hidden_size(self.backbone_model)
reward_head = nn.Linear(hidden_size, 1)
torch.nn.init.zeros_(reward_head.bias)
self.reward_head = reward_head.to(next(self.backbone_model.parameters()).device)
def forward(self, input_ids, attention_mask=None, return_dict=True, **kwargs):
# We only compute the rewards and don't compute the logistic regression loss in this function so that it's
# easier to use for later stages of reranking / RL training.
outputs = self.backbone_model.model(
input_ids=input_ids, attention_mask=attention_mask, return_dict=True, **kwargs
)
last_hidden_state = outputs.last_hidden_state
last_hidden_state_at_the_end = last_hidden_state[:, -1, :]
# TODO(lxuechen): Make returning rewards at all positions and last_hidden_state an option.
rewards = self.reward_head(last_hidden_state_at_the_end).squeeze(-1)
return RewardModelOutput(rewards=rewards) if return_dict else (rewards,)
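# Illustrative usage sketch for scoring text with a constructed reward model; the caller supplies
# the tokenizer and model, and the prompt string below is a placeholder.
def _demo_reward_scoring(reward_model: RewardModel, tokenizer: transformers.PreTrainedTokenizer):
    batch = tokenizer(["Instruction: say hi\n\nResponse: hi"], return_tensors="pt", padding=True)
    with torch.inference_mode():
        output = reward_model(input_ids=batch.input_ids, attention_mask=batch.attention_mask)
    return output.rewards  # Shape (batch_size,): one scalar reward per sequence.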
| alpaca_farm-main | src/alpaca_farm/models/reward_model.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .eval import PairwiseAutoAnnotator, alpaca_leaderboard
| alpaca_farm-main | src/alpaca_farm/auto_annotations/__init__.py |
import json
import logging
from pathlib import Path
from typing import Any, Optional, Sequence, Union
import alpaca_eval.annotators as eval_annotators
import alpaca_eval.utils as eval_utils
import datasets
import pandas as pd
from alpaca_eval import metrics
from .. import constants
__all__ = ["alpaca_leaderboard", "PairwiseAutoAnnotator"]
CURRENT_DIR = Path(__file__).parent
ANNOTATORS_CONFIG_DIR = CURRENT_DIR / "annotators"
PRECOMPUTED_LEADERBOARD = {
"annotator_pool_v0/configs.yaml": {
# Internal codename: rlhf_llama_7b_regen_v7_3ep_v12_ckpt_20
"RLHF PPO": {
"n_draws": 9.0,
"n_total": 805.0,
"n_wins": 392.0,
"n_wins_base": 404.0,
"standard_error": 1.753281981205392,
"win_rate": 49.25465838509317,
},
# Internal codename: sft_v6_52k_llama_7b_regen_v7_3ep_recover
"SFT 52k (Alpaca 7B)": {
"n_draws": 16.0,
"n_total": 805.0,
"n_wins": 312.0,
"n_wins_base": 477.0,
"standard_error": 1.707927043869429,
"win_rate": 39.75155279503105,
},
# Internal codename: sft_v6_llama_7b_regen_v7_3ep
"SFT 10k": {
"n_draws": 19.0,
"n_total": 802.0,
"n_wins": 278.00,
"n_wins_base": 505.00,
"standard_error": 1.67,
"win_rate": 35.85,
},
"Davinci001": {
"n_draws": 0.0,
"n_total": 805.0,
"n_wins": 201.0,
"n_wins_base": 604.0,
"standard_error": 1.5264851835334794,
"win_rate": 24.96894409937888,
},
"ChatGPT": {
"n_draws": 9.0,
"n_total": 805.0,
"n_wins": 503.0,
"n_wins_base": 293.0,
"standard_error": 1.6920642123984606,
"win_rate": 63.04347826086957,
},
"LLaMA 7B": {
"n_draws": 0.0,
"n_total": 775.0,
"n_wins": 98.0,
"n_wins_base": 677.0,
"standard_error": 1.1946348760380694,
"win_rate": 12.645161290322582,
},
"GPT4": {
"n_draws": 17.0,
"n_total": 804.0,
"n_wins": 631.0,
"n_wins_base": 156.0,
"standard_error": 1.4002932714785454,
"win_rate": 79.53980099502488,
},
}
}
# TODO: alpaca_leaderboard could also be replaced with alpaca_eval functions
def alpaca_leaderboard(
path_or_all_outputs: Union[eval_utils.AnyData, eval_utils.AnyPath],
annotators_config: eval_utils.AnyPath = "annotator_pool_v0/configs.yaml",
name: str = "Current method",
is_add_reference_methods: bool = True,
is_print_metrics: bool = False,
**kwargs,
) -> pd.DataFrame:
"""Add the given model to the Alpaca leaderboard.
Parameters
----------
path_or_all_outputs : str or list of dict
The outputs of the model to add to the leaderboard as a list of dictionaries, or a path to list of JSON. Each
dictionary (or row) should contain the following keys: `instruction`, `input`, and `output`.
annotators_config : str, optional
The path to the annotator's config file. For details see the docstring of `PairwiseAutoAnnotator`.
name : str, optional
The name of the model to add to the leaderboard.
is_add_reference_methods : bool, optional
Whether to add the Alpaca reference methods to the leaderboard.
is_print_metrics : bool, optional
Whether to print the metrics.
kwargs :
Additional arguments to pass to `PairwiseAutoAnnotator`.
"""
try:
with open(path_or_all_outputs) as f:
all_outputs = json.load(f)
logging.info(f"Loaded outputs from {path_or_all_outputs}.")
except:
all_outputs = path_or_all_outputs
if is_add_reference_methods:
all_metrics = PRECOMPUTED_LEADERBOARD[annotators_config]
else:
all_metrics = dict()
outputs_baseline = datasets.load_dataset(
"tatsu-lab/alpaca_farm",
"alpaca_farm_evaluation",
cache_dir=constants.DEFAULT_CACHE_DIR,
)["eval"]
if len(all_outputs) != 805:
logging.warning(
f"""You gave {len(all_outputs)} outputs, but there are 805 examples in Alpaca Eval.
We are computing the metrics on all examples you gave."""
)
outputs_1 = eval_utils.load_or_convert_to_dataframe(outputs_baseline)
outputs_2 = eval_utils.load_or_convert_to_dataframe(all_outputs)
annotator = PairwiseAutoAnnotator(annotators_config=annotators_config, **kwargs)
annotated = annotator.annotate_head2head(outputs_1=outputs_1, outputs_2=outputs_2)
all_metrics[name] = metrics.pairwise_to_winrate(preferences=[a["preference"] for a in annotated])
df_results = pd.DataFrame(all_metrics).T.sort_values(by="win_rate", ascending=False)
    if is_print_metrics:
        print(df_results.to_string(float_format="%.2f"))
    return df_results
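# Illustrative usage sketch: the output path below is a hypothetical placeholder; each JSON record
# needs "instruction", "input", and "output" keys, and running this calls the configured annotators.
def _demo_leaderboard_call(path_to_outputs="outputs/my_model_eval.json"):
    return alpaca_leaderboard(
        path_or_all_outputs=path_to_outputs,
        name="My method",
        is_print_metrics=False,
    )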
class PairwiseAutoAnnotator(eval_annotators.PairwiseAnnotator):
def __init__(
self,
annotators_config: Union[eval_utils.AnyPath, list[dict[str, Any]]] = "annotator_pool_v0",
input_keys: Sequence[str] = ("instruction", "input"),
p_label_flip: Optional[float] = None,
base_dir: eval_utils.AnyPath = ANNOTATORS_CONFIG_DIR,
other_keys_to_keep: Sequence[str] = tuple(),
**kwargs,
):
super().__init__(
annotators_config=annotators_config,
input_keys=input_keys,
p_label_flip=p_label_flip,
base_dir=base_dir,
other_keys_to_keep=other_keys_to_keep,
**kwargs,
)
@property
def SingleAnnotator(self):
return SinglePairwiseAutoAnnotator
class SinglePairwiseAutoAnnotator(eval_annotators.SinglePairwiseAnnotator):
def _get_prompt_template(self, prompt_template: dict[str, str]):
# prompt_template will now be a dictionary of prompt templates of len 2 (one with and one without input)
_get_prompt_template = super()._get_prompt_template
return {k: _get_prompt_template(prompt) for k, prompt in prompt_template.items()}
def make_prompts(self, df_to_annotate, prompt_template=None):
if prompt_template is None:
prompt_template = self.prompt_template
arr_is_inputs = (df_to_annotate["input"] != "") & (df_to_annotate["input"].notnull())
df_with_inputs = df_to_annotate[arr_is_inputs]
df_without_inputs = df_to_annotate[~arr_is_inputs]
prompts, df = super().make_prompts(
df_without_inputs,
prompt_template=prompt_template["without_inputs"],
)
if arr_is_inputs.any():
prompts_i, df_i = super().make_prompts(
df_with_inputs,
prompt_template=prompt_template["with_inputs"],
)
prompts += prompts_i
df = pd.concat([df, df_i], axis=0, ignore_index=True)
return prompts, df
| alpaca_farm-main | src/alpaca_farm/auto_annotations/eval.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Callable, List, Optional, Tuple, Union
import einops
import torch
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from torch import nn
from transformers.modeling_outputs import BaseModelOutputWithPast
from transformers.models.llama import modeling_llama
from .. import utils
from . import apex_patch, tensor_ops
logger = logging.getLogger(__name__)
def rotate_half(x):
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_embedding(q, k, cos, sin):
cos, sin = cos.to(q.dtype), sin.to(q.dtype)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
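# Illustrative sanity check: rotary embeddings rotate paired feature dimensions of q and k by a
# position-dependent angle; with cos=1 and sin=0 (angle zero) the inputs pass through unchanged.
def _demo_rotary_identity():
    q, k = torch.randn(2, 1, 8), torch.randn(2, 1, 8)
    cos, sin = torch.ones(2, 1, 8), torch.zeros(2, 1, 8)
    q_rot, k_rot = apply_rotary_embedding(q, k, cos, sin)
    assert torch.allclose(q_rot, q) and torch.allclose(k_rot, k)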
class LlamaAttention(modeling_llama.LlamaAttention):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config=config)
def forward( # noqa
self,
hidden_states: torch.Tensor, # (total_nnz, hidden_size).
seqlens: torch.Tensor, # (bsz,).
cu_seqlens: torch.Tensor, # (bsz+1,).
rotary_tensors: tuple[torch.Tensor, torch.Tensor],
# position_ids is only used for non-flash version, when past_key_value is not None. For flash version,
# rotary_tensors already takes positions into account.
position_ids: Optional[torch.Tensor] = None,
# Crucial loop invariant: We assume past_key_value (input/output) is always in padded format.
# More precisely, each tensor is of size (bsz, num_heads, seqlen, head_dim).
# Otherwise we can't extend it with the current key/value embedding through torch.cat easily.
past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
use_cache=False,
attention_mask_k: Optional[torch.Tensor] = None,
pad_back: Optional[Callable] = None,
):
if past_key_value is None:
# (total_nnz, hidden_size) -> (total_nnz, num_heads, head_dim).
query_states, key_states, value_states = [
einops.rearrange(func(hidden_states), "t (h d) -> t h d", h=self.num_heads)
for func in (self.q_proj, self.k_proj, self.v_proj)
]
query_states, key_states = apply_rotary_embedding(query_states, key_states, *rotary_tensors)
qkv = torch.stack([query_states, key_states, value_states], dim=1)
assert qkv.dtype in (
torch.float16,
torch.bfloat16,
), f"Flash attention expected mixed precision. But found qkv dtype: {qkv.dtype}"
attn_output = flash_attn_unpadded_qkvpacked_func(
qkv=qkv,
cu_seqlens=cu_seqlens,
max_seqlen=seqlens.max(),
dropout_p=0.0,
causal=True,
softmax_scale=self.head_dim**-0.5,
)
attn_output = einops.rearrange(attn_output, "t h d -> t (h d)")
attn_output = self.o_proj(attn_output)
if use_cache:
key_states, value_states = tuple(
einops.rearrange(pad_back(tensor), "b s h d -> b h s d") for tensor in (key_states, value_states)
)
past_key_value = (key_states, value_states)
return attn_output, None, past_key_value
else:
return super(LlamaAttention, self).forward( # noqa
hidden_states=hidden_states,
attention_mask=attention_mask_k,
position_ids=position_ids,
past_key_value=past_key_value,
use_cache=use_cache,
)
class LlamaDecoderLayer(modeling_llama.LlamaDecoderLayer):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config=config)
del self.self_attn
self.self_attn = LlamaAttention(config=config)
def forward( # noqa
self,
hidden_states: torch.Tensor,
seqlens: torch.Tensor,
cu_seqlens: torch.Tensor,
rotary_tensors: tuple[torch.Tensor, torch.Tensor],
position_ids: torch.Tensor,
past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
use_cache=False,
attention_mask_k: Optional[torch.Tensor] = None,
pad_back: Optional[Callable] = None,
):
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights, present_key_value = self.self_attn( # noqa
hidden_states=hidden_states,
seqlens=seqlens,
cu_seqlens=cu_seqlens,
rotary_tensors=rotary_tensors,
position_ids=position_ids,
past_key_value=past_key_value,
use_cache=use_cache,
attention_mask_k=attention_mask_k,
pad_back=pad_back,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = apex_patch.apex_rmsnorm(self.post_attention_layernorm, hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value,)
return outputs
class LlamaModel(modeling_llama.LlamaModel):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config=config)
self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
self._cache_rotary_embeddings()
def _cache_rotary_embeddings(self, max_position_embeddings=2048, base=10000):
dim = self.config.hidden_size // self.config.num_attention_heads
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.max_seq_len_cached = max_position_embeddings
t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Differs from the paper in that it uses a different permutation to obtain the same calculation.
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos(), persistent=False) # (seqlen, head_dim).
self.register_buffer("sin_cached", emb.sin(), persistent=False) # (seqlen, head_dim).
def _make_rotary_tensors(self, position_ids: torch.Tensor):
# position_ids only affects the cos and sin applied to the query and key embeddings.
# flash path: position_ids size = (total_nnz,); cos sin size = (total_nnz, 1, head_dim)
# nonflash path: we don't create rotary tensors here, and rely on the builtin RotaryEmbedding.
# this assumes position_ids size = (bsz, seqlen).
assert position_ids.dim() == 1
# (total_nnz, 1, head_dim)
cos, sin = [tensor[position_ids].unsqueeze(1) for tensor in (self.cos_cached, self.sin_cached)]
return cos, sin
def forward( # noqa
self,
input_ids: torch.LongTensor,
attention_mask: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
assert not output_attentions
assert inputs_embeds is None
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = self.embed_tokens(input_ids)
execute_flash = past_key_values is None
if execute_flash:
if position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
is_selected = attention_mask == 1
position_ids = torch.cat([t[i] for t, i in utils.zip_(position_ids, is_selected)])
rotary_tensors = self._make_rotary_tensors(position_ids)
hidden_states, pad_back, cu_seqlens_q, max_seqlen_q = tensor_ops.unpad_input(hidden_states, attention_mask)
attention_mask_k = None
else:
if position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids[:, -1].unsqueeze(-1)
rotary_tensors = None
hidden_states, pad_back, cu_seqlens_q, max_seqlen_q = hidden_states, lambda x: x, None, None
# Broadcast assumes query_len == 1.
attention_mask_k = torch.zeros(
size=attention_mask.size(), dtype=hidden_states.dtype, device=hidden_states.device
).masked_fill(~attention_mask.bool(), torch.tensor(torch.finfo(hidden_states.dtype).min))[:, None, None, :]
all_hidden_states = () if output_hidden_states else None
next_decoder_cache = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (pad_back(hidden_states),)
past_key_value = past_key_values[idx] if past_key_values is not None else None
layer_outputs = decoder_layer(
hidden_states=hidden_states,
seqlens=attention_mask.sum(dim=1),
cu_seqlens=cu_seqlens_q,
rotary_tensors=rotary_tensors,
position_ids=position_ids,
past_key_value=past_key_value,
use_cache=use_cache,
attention_mask_k=attention_mask_k,
pad_back=pad_back,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[1],)
hidden_states = apex_patch.apex_rmsnorm(self.norm, hidden_states)
hidden_states = pad_back(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if return_dict:
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
)
return tuple(v for v in (hidden_states, next_cache, all_hidden_states) if v is not None)
class LlamaForCausalLM(modeling_llama.LlamaForCausalLM):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config)
self.model = LlamaModel(config)
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
):
if past_key_values:
input_ids = input_ids[:, -1:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
if past_key_values is None: # flash path
position_ids = attention_mask.long().cumsum(-1) - 1
is_selected = attention_mask == 1
position_ids = torch.cat(
[
this_position_ids[this_is_selected]
for this_position_ids, this_is_selected in utils.zip_(position_ids, is_selected)
]
)
else: # non-flash path
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids[:, -1].unsqueeze(-1)
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
}
)
return model_inputs
| alpaca_farm-main | src/alpaca_farm/flash_models/flash_llama.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
import torch
from flash_attn import bert_padding
def pad_to_multiples_of_x(tensor: torch.Tensor, x: int = 8):
"""Pad a tensor along the batch dimension to a multiple of x."""
total_nnz, hidden_size = tensor.size()
pad_len = (x - total_nnz % x) % x
if pad_len != 0:
tensor = torch.cat(
[
tensor,
torch.zeros([pad_len, hidden_size], device=tensor.device, dtype=tensor.dtype),
],
dim=0,
)
def unpad_x(padded_tensor):
return padded_tensor[:-pad_len] if pad_len > 0 else padded_tensor
return tensor, unpad_x
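# A minimal usage sketch of the padding helper above; the sizes are arbitrary example values.
def _pad_to_multiples_sketch():
    tensor = torch.randn(10, 4)  # 10 rows is not a multiple of 8.
    padded, unpad = pad_to_multiples_of_x(tensor, x=8)
    assert padded.size(0) == 16  # Rounded up to the next multiple of 8 with zero rows.
    assert torch.equal(unpad(padded), tensor)  # `unpad` strips the zero rows again.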
def unpad_input(padded: torch.Tensor, attention_mask: torch.Tensor) -> tuple[torch.Tensor, Callable, torch.Tensor, int]:
"""Wrapper for unpad_input in official flash-attn."""
batch_size, padded_seqlen = padded.shape[:2]
unpadded, indices, cu_seqlens, max_seqlen = bert_padding.unpad_input(padded, attention_mask)
def pad_back(unpadded: torch.Tensor):
return bert_padding.pad_input(unpadded, indices, batch_size, padded_seqlen)
return unpadded, pad_back, cu_seqlens, max_seqlen
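# A minimal round-trip sketch of unpad_input/pad_back, assuming the usual convention that
# `attention_mask` marks valid tokens with 1; shapes and values are arbitrary examples.
def _unpad_roundtrip_sketch():
    batch_size, seqlen, hidden_size = 2, 5, 8
    padded = torch.randn(batch_size, seqlen, hidden_size)
    attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
    unpadded, pad_back, cu_seqlens, max_seqlen = unpad_input(padded, attention_mask)
    assert unpadded.size(0) == int(attention_mask.sum())  # Only valid tokens remain.
    assert pad_back(unpadded).shape == padded.shape  # Padded layout restored; pad slots zero-filled.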
| alpaca_farm-main | src/alpaca_farm/flash_models/tensor_ops.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alpaca_farm-main | src/alpaca_farm/flash_models/__init__.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, List, Optional, Tuple, Union
import einops
import torch
import transformers
from flash_attn.flash_attn_interface import flash_attn_unpadded_func
from torch import nn
from transformers.models.opt import modeling_opt
from transformers.utils import logging
from . import apex_patch, tensor_ops
logger = logging.get_logger(__name__)
class OPTDecoderLayer(modeling_opt.OPTDecoderLayer):
def forward( # noqa
self,
# (bsz x seqlen, hidden_size) or (bsz, 1, hidden_size) if past_key_value is not None.
hidden_states: torch.Tensor,
pad_back: Callable,
cu_seqlens_q: Optional[torch.Tensor] = None,
max_seqlen_q: Optional[int] = None,
# Crucial loop invariant: We assume past_key_value (input/output) is always in padded format.
# More precisely, each tensor is of size (bsz, seqlen, hidden_size).
# Otherwise we can't extend it with the current key/value embedding through torch.cat easily.
past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask_k: Optional[torch.Tensor] = None, # (bsz, seqlen+1,).
use_cache=False,
):
residual = hidden_states
hidden_states = apex_patch.apex_layernorm(self.self_attn_layer_norm, hidden_states)
query = self.self_attn.q_proj(hidden_states)
key = self.self_attn.k_proj(hidden_states)
value = self.self_attn.v_proj(hidden_states)
num_heads, head_dim = self.self_attn.num_heads, self.self_attn.head_dim
if past_key_value is None: # hidden_states should be in unpadded format to run flash-attn.
query, key, value = tuple(
einops.rearrange(tensor, "nnz (h d) -> nnz h d", h=num_heads, d=head_dim)
for tensor in (query, key, value)
)
hidden_states = flash_attn_unpadded_func(
q=query,
k=key,
v=value,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_q,
max_seqlen_q=max_seqlen_q,
max_seqlen_k=max_seqlen_q,
dropout_p=(self.self_attn.dropout if self.training else 0.0),
causal=True,
softmax_scale=self.self_attn.scaling,
)
hidden_states = einops.rearrange(hidden_states, "nnz h d -> nnz (h d)")
else: # hidden_states should be in padded format.
query = query * self.self_attn.scaling
key = torch.cat([past_key_value[0], key], dim=1)
value = torch.cat([past_key_value[1], value], dim=1)
query_states = einops.rearrange(query, "b s (h d) -> (b h) s d", h=num_heads, d=head_dim)
key_states = einops.rearrange(key, "b l (h d) -> (b h) l d", h=num_heads, d=head_dim)
value_states = einops.rearrange(value, "b l (h d) -> (b h) l d", h=num_heads, d=head_dim)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
attn_weights = (
# attention_mask_k broadcast correctness assumes query_len == 1.
einops.rearrange(attn_weights, "(b h) s l -> b h s l", h=num_heads)
+ attention_mask_k[:, None, None, :]
)
attn_weights = einops.rearrange(attn_weights, "b h s l -> (b h) s l")
if attn_weights.dtype == torch.float16:
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16)
else:
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
hidden_states = torch.bmm(attn_probs, value_states)
hidden_states = einops.rearrange(hidden_states, "(b h) s d -> b s (h d)", h=num_heads, d=head_dim)
# Below requires pytorch 2.0. Installing pytorch 2.0 however may break other packages.
# Only migrate when things become more stable.
# hidden_states = F.scaled_dot_product_attention(
# query=query,
# key=key,
# value=value,
# attn_mask=attention_mask_k[:, None, None, :].bool(), # This assumes query_len == 1.
# dropout_p=(self.self_attn.dropout if self.training else 0.0),
# causal=False,
# )
# hidden_states = einops.rearrange(hidden_states, "b h s d -> b s (h d)")
hidden_states = self.self_attn.out_proj(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = apex_patch.apex_layernorm(self.final_layer_norm, hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if use_cache:
if past_key_value is None:
key, value = tuple(
einops.rearrange(pad_back(tensor), "b s h d -> b s (h d)", h=num_heads, d=head_dim)
for tensor in (key, value)
)
present_key_value = (key, value) # (bsz, seqlen+1, hidden_size).
outputs += (present_key_value,)
return outputs
class OPTDecoder(modeling_opt.OPTDecoder):
def __init__(self, config: modeling_opt.OPTConfig):
super().__init__(config)
self.layers = nn.ModuleList([OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)])
self.post_init()
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, transformers.models.opt.modeling_opt.BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# This simplified fast implementation only supports a subset of configurations.
# We also ignore use_cache, but we don't assert that because it's True at training time
# (even though it's not actually used) and I don't know how to set it to False at training time only.
# We can add support for specific configurations as needed.
assert attention_mask is not None
assert output_attentions is False
assert head_mask is None
assert self.gradient_checkpointing is False
assert inputs_embeds is None
assert self.final_layer_norm is not None
assert self.project_in is None
assert self.project_out is None
assert self.layerdrop == 0
for layer in self.layers:
assert layer.do_layer_norm_before is True
# past_key_values is a list of tuples (key, value). key/value each of size (bsz, seqlen, hidden_size).
past_key_values_length = past_key_values[0][0].shape[1] if past_key_values is not None else 0
# Embed inputs and positions
input_ids = input_ids.view(-1, input_ids.shape[-1])
inputs_embeds = self.embed_tokens(input_ids)
pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
assert (
inputs_embeds.size() == pos_embeds.size()
), "Internal error: inputs_embeds and pos_embeds not of same shape."
hidden_states = inputs_embeds + pos_embeds
if past_key_values_length == 0:
# Unpad hidden states: (bsz, seqlen, hidden_size) -> (total_nnz, hidden_size)
hidden_states, pad_back, cu_seqlens_q, max_seqlen_q = tensor_ops.unpad_input(hidden_states, attention_mask)
attention_mask_k = None
else:
hidden_states, pad_back, cu_seqlens_q, max_seqlen_q = hidden_states, lambda x: x, None, None
attention_mask_k = torch.zeros(
size=attention_mask.size(), dtype=inputs_embeds.dtype, device=inputs_embeds.device
).masked_fill(~attention_mask.bool(), torch.tensor(torch.finfo(inputs_embeds.dtype).min))
next_decoder_cache = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
for idx, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (pad_back(hidden_states),)
past_key_value = past_key_values[idx] if past_key_values is not None else None
layer_outputs = layer(
hidden_states=hidden_states,
pad_back=pad_back,
cu_seqlens_q=cu_seqlens_q,
max_seqlen_q=max_seqlen_q,
past_key_value=past_key_value,
attention_mask_k=attention_mask_k,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[1],)
hidden_states = apex_patch.apex_layernorm(self.final_layer_norm, hidden_states)
hidden_states = pad_back(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if return_dict:
return transformers.models.opt.modeling_opt.BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
)
return tuple(v for v in (hidden_states, next_cache, all_hidden_states) if v is not None)
class OPTModel(modeling_opt.OPTModel):
def __init__(self, config: modeling_opt.OPTConfig):
super().__init__(config)
self.decoder = OPTDecoder(config)
self.post_init()
class OPTForCausalLM(modeling_opt.OPTForCausalLM):
def __init__(self, config):
super().__init__(config)
self.model = OPTModel(config)
self.post_init()
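# A minimal sketch of the additive key-padding bias built in OPTDecoder.forward above: masked
# positions are filled with the dtype's most negative value so they vanish after softmax.
# The mask below is an arbitrary example.
def _attention_mask_k_sketch():
    attention_mask = torch.tensor([[1, 1, 0]])
    bias = torch.zeros(attention_mask.size(), dtype=torch.float16).masked_fill(
        ~attention_mask.bool(), torch.tensor(torch.finfo(torch.float16).min)
    )
    assert bias[0, 0] == 0 and bias[0, 2] == torch.finfo(torch.float16).min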
| alpaca_farm-main | src/alpaca_farm/flash_models/flash_opt.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import logging
logger = logging.get_logger(__name__)
try:
import apex
apex_is_installed = True
logger.warning("`apex` is installed. Using fused operators.")
except ImportError:
apex_is_installed = False
logger.warning("`apex` is not installed. Reverting to non-fused operators.")
def apex_layernorm(ln_module, input_):
if apex_is_installed:
return apex.normalization.fused_layer_norm.FusedLayerNormAffineFunction.apply(
input_, ln_module.weight, ln_module.bias, ln_module.normalized_shape, ln_module.eps
)
else:
return ln_module(input_)
def apex_rmsnorm(ln_module, input_):
if apex_is_installed:
return apex.normalization.fused_layer_norm.FusedRMSNormAffineFunction.apply(
input_, ln_module.weight, ln_module.weight.size(), ln_module.variance_epsilon
)
else:
return ln_module(input_)
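# A minimal sketch of the fallback path, assuming `apex` is not installed: both wrappers then
# simply delegate to the wrapped normalization module. The shapes are arbitrary examples.
def _layernorm_fallback_sketch():
    import torch
    from torch import nn
    ln = nn.LayerNorm(16)
    x = torch.randn(4, 16)
    assert apex_layernorm(ln, x).shape == x.shape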
| alpaca_farm-main | src/alpaca_farm/flash_models/apex_patch.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alpaca_farm-main | src/alpaca_farm/inference/__init__.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import dataclasses
import math
import sys
from typing import Callable, List, Optional, Sequence, Tuple, Union
import einops
import torch
import tqdm
import transformers
from .. import common, constants, distributed_utils, logging, torch_ops, utils
logger = logging.get_logger(__name__)
@dataclasses.dataclass
class NullCharCleanUp(object):
def __call__(self, string: str):
return string.replace("\x00", "")
def __repr__(self):
return "NullCharCleanUp cleans up the NULL chars to prevent db write failures due to encoding discrepancy."
def load_model_and_tokenizer_for_inference(
model_name_or_path: str,
cache_dir=constants.DEFAULT_CACHE_DIR,
model_cls=transformers.AutoModelForCausalLM,
model_kwargs: Optional[dict] = None,
tokenizer_kwargs: Optional[dict] = None,
resize_token_embeddings_if_mismatch=True,
) -> Tuple[transformers.PreTrainedModel, transformers.PreTrainedTokenizer]:
"""Load huggingface model and tokenizer from path or with name for inference.
This function should only be used for decoding or reward scoring.
Notes:
- This function is only guaranteed to work correctly when loading admissible model families,
i.e., opt and llama.
- Loaded models are in eval mode.
- By default, this function internally shrinks the model embedding size to avoid generating out of vocab tokens.
Models like OPT are by default created with embedding size that's divisible by 64, even though the vocab
size is not. This is to help with training speed, but can be problematic when generating, i.e., there is
a low probability of generating out of vocab ids (especially for untrained models).
- By default, loaded models are on the device specified by LOCAL_RANK or cpu.
- This behavior can be overridden by passing `device_map` to model_kwargs.
- By default, loaded tokenizers are slow tokenizers in left padding mode.
- This behavior can be overridden by passing `use_fast` and `padding_side` to tokenizer_kwargs.
"""
logger.warning(f"Loading model for inference: {model_name_or_path}")
local_rank, world_size = distributed_utils.setup()
device = torch.device("cuda", local_rank) if torch.cuda.is_available() else torch.device("cpu")
default_model_kwargs = dict(low_cpu_mem_usage=True, device_map={"": device}, cache_dir=cache_dir)
if model_kwargs is None:
model_kwargs = default_model_kwargs
else:
default_model_kwargs.update(model_kwargs) # Make possible overriding default_model_kwargs.
model_kwargs = default_model_kwargs
default_tokenizer_kwargs = dict(padding_side="left", use_fast=False, cache_dir=cache_dir)
if tokenizer_kwargs is None:
tokenizer_kwargs = default_tokenizer_kwargs
else:
default_tokenizer_kwargs.update(tokenizer_kwargs)
tokenizer_kwargs = default_tokenizer_kwargs
model = model_cls.from_pretrained(model_name_or_path, **model_kwargs).eval()
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path, **tokenizer_kwargs)
if tokenizer.pad_token is None:
        # Base LLaMA does not ship with a pad token; the same may hold for other pretrained models.
tokenizer.add_special_tokens({"pad_token": constants.DEFAULT_PAD_TOKEN})
if isinstance(model, (transformers.OPTForCausalLM, transformers.LlamaForCausalLM)):
input_embedding_size = model.get_input_embeddings().weight.size(0)
num_tokens = len(tokenizer)
if input_embedding_size != num_tokens and resize_token_embeddings_if_mismatch:
logger.warning(
f"Model embedding size {input_embedding_size} is not equal to vocab size {num_tokens}. "
f"Shrinking/growing embedding size. "
"This is okay if your previous embeddings were inflated to a multiple of 64 for faster computation. "
"But generally, be cautious! This may cause unexpected behavior!!!"
)
utils.stable_resize_token_embeddings(model, num_tokens)
return model, tokenizer
@dataclasses.dataclass
class HFDecodingArguments:
"""Only the core args for decoding with HF models."""
top_p: float = 0.9
top_k: int = 0
temperature: float = 1.0
do_sample: bool = True
num_beams: int = 1
max_new_tokens: int = 100 # This is aligned with `openai_utils.OpenAIDecodingArguments`.
num_return_sequences: int = 1
@torch.inference_mode()
def decode_prompts_with_huggingface_given_model(
model: transformers.PreTrainedModel,
tokenizer: transformers.PreTrainedTokenizer,
prompts: Sequence[str],
decoding_args: HFDecodingArguments,
per_device_batch_size=20,
mixed_precision: Optional[str] = None,
max_instances=sys.maxsize,
pad_to_length=2048, # Force pad to this length for distributed communication to work.
tf32=True,
force_multisample_format: bool = False,
cleanup_funcs: Optional[Sequence[Callable]] = (NullCharCleanUp(),),
divide_work: bool = True,
internal_batch_return_sequences: Optional[int] = None,
seed: Optional[int] = None,
communication_num_chunks=1,
tokenization_batch_size=1000,
**decoding_kwargs,
) -> Union[List[List[str]], List[str]]:
"""Decode from a given model a sequence of string prompts."""
if seed is not None:
utils.manual_seed(seed)
torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = tf32 # noqa
local_rank, world_size = distributed_utils.setup()
device = torch.device("cuda", local_rank) if torch.cuda.is_available() else torch.device("cpu")
model.generate = common.cast_with_native_amp(model.generate, mixed_precision=mixed_precision)
logger.warning(f"mixed_precision = {mixed_precision}")
generate_kwargs = copy.deepcopy(decoding_args.__dict__)
generate_kwargs.update(
dict(eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, synced_gpus=world_size > 1)
)
generate_kwargs.update(decoding_kwargs) # Possibly overwrite default values for `pad_token_id` and `eos_token_id`.
prompts = prompts[:max_instances]
ori_data_size = len(prompts)
    # Make the number of prompts a multiple of world_size * per_device_batch_size by padding with the last prompt.
if world_size > 1 and divide_work:
multiple_of = world_size * per_device_batch_size
else:
multiple_of = per_device_batch_size
new_data_size = multiple_of * int(math.ceil(ori_data_size / multiple_of))
new_prompts = list(prompts) + [prompts[-1]] * (new_data_size - ori_data_size)
if world_size > 1 and divide_work: # divide into chunks
per_worker_size = new_data_size // world_size
new_prompts = new_prompts[local_rank * per_worker_size : (local_rank + 1) * per_worker_size]
# TODO(lxuechen): Refactor to tokenize upfront. This way we can pad with tokenizer, and not worry ourselves.
completions = []
for batch_idx, start_idx in tqdm.tqdm(
enumerate(range(0, len(new_prompts), per_device_batch_size)), # Increase the index by the actual batch size.
desc="decoding batches",
total=len(new_prompts) // per_device_batch_size,
disable=not distributed_utils.is_main_process(),
):
batch = new_prompts[start_idx : start_idx + per_device_batch_size]
source = tokenizer(batch, return_tensors="pt", padding=True)
source = common.prepare_inputs(source, device=device)
inputs, attention_mask = source.input_ids, source.attention_mask
if batch_idx == 0: # FSDP is buggy; we do a forward pass first to make it happy
model(input_ids=inputs, attention_mask=attention_mask)
if (
internal_batch_return_sequences is not None
and internal_batch_return_sequences < decoding_args.num_return_sequences
):
# we batch along the num_return_sequences dimension to avoid OOM errors
# usually, return_sequences is dimension (NxR, L) where N is the batch size and R is the number of
# return sequences
# we split this into batches of size (NxR', L) where R' is the number of return sequences in each batch
batch_generate_kwargs = copy.deepcopy(generate_kwargs)
# initialize the list of return sequences for each prompt
sequences = []
for internal_start_idx in range(
0, generate_kwargs["num_return_sequences"], internal_batch_return_sequences
):
internal_batch_size = batch_generate_kwargs["num_return_sequences"] = min(
internal_batch_return_sequences, generate_kwargs["num_return_sequences"] - internal_start_idx
)
internal_batch_sequences = model.generate(
inputs=inputs,
attention_mask=attention_mask,
**batch_generate_kwargs,
)
if not model.config.is_encoder_decoder:
internal_batch_sequences = internal_batch_sequences[:, inputs.shape[1] :]
internal_batch_sequences = torch_ops.right_pad(
internal_batch_sequences,
(internal_batch_sequences.size(0), pad_to_length),
value=tokenizer.pad_token_id,
)
                # einops rearrange: (n d) l -> n d l
internal_batch_sequences = einops.rearrange(
internal_batch_sequences, "(n d) l -> n d l", d=internal_batch_size
)
# append the return sequences for each prompt
sequences.append(internal_batch_sequences)
# concatenate the return sequences for each prompt
sequences = torch.cat(sequences, dim=1)
sequences = einops.rearrange(
sequences,
"n d l -> (n d) l",
)
else:
if internal_batch_return_sequences is not None:
logger.warning(
f"internal_batch_return_sequences ({internal_batch_return_sequences}) >= "
f"num_return_sequences ({decoding_args.num_return_sequences}). Not batching over return sequences."
)
sequences = model.generate(inputs=inputs, attention_mask=attention_mask, **generate_kwargs)
if not model.config.is_encoder_decoder:
sequences = sequences[:, inputs.shape[1] :]
sequences = torch_ops.right_pad(sequences, (sequences.size(0), pad_to_length), value=tokenizer.pad_token_id)
out_of_bound_mask = sequences >= len(tokenizer)
if out_of_bound_mask.any():
logger.fatal(f"Found tokens outside the vocabulary: {sequences[out_of_bound_mask]}")
completions.append(sequences.cpu())
completions = torch.cat(completions, dim=0)
if world_size > 1 and divide_work:
torch.cuda.empty_cache()
logger.info(f"RANK {local_rank} starting all_gather with {communication_num_chunks} communication_num_chunks")
mine = einops.rearrange(completions, "(n d) l -> n d l", d=generate_kwargs["num_return_sequences"])
chunks = torch.chunk(mine, chunks=communication_num_chunks, dim=1)
all_chunk_list = [
distributed_utils.all_gather_and_cat(chunk.contiguous().to(device), dim=0).cpu() for chunk in chunks
]
completions = torch.cat(all_chunk_list, dim=1)
completions = einops.rearrange(completions, "n d l -> (n d) l")
logger.info(
f"RANK {local_rank} Start tokenizer batch decoding {completions.size(0)} sequences", main_process_only=False
)
    # Chunk completions into batches of size `tokenization_batch_size` and batch-decode them.
text_sequences = []
for start_idx in tqdm.trange(0, completions.size(0), tokenization_batch_size):
text_sequences.extend(
tokenizer.batch_decode(
completions[start_idx : start_idx + tokenization_batch_size],
skip_special_tokens=True,
)
)
if cleanup_funcs is not None:
for cleanup_func in cleanup_funcs:
text_sequences = [cleanup_func(s) for s in text_sequences]
logger.info(f"RANK {local_rank} Finished tokenizer batch decoding and cleaning", main_process_only=False)
# convert the list into a nested list of consecutive `num_return_sequences` items, if > 1.
if decoding_args.num_return_sequences > 1 or force_multisample_format:
text_sequences = [
text_sequences[i : i + decoding_args.num_return_sequences]
for i in range(0, len(text_sequences), decoding_args.num_return_sequences)
]
text_sequences = text_sequences[:ori_data_size]
return text_sequences
def decode_prompts_with_huggingface(
model_name_or_path: str,
prompts: Sequence[str],
decoding_args: HFDecodingArguments,
cache_dir=constants.DEFAULT_CACHE_DIR,
per_device_batch_size=20,
mixed_precision: Optional[str] = None,
max_instances=sys.maxsize,
pad_to_length=2048, # Force pad to this length for distributed communication to work.
tf32=True,
force_multisample_format: bool = False,
seed: Optional[int] = None,
communication_num_chunks: int = 1,
**decoding_kwargs,
) -> Union[List[List[str]], List[str]]:
"""Decode from a huggingface model given a sequence of string prompts.
Args:
prompts: A sequence of string prompts.
decoding_args: Decoding arguments.
model_name_or_path: The name or path of the huggingface model. If it is a path, the directory location should also store
the tokenizer.
per_device_batch_size: The batch size per device for decoding.
cache_dir: The directory to cache the huggingface model.
mixed_precision: Whether to use mixed precision. If None, no casting will be performed.
max_instances: The maximum number of prompts to decode.
pad_to_length: The token length to pad the prompts. This is necessary for and only used in distributed decoding.
tf32: Whether to use tensorfloat32 for matrix multiplication.
force_multisample_format: Whether to force the outputs to be in the multisample format.
seed: The random seed. If None, this function is generally not deterministic, unless the seed is fixed outside.
communication_num_chunks: Number of chunks to create for final communication.
Increase this to reduce the size of the chunk per communication.
**decoding_kwargs: Misc keyword args for `model.generate`.
Setting values here may override the values given by `decoding_args`.
Returns:
A list of string responses, if `num_return_sequences` is 1 and not `force_multisample_format`;
otherwise, a list of lists of string responses.
"""
model, tokenizer = load_model_and_tokenizer_for_inference(
model_name_or_path=model_name_or_path,
cache_dir=cache_dir,
model_kwargs=dict(torch_dtype=utils.convert_str_dtype_to_torch_dtype(mixed_precision)),
)
return decode_prompts_with_huggingface_given_model(
model=model,
tokenizer=tokenizer,
prompts=prompts,
decoding_args=decoding_args,
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
max_instances=max_instances,
pad_to_length=pad_to_length,
tf32=tf32,
force_multisample_format=force_multisample_format,
seed=seed,
communication_num_chunks=communication_num_chunks,
**decoding_kwargs,
)
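# A minimal usage sketch, assuming a GPU is available and that "<path_to_sft_checkpoint>" is a
# placeholder for a real decoder checkpoint; the prompts and decoding values are arbitrary examples.
def _decode_sketch():
    decoding_args = HFDecodingArguments(temperature=0.7, max_new_tokens=64, num_return_sequences=1)
    completions = decode_prompts_with_huggingface(
        model_name_or_path="<path_to_sft_checkpoint>",
        prompts=["Tell me something about alpacas.", "What is a reward model?"],
        decoding_args=decoding_args,
        per_device_batch_size=2,
        mixed_precision="bf16",
    )
    assert len(completions) == 2  # One string per prompt when num_return_sequences == 1.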
| alpaca_farm-main | src/alpaca_farm/inference/decode.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
from typing import List, Optional, Sequence, Tuple
import einops
import torch
import tqdm
import transformers
from torch import nn
from .. import common, constants, distributed_utils, logging, utils
from ..models import reward_model
from .decode import load_model_and_tokenizer_for_inference
logger = logging.get_logger(__name__)
@torch.inference_mode()
def score_sequences_with_huggingface_given_model(
model: nn.Module,
tokenizer: transformers.PreTrainedTokenizer,
sequences: Sequence[str],
per_device_batch_size=20,
max_instances=sys.maxsize,
mixed_precision: Optional[str] = None,
tf32=False,
divide_work=True,
):
torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = tf32 # noqa
local_rank, world_size = distributed_utils.setup()
device = torch.device("cuda", local_rank) if torch.cuda.is_available() else torch.device("cpu")
model.forward = common.cast_with_native_amp(model.forward, mixed_precision=mixed_precision)
logger.warning(f"mixed_precision = {mixed_precision}")
sequences = sequences[:max_instances]
ori_data_size = len(sequences)
# To make communication work, we round up the dataset to the nearest multiple of the actual batch size.
if world_size > 1 and divide_work:
batch_size = per_device_batch_size * world_size
else:
batch_size = per_device_batch_size
new_data_size = batch_size * int(math.ceil(ori_data_size / batch_size)) # Nearest multiple.
    new_sequences = list(sequences) + [sequences[-1]] * (new_data_size - ori_data_size)  # Pad with the last sequence.
return_rewards = []
for batch_idx, start_idx in tqdm.tqdm(
enumerate(range(0, new_data_size, batch_size)),
desc="evaluating rewards for batches",
total=new_data_size // batch_size,
disable=not distributed_utils.is_main_process(),
):
batch = new_sequences[start_idx : start_idx + batch_size]
if world_size > 1 and divide_work:
local_batch = batch[local_rank * per_device_batch_size : (local_rank + 1) * per_device_batch_size]
else:
local_batch = batch
source = tokenizer(
local_batch,
return_tensors="pt",
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
)
source = common.prepare_inputs(source, device=device)
rewards = model(input_ids=source.input_ids, attention_mask=source.attention_mask).rewards
if world_size > 1 and divide_work:
rewards = distributed_utils.all_gather_and_cat(rewards, dim=0)
return_rewards.extend(rewards.tolist())
return return_rewards[:ori_data_size]
def score_sequences_with_huggingface(
sequences: Sequence[str],
model_name_or_path: str,
per_device_batch_size=20,
cache_dir=constants.DEFAULT_CACHE_DIR,
max_instances=sys.maxsize,
mixed_precision: Optional[str] = None,
tf32=False,
flash_attn=False,
) -> List[float]:
"""Score samples with a reward model.
Args:
sequences: A sequence of strings.
model_name_or_path: Name of the reward model.
per_device_batch_size: The batch size per device for evaluating rewards.
cache_dir: The directory to cache the huggingface model.
max_instances: The maximum number of prompts to rerank.
mixed_precision: Whether to use mixed precision. If None, no casting will be performed.
tf32: Whether to use tensorfloat32 for matrix multiplication.
flash_attn: Turns on flash_attn for the reward model if True.
Returns:
A list of floats representing rewards.
"""
model, tokenizer = load_model_and_tokenizer_for_inference(
model_name_or_path=model_name_or_path,
model_cls=reward_model.RewardModel,
cache_dir=cache_dir,
model_kwargs=dict(
torch_dtype=utils.convert_str_dtype_to_torch_dtype(mixed_precision),
flash_attn=flash_attn,
),
)
return score_sequences_with_huggingface_given_model(
model=model,
tokenizer=tokenizer,
sequences=sequences,
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
max_instances=max_instances,
tf32=tf32,
)
@torch.inference_mode()
def rerank_sequences_with_huggingface(
sequences: Sequence[Sequence[str]],
model_name_or_path: str,
rerank_top_k=1,
per_device_batch_size=20,
cache_dir=constants.DEFAULT_CACHE_DIR,
mixed_precision: Optional[str] = None,
max_instances=sys.maxsize,
tf32=False,
flash_attn=False,
) -> Tuple[List[List[str]], List[List[int]]]:
"""Rerank samples with a reward model.
Args:
sequences: A nested sequence of strings. Each inner sequence contains samples with the same prompt.
model_name_or_path: Name of the reward model.
rerank_top_k: The number of top samples to return.
per_device_batch_size: The batch size per device for evaluating rewards.
cache_dir: The directory to cache the huggingface model.
max_instances: The maximum number of prompts to rerank.
mixed_precision: Whether to use mixed precision. If None, no casting will be performed.
tf32: Whether to use tensorfloat32 for matrix multiplication.
flash_attn: Turns on flash_attn for the reward model if True.
Returns:
A tuple with two entries.
The first is a nested sequence of strings. Each inner sequence contains the top-k samples with the same prompt.
The second is a nested sequence of integers. Each inner sequence contains the indices of the top-k samples.
"""
sequences = sequences[:max_instances]
flat_sequences = [sequence_i_j for sequence_i in sequences for sequence_i_j in sequence_i]
rewards = score_sequences_with_huggingface(
sequences=flat_sequences,
model_name_or_path=model_name_or_path,
per_device_batch_size=per_device_batch_size,
cache_dir=cache_dir,
mixed_precision=mixed_precision,
tf32=tf32,
flash_attn=flash_attn,
)
rewards = einops.rearrange(torch.tensor(rewards), "(b m) -> b m", m=len(sequences[0]))
# Nested list of "size" (data_size, num_options).
top_indices = rewards.topk(rerank_top_k, dim=1).indices.tolist()
top_sequences = [[sequence[i] for i in top_index] for sequence, top_index in utils.zip_(sequences, top_indices)]
return top_sequences, top_indices
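# A minimal usage sketch of best-of-n reranking, assuming "<path_to_reward_model>" is a placeholder
# for a trained RewardModel checkpoint; the candidate strings are arbitrary examples.
def _rerank_sketch():
    candidates = [
        ["prompt A + response 1", "prompt A + response 2"],
        ["prompt B + response 1", "prompt B + response 2"],
    ]
    top_sequences, top_indices = rerank_sequences_with_huggingface(
        sequences=candidates,
        model_name_or_path="<path_to_reward_model>",
        rerank_top_k=1,
    )
    assert len(top_sequences) == 2 and len(top_sequences[0]) == 1  # One winner per prompt.
    assert len(top_indices) == 2 and len(top_indices[0]) == 1  # Index of the winner among its candidates.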
| alpaca_farm-main | src/alpaca_farm/inference/score.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reward conditioning a la QUARK.
For the all-quantiles formulation, during decoding each instance takes the following form (except in the first decoding stage):
<bos_token><reward_cond_token><query><response><eos_token>
E.g.,
<s><reward_0>Tell me something about alpacas.Alpacas are cute.</s>
"""
import contextlib
import os
from typing import Callable, Dict, Sequence, Tuple
import accelerate
import pandas as pd
import torch
import torch.nn.functional as F
import tqdm
import transformers
from torch import nn
from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.utils.data import DataLoader
from transformers.modeling_utils import unwrap_model
from .. import accelerate_patch, common, constants, data_preprocessor, data_utils, logging, utils
from ..models import reward_model as reward_model_module
from ..models import rl_models
from ..types import AnyPath, AnyPathOrNone, LRScheduler, Optional, Tensor
from . import kl_controller, rl_trainer
FIRST_STEP_IDX = 1
logger = logging.get_logger(__name__)
def ignore_tokens(input_ids: Tensor, attention_mask: Tensor, tokens_to_ignore: Sequence[int]):
"""Clear out positions where input_ids has tokens_to_ignore in attention_mask."""
attention_mask = attention_mask.clone()
for token_to_ignore in tokens_to_ignore:
attention_mask[input_ids == token_to_ignore] = 0
return input_ids, attention_mask
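# A minimal sketch of ignore_tokens with arbitrary example ids: positions holding any of
# `tokens_to_ignore` (e.g., the injected reward tokens) are zeroed out in the attention mask.
def _ignore_tokens_sketch():
    input_ids = torch.tensor([[32000, 5, 6, 7]])  # 32000 stands in for a reward token id.
    _, new_mask = ignore_tokens(input_ids, torch.ones_like(input_ids), tokens_to_ignore=[32000])
    assert new_mask.tolist() == [[0, 1, 1, 1]]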
class DataPool(object):
def __init__(self, tokenizer: transformers.PreTrainedTokenizer):
self.tokenizer = tokenizer
self.additional_special_tokens = tokenizer.additional_special_tokens
self.queries = []
self.responses = []
self.rewards = []
def add(self, queries, responses, rewards):
for main_list, this_list in utils.zip_(
(self.queries, self.responses, self.rewards), (queries, responses, rewards)
):
main_list.extend(this_list)
def clear(self):
(self.queries, self.responses, self.rewards) = [], [], []
def sort_and_get(self, train_on_best_quantile=True):
queries, responses, rewards = utils.parallel_sort(
self.queries,
self.responses,
self.rewards,
key=lambda x: x[-1],
reverse=True,
)
size = len(queries)
chunk_sizes = [size // len(self.additional_special_tokens)] * len(self.additional_special_tokens)
chunk_sizes[-1] = chunk_sizes[-1] + size % len(self.additional_special_tokens)
assert sum(chunk_sizes) == size, "Internal error: Sum of chunk sizes doesn't match up with total size."
if train_on_best_quantile: # Don't inject any tokens here.
queries, responses, rewards = tuple(l[: chunk_sizes[0]] for l in (queries, responses, rewards))
else:
injected_tokens = []
for chunk_index, chunk_size in enumerate(chunk_sizes):
injected_tokens.extend([self.additional_special_tokens[chunk_index]] * chunk_size)
queries = [f"{injected_token}{query}" for injected_token, query in utils.zip_(injected_tokens, queries)]
return queries, responses, rewards
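# A minimal sketch of how DataPool buckets rollouts into reward quantiles. A lightweight stand-in
# object replaces a real tokenizer (only `additional_special_tokens` is read here), the reward
# values are arbitrary, and utils.parallel_sort is assumed to sort the three lists jointly by reward.
def _data_pool_sketch():
    import types
    stub_tokenizer = types.SimpleNamespace(additional_special_tokens=["<reward_0>", "<reward_1>"])
    pool = DataPool(stub_tokenizer)
    pool.add(queries=["q1", "q2", "q3", "q4"], responses=["r1", "r2", "r3", "r4"], rewards=[0.1, 0.9, 0.5, 0.7])
    queries, responses, rewards = pool.sort_and_get(train_on_best_quantile=False)
    assert queries[0].startswith("<reward_0>")  # Highest-reward examples get the best-quantile token.
    assert queries[-1].startswith("<reward_1>")  # Lowest-reward examples get the last-quantile token.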
class QuarkTrainer(rl_trainer.RLTrainer):
def __init__(
self,
args,
train_dataset: data_utils.QueryDataset,
eval_dataset: data_utils.QueryDataset,
data_collator: Callable,
policy: nn.Module,
ref_policy: nn.Module,
reward_model: nn.Module,
tokenizer: transformers.PreTrainedTokenizer,
accelerator: accelerate_patch.MyAccelerator,
optimizer: Optional[torch.optim.Optimizer] = None,
lr_scheduler: Optional[LRScheduler] = None,
):
super().__init__(
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
data_collator=data_collator,
policy=policy,
ref_policy=ref_policy,
reward_model=reward_model,
tokenizer=tokenizer,
accelerator=accelerator,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
)
self.data_pool = DataPool(self.tokenizer)
self.entropy_ctl = kl_controller.FixedKLController(kl_coef=args.entropy_coef)
self.sft_dataloader = None # Must be instantiated in `rollout`.
def train(self):
total_epochs = self.args.total_epochs
total_episodes = len(self.train_dataset) * total_epochs # noqa
total_steps = total_episodes // self.args.rollout_batch_size # noqa
logger.warning(
f"***Training starts***\n"
f"Total epochs: {total_epochs} => Total episodes: {total_episodes} => Total steps: {total_steps}",
)
self.create_optimizer_and_scheduler(total_steps)
infinite_train_dataloader = self.get_train_dataloader()
for step_idx in tqdm.tqdm(
range(FIRST_STEP_IDX, total_steps + FIRST_STEP_IDX),
disable=not self.accelerator.is_main_process,
desc="steps",
total=total_steps,
):
if step_idx % self.args.save_steps == 0 or step_idx in self.args.save_steps_extra_list:
self.save_model(utils.join(self.args.output_dir, f"checkpoint-{step_idx}"))
if self.args.eval_steps is not None and step_idx % self.args.eval_steps == 0:
unwrapped_policy = self.accelerator.unwrap_model(self.policy, keep_fp32_wrapper=True)
unwrapped_policy = unwrapped_policy.base_model
self.evaluate(step_idx, unwrapped_policy=unwrapped_policy)
self.log_history.append(self.step(infinite_train_dataloader, step_idx))
return self.log_history
def step(self, train_dataloader, step_idx, **kwargs):
rollouts_dataloader = self.rollout(train_dataloader, step_idx)
stats_list = []
for _ in tqdm.tqdm(
range(self.args.num_gradient_steps_per_step), disable=not self.accelerator.is_main_process, desc="gradstep"
):
for substep_idx in range(1, self.accelerator.gradient_accumulation_steps + 1):
# WARNING: self.accelerator.accumulate can lead to misleading results, since sync_gradients is
# dependent on whether the registered dataloader ends or not (or step % accumulation_steps).
# If your dataloader ends before the last step, gradients are not synced, and the optimizer wants to
# update. This gives you a shape mismatch error.
should_sync = substep_idx == self.accelerator.gradient_accumulation_steps
context = contextlib.nullcontext if should_sync else self.accelerator.no_sync
# no_sync here results in higher memory usage because FSDP will accumulate the full model gradients
# (instead of gradient shards) until the eventual sync.
with context(self.policy):
batch = next(rollouts_dataloader)
loss, stats_for_this_step = self.compute_loss(batch, **kwargs)
self.accelerator.backward(loss)
if should_sync:
if self.args.max_grad_norm is not None:
self.accelerator.clip_grad_norm_(self.policy.parameters(), self.args.max_grad_norm)
stats_for_this_step["loss/grad_norm"] = self._compute_grad_norm()
stats_list.append(stats_for_this_step)
self.accelerator.unwrap_optimizer(self.optimizer).step()
self.policy.zero_grad(set_to_none=True)
if self.lr_scheduler is not None:
self.lr_scheduler.step()
stats = common.merge_dict(stats_list, torch.stack) # list of dict -> dict: str -> 1-D tensor
stats = self.record_step_stats(stats, step_idx=step_idx)
return stats
def compute_loss(
self, batch: Dict[str, Tensor], logprobs_coef=1.0, kl_coef=None, entropy_coef=None
) -> Tuple[Tensor, Dict]:
self.policy.train()
queries, query_attn_masks, responses = common.unpack_dict(
common.prepare_inputs(batch, device=self.accelerator.device),
keys=("queries", "query_attn_masks", "responses"),
return_type=tuple,
)
queries_no_quark, query_attn_masks_no_quark = ignore_tokens(
input_ids=queries,
attention_mask=query_attn_masks,
tokens_to_ignore=self.tokenizer.additional_special_tokens_ids,
)
policy_outputs = self.policy(queries, query_attn_masks, responses, temperature=self.args.temperature)
with torch.inference_mode():
ref_policy_outputs = self.ref_policy(
queries_no_quark, query_attn_masks_no_quark, responses, temperature=self.args.temperature
)
logits, logprobs = common.unpack_dict(policy_outputs, keys=("logits", "logprobs"))
(ref_logits,) = common.unpack_dict(ref_policy_outputs, keys=("logits",))
original_vocab_size = len(self.tokenizer) - self.args.num_reward_tokens
logits, ref_logits = tuple(t[..., :original_vocab_size] for t in (logits, ref_logits))
kl_per_token = F.kl_div(F.log_softmax(ref_logits, dim=-1), F.softmax(logits, dim=-1), reduction="none").sum(
dim=-1
)
entropies = -(logits.softmax(dim=-1) * logits.log_softmax(dim=-1)).sum(dim=-1)
# https://github.com/GXimingLu/Quark/blob/a4baf754de15f4d9675dd394571a7dd35fc0abd9/main.py#L252
assert responses.size() == logprobs.size() == kl_per_token.size() == entropies.size()
masks = responses == self.tokenizer.pad_token_id
kl_per_token.masked_fill_(masks, 0.0)
entropies.masked_fill_(masks, 0.0)
kl_coef = self.kl_ctl.value if kl_coef is None else kl_coef
entropy_coef = self.entropy_ctl.value if entropy_coef is None else entropy_coef
loss = -logprobs * logprobs_coef + kl_per_token * kl_coef - entropies * entropy_coef
loss = loss.mean()
kl_avg_seq = kl_per_token.sum() / (~masks).sum() # noqa
kl_sum_seq = kl_per_token.sum() / kl_per_token.size(0)
stats = dict(
train=dict(
logprobs=logprobs.mean(),
entropies=entropies.mean(),
kl_avg_seq=kl_avg_seq,
kl_sum_seq=kl_sum_seq,
loss=loss,
masks=masks.float().sum(dim=1).mean(dim=0), # noqa
),
)
return loss, common.flatten_dict(stats, sep="/", postprocess_fn=lambda x: x.detach())
def get_train_dataloader(self):
logger.warning(f"Train dataset size: {len(self.train_dataset)}")
train_dataloader = DataLoader(
dataset=self.train_dataset,
collate_fn=self.data_collator,
batch_size=self.args.rollout_per_device_batch_size,
            shuffle=True,  # Shuffling isn't strictly needed here; kept for consistency.
drop_last=True,
)
train_dataloader = self.accelerator.prepare(train_dataloader) # noqa
self._log_batch_size(train_dataloader, "train_dataloader")
return utils.InfiniteLoader(train_dataloader)
@torch.inference_mode()
def rollout(self, train_dataloader: utils.InfiniteLoader, step_idx: int) -> utils.InfiniteLoader:
"""Get responses conditioned on top reward token and add to data pool."""
self.policy.eval()
self._make_fsdp_happy()
unwrapped_policy = self.accelerator.unwrap_model(self.policy, keep_fp32_wrapper=True)
if self.args.clear_data_pool_on_each_rollout:
self.data_pool.clear()
text_queries_all, text_responses_all, rewards_all = [], [], []
for batch_idx in tqdm.tqdm(
range(self.args.rollout_accumulation_steps), disable=not self.accelerator.is_main_process, desc="rollout"
):
batch = next(train_dataloader)
queries, query_attn_masks = common.unpack_dict(
common.prepare_inputs(batch, device=self.accelerator.device), keys=("queries", "query_attn_masks")
)
if step_idx == FIRST_STEP_IDX: # Must ignore the reward token on first generation.
queries, query_attn_masks = ignore_tokens(
input_ids=queries,
attention_mask=query_attn_masks,
tokens_to_ignore=self.tokenizer.additional_special_tokens_ids,
)
respond_outputs = unwrapped_policy.respond(queries, query_attn_masks, temperature=self.args.temperature)
(responses,) = common.unpack_dict(respond_outputs, ("responses",))
# Strings below should not contain reward tokens.
text_queries, text_responses = tuple(
self.tokenizer.batch_decode(tensor, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for tensor in (queries, responses)
)
del queries, responses # Prevent mistakes.
text_sequences = [q + r for q, r in utils.zip_(text_queries, text_responses)]
sequences = self.tokenizer(text_sequences, return_tensors="pt", padding=True, truncation=True)
rewards = self.reward_model(**sequences).rewards
# Nothing in here should contain the reward token!
self.data_pool.add(queries=text_queries, responses=text_responses, rewards=rewards.tolist())
text_queries_all.extend(text_queries)
text_responses_all.extend(text_responses)
rewards_all.extend(rewards.tolist())
if self.accelerator.is_main_process:
rollouts_to_disk = {"queries": text_queries_all, "responses": text_responses_all, "rewards": rewards_all}
rollouts_to_disk = pd.DataFrame(rollouts_to_disk).to_dict(orient="records")
utils.jdump(rollouts_to_disk, utils.join(self.args.output_dir, "rollouts", f"step_{step_idx}.json"))
self.accelerator.log({"train/reward": utils.mean(rewards_all)}, step=step_idx)
text_queries, text_responses, _ = self.data_pool.sort_and_get(self.args.train_on_best_quantile)
rollouts_dataset = data_preprocessor.QueryResponseDataset(
tokenizer=self.tokenizer,
queries=text_queries,
responses=text_responses,
query_len=self.args.query_len,
response_len=self.args.response_len,
)
rollouts_dataloader = DataLoader(
dataset=rollouts_dataset,
collate_fn=data_utils.DataCollatorForStackableDataset(),
batch_size=self.args.step_per_device_batch_size,
shuffle=True,
drop_last=True,
)
rollouts_dataloader = utils.InfiniteLoader(rollouts_dataloader)
return rollouts_dataloader
def record_step_stats(self, stats, step_idx, **kwargs):
for k, v in stats.items():
stats[k] = v.mean(dim=0)
stats = {key: value.item() if torch.is_tensor(value) else value for key, value in stats.items()}
stats["train/kl_coef"] = self.args.kl_coef
stats["train/entropy_coef"] = self.args.entropy_coef
stats["train/lr"] = self.optimizer.param_groups[0]["lr"]
if self.accelerator.is_main_process:
self.accelerator.log(stats, step=step_idx)
return stats
@torch.inference_mode()
def save_model(self, output_dir: Optional[str] = None, give_rw_access=True):
output_dir = self.args.output_dir if output_dir is None else output_dir
utils.makedirs(output_dir)
model, tokenizer = self.policy, self.tokenizer
with FSDP.state_dict_type(
model, StateDictType.FULL_STATE_DICT, FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
):
logger.warning("Gathering full state_dict...")
state_dict = model.state_dict()
logger.warning("Finished gathering full state_dict...")
if self.accelerator.is_main_process:
# Retain and remap policy keys.
new_state_dict = dict()
prefix = "base_model."
for key, value in state_dict.items():
if key.startswith(prefix):
new_state_dict[key[len(prefix) :]] = value
state_dict = new_state_dict
cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
del state_dict
unwrapped = unwrap_model(model).base_model
assert isinstance(
unwrapped, (transformers.OPTForCausalLM, transformers.LlamaForCausalLM)
), f"Expected to save a generative policy, but found model to be of type: {type(unwrapped)}."
if hasattr(unwrapped, "_keys_to_ignore_on_save"):
logger.warning(f"keys to ignore on save: {unwrapped._keys_to_ignore_on_save}")
logger.warning(f"Saving model checkpoint to {output_dir}")
logger.warning(f"Saving {len(cpu_state_dict)} keys:\n{utils.jdumps(cpu_state_dict.keys())}")
unwrapped.save_pretrained(output_dir, state_dict=cpu_state_dict)
tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, constants.TRAINING_ARGS_NAME))
def _make_left_padded_tokenizer(
model_name_or_path: AnyPath,
cache_dir: AnyPathOrNone = constants.DEFAULT_CACHE_DIR,
**kwargs,
) -> transformers.PreTrainedTokenizer:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name_or_path,
cache_dir=cache_dir,
padding_side="left",
**kwargs,
)
if tokenizer.pad_token is None:
tokenizer.add_special_tokens(dict(pad_token=constants.DEFAULT_PAD_TOKEN))
return tokenizer
def make_tokenizer(args):
# policy_tokenizer left pads, since the policy requires batch decoding.
policy_tokenizer = _make_left_padded_tokenizer(
args.policy_model_name_or_path, cache_dir=args.cache_dir, use_fast=args.use_fast_tokenizer
)
    # reward_tokenizer left pads, since we need the embedding of the rightmost non-pad token.
reward_tokenizer = _make_left_padded_tokenizer(
args.reward_model_name_or_path, cache_dir=args.cache_dir, use_fast=args.use_fast_tokenizer
)
if policy_tokenizer.get_vocab() != reward_tokenizer.get_vocab():
raise ValueError("AlpacaFarm does not support different tokenizer for policy and reward models.")
logger.warning(f"Adding {args.num_reward_tokens} reward conditioning tokens for Quark.")
policy_tokenizer.add_special_tokens(
{"additional_special_tokens": [f"<reward_{i}>" for i in range(args.num_reward_tokens)]} # noqa
)
return policy_tokenizer
def make_models(
tokenizer: transformers.PreTrainedTokenizer,
args,
accelerator: accelerate.Accelerator,
):
def make_generative_policy():
base_model = common.make_generative_lm(
model_name_or_path=args.policy_model_name_or_path,
flash_attn=args.flash_attn,
mixed_precision=accelerator.mixed_precision,
cache_dir=args.cache_dir,
low_cpu_mem_usage=True,
device_map={"": accelerator.device},
)
utils.stable_resize_token_embeddings(base_model, len(tokenizer), jitter_new_embeddings=True)
return base_model
def make_reward_model():
return reward_model_module.RewardModel.from_pretrained(
args.reward_model_name_or_path,
flash_attn=args.flash_attn,
mixed_precision=accelerator.mixed_precision,
cache_dir=args.cache_dir,
low_cpu_mem_usage=True,
device_map={"": accelerator.device},
)
policy = rl_models.make_policy_with_base_model(args, make_generative_policy(), tokenizer)
policy = common.prepare_model_for_custom_fn(model=policy, fn_name="respond", accelerator=accelerator)
policy = accelerator.prepare(policy) # noqa
ref_policy = rl_models.make_policy_with_base_model(args, make_generative_policy(), tokenizer)
ref_policy.requires_grad_(False)
ref_policy = accelerator.prepare(ref_policy) # noqa
reward_model = make_reward_model()
reward_model.requires_grad_(False)
reward_model = accelerator.prepare(reward_model)
# TODO: This is a hack to get FSDP running. Remove in the future when this is fixed.
if accelerator.distributed_type == accelerate.DistributedType.FSDP:
inputs = tokenizer("fsdp are you happy now??? :)" * 50, return_tensors="pt")
inputs = {key: value.to(accelerator.device) for key, value in inputs.items()}
policy(inputs["input_ids"], inputs["attention_mask"], inputs["input_ids"])
return dict(policy=policy, ref_policy=ref_policy, reward_model=reward_model)
| alpaca_farm-main | src/alpaca_farm/rl/quark_trainer.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import transformers
from alpaca_farm import distributed_utils
from .. import constants, logging
logger = logging.get_logger(__name__)
@dataclass
class DataArguments:
dataset_path: str = field(default="tatsu-lab/alpaca_farm")
dataset_name: str = field(default="alpaca_instructions")
train_splits: List[str] = field(default_factory=lambda: ["unlabeled"])
eval_splits: List[str] = field(default_factory=lambda: ["val"])
prompt_dict_path: str = field(
default=None,
metadata={"help": "Path to the dictionary for the prompt to format examples."},
)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
wandb_project: str = field(default=constants.WANDB_PROJECT)
cache_dir: Optional[str] = field(default=constants.DEFAULT_CACHE_DIR)
flash_attn: bool = field(default=False)
optim: str = field(default="adamw_torch")
truncate_tokens: Optional[List[str]] = field(
default_factory=lambda: None,
metadata={
"help": "Tokens in strings to truncate at first occurrence. "
"This was used in original OAI summarization paper to avoid models returning incomplete sentences. "
},
)
truncate_after: Optional[int] = field(
default=None, metadata={"help": "Truncate after this number of tokens. Prevents early truncation."}
)
penalty_reward_value: float = field(
default=-1.0,
metadata={
"help": "Reward assigned to sequences that are truncated, "
"e.g., due to outputting incomplete sentences for given context window."
},
)
total_epochs: int = field(default=10)
rollout_batch_size: int = field(default=512)
step_batch_size: int = field(default=256)
rollout_per_device_batch_size: int = field(default=32)
step_per_device_batch_size: int = field(default=2)
adam_epsilon: float = field(
default=1e-5,
metadata={
"help": "Epsilon for AdamW optimizer. "
"This is the default for OAI PPO code and UW Quark code. "
"This is not the Hugging Face default."
},
)
temperature: float = field(default=1.0)
kl_coef: float = field(default=0.2)
target_kl: float = field(default=6.0)
k_beta: float = field(default=0.1)
adaptive_kl: bool = field(default=False)
eval_batches: int = field(default=sys.maxsize, metadata={"help": "Maximum number of batches to evaluate on."})
save_steps_extra: Optional[str] = field(
default=None,
metadata={
"help": "A list of predetermined checkpoints to save, represented in the format 'no1__no2__no3'. "
"Parse this with str.split('__')."
},
)
query_len: int = field(default=192)
response_len: int = field(default=300)
policy_model_name_or_path: str = field(default=None)
reward_model_name_or_path: str = field(default=None)
use_fast_tokenizer: bool = field(
default=False,
metadata={
"help": "Use fast tokenizer if True. "
"Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. "
"Use fast tokenizer only if you can live with that."
},
)
num_reward_tokens: int = field(default=4, metadata={"help": "Number of extra reward conditioning tokens in Quark."})
entropy_coef: float = field(
default=0.0,
metadata={"help": "Entropy regularization coefficient for Quark."},
)
clear_data_pool_on_each_rollout: bool = field(
default=True,
metadata={"help": "If True, clear the data pool before each rollout period for Quark."},
)
train_on_best_quantile: bool = field(
default=True,
metadata={"help": "If True, train only on the examples with best rewards for Quark."},
)
num_gradient_steps_per_step: int = field(
default=1,
metadata={"help": "Number of gradient steps to take per step for Quark."},
)
def __post_init__(self):
        # The superclass' __post_init__ is very complicated; don't call super for now, in case it messes something up.
# super().__post_init__()
if self.tf32: # super().__post_init__() actually does this.
torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = True # noqa
world_size = distributed_utils.get_world_size()
# Checks on rollout_batch_size only matter for PPO.
assert self.rollout_batch_size >= self.rollout_per_device_batch_size * world_size, (
"rollout_batch_size is smaller than rollout_per_device_batch_size * world_size. "
"Increase the former or decrease the latter to fix this."
)
assert (
self.rollout_batch_size % (self.rollout_per_device_batch_size * world_size) == 0
), "rollout_batch_size is not a multiple of rollout_per_device_batch_size * world_size. "
assert self.step_batch_size >= self.step_per_device_batch_size * world_size, (
"step_batch_size is smaller than step_per_device_batch_size * world_size. "
"Increase the former or decrease the latter to fix this."
)
assert (
self.step_batch_size % (self.step_per_device_batch_size * world_size) == 0
), "step_batch_size is not a multiple of step_per_device_batch_size * world_size. "
logger.warning(
f"Rollout stats:\n"
f"\trollout_batch_size: {self.rollout_batch_size}\n"
f"\trollout_per_device_batch_size: {self.rollout_per_device_batch_size}\n"
f"\tworld_size: {world_size}\n",
)
assert (self.rollout_batch_size // self.rollout_per_device_batch_size) % world_size == 0
self.rollout_accumulation_steps = self.rollout_batch_size // self.rollout_per_device_batch_size // world_size
logger.warning(
f"Step stats:\n"
f"\tstep_batch_size: {self.step_batch_size}\n"
f"\tstep_per_device_batch_size: {self.step_per_device_batch_size}\n"
f"\tworld_size: {world_size}\n",
)
assert (self.step_batch_size // self.step_per_device_batch_size) % world_size == 0
self.gradient_accumulation_steps = self.step_batch_size // self.step_per_device_batch_size // world_size
logger.warning(
f"Accumulation steps:\n"
f"\trollout_accumulation_steps: {self.rollout_accumulation_steps}\n"
f"\tgradient_accumulation_steps: {self.gradient_accumulation_steps}\n"
)
if self.save_steps_extra is not None:
self.save_steps_extra_list = [int(string) for string in self.save_steps_extra.split("__")]
else:
self.save_steps_extra_list = []
assert self.num_reward_tokens > 1, "Quark requires at least 2 reward tokens."
def set_truncate_token_ids(self, tokenizer: transformers.PreTrainedTokenizer):
"""Convert truncation token to token ids.
This is called in RLTrainer.
"""
truncate_tokens = self.truncate_tokens
if truncate_tokens is None:
truncate_token_ids = None
else:
truncate_token_ids = tokenizer.convert_tokens_to_ids(truncate_tokens)
self.truncate_token_ids = truncate_token_ids
| alpaca_farm-main | src/alpaca_farm/rl/quark_utils.py |
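A minimal sketch (not part of the repository files) of the accumulation-step arithmetic that __post_init__ above performs, assuming a hypothetical single node with 8 processes; the batch sizes are the dataclass defaults.
rollout_batch_size = 512            # default above
step_batch_size = 256               # default above
rollout_per_device_batch_size = 32  # default above
step_per_device_batch_size = 2      # default above
world_size = 8                      # assumption: one node with 8 GPUs/processes
# A full batch must be an exact multiple of what all devices produce per micro-batch.
assert rollout_batch_size % (rollout_per_device_batch_size * world_size) == 0
assert step_batch_size % (step_per_device_batch_size * world_size) == 0
# Per-device rollout batches collected before one full rollout batch exists.
rollout_accumulation_steps = rollout_batch_size // rollout_per_device_batch_size // world_size
# Per-device micro-batches whose gradients are accumulated per optimizer step.
gradient_accumulation_steps = step_batch_size // step_per_device_batch_size // world_size
print(rollout_accumulation_steps, gradient_accumulation_steps)  # 2 16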
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alpaca_farm-main | src/alpaca_farm/rl/__init__.py |
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Union
import numpy as np
import torch
import torch.distributed as dist
class KLController(abc.ABC):
value: Union[int, float]
def step(self, *args, **kwargs):
pass
class FixedKLController(KLController):
def __init__(self, kl_coef):
super(FixedKLController, self).__init__()
self.value = kl_coef
class AdaptiveKLController(KLController):
def __init__(self, init_kl_coef, target_kl, k_beta, accelerator=None):
super(AdaptiveKLController, self).__init__()
self.value = init_kl_coef
self.target_kl = target_kl
self.k_beta = k_beta
self.accelerator = accelerator
def step(self, current_kl: float):
if self.accelerator is not None:
current_kl = torch.tensor(current_kl, device=self.accelerator.device)
dist.all_reduce(current_kl, op=dist.ReduceOp.SUM)
current_kl = (current_kl / self.accelerator.num_processes).item()
proportional_error = np.clip(current_kl / self.target_kl - 1, -0.2, 0.2)
mult = 1.0 + self.k_beta * proportional_error
self.value *= mult
def make_kl_controller(args, accelerator=None):
if args.adaptive_kl:
return AdaptiveKLController(
init_kl_coef=args.kl_coef,
target_kl=args.target_kl,
k_beta=args.k_beta,
accelerator=accelerator,
)
else:
return FixedKLController(kl_coef=args.kl_coef)
| alpaca_farm-main | src/alpaca_farm/rl/kl_controller.py |
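A small usage sketch (hypothetical KL measurements, not from the repo) for the adaptive controller defined above; when the measured KL exceeds the target the coefficient grows, and the proportional error is clipped to +/-0.2.
kl_ctl = AdaptiveKLController(init_kl_coef=0.2, target_kl=6.0, k_beta=0.1, accelerator=None)
kl_ctl.step(current_kl=12.0)  # clip(12/6 - 1, -0.2, 0.2) = 0.2  -> value *= 1 + 0.1 * 0.2
print(kl_ctl.value)           # ~0.204
kl_ctl.step(current_kl=3.0)   # clip(3/6 - 1, -0.2, 0.2) = -0.2 -> value *= 1 - 0.1 * 0.2
print(kl_ctl.value)           # ~0.19992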
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Callable, Dict, Optional, Tuple
import accelerate
import pandas as pd
import torch
import tqdm
import transformers
from torch import nn
from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from transformers.modeling_utils import unwrap_model
from .. import accelerate_patch, common, constants, data_preprocessor, logging, torch_ops, utils
from ..models import reward_model as reward_model_module
from ..models import rl_models
from ..types import AnyPath, AnyPathOrNone, LRScheduler, Tensor
from . import rl_trainer
logger = logging.get_logger(__name__)
class PPOTrainer(rl_trainer.RLTrainer):
def __init__(
self,
args,
train_dataset: data_preprocessor.QueryDataset,
eval_dataset: data_preprocessor.QueryDataset,
data_collator: Callable,
policy: rl_models.ActorCritic,
ref_policy: rl_models.Policy,
reward_model: nn.Module,
tokenizer: transformers.PreTrainedTokenizer,
accelerator: accelerate_patch.MyAccelerator,
optimizer: Optional[torch.optim.Optimizer] = None,
lr_scheduler: Optional[LRScheduler] = None,
):
super(PPOTrainer, self).__init__(
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
data_collator=data_collator,
policy=policy,
ref_policy=ref_policy,
reward_model=reward_model,
tokenizer=tokenizer,
accelerator=accelerator,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
)
def _shape_reward(
self, rewards: Tensor, responses: Tensor, logprobs: Tensor, ref_logprobs: Tensor
) -> Dict[str, Tensor]:
# For some reason, line below doesn't work.
# kl = (logits.softmax(dim=-1) * (logits.log_softmax(dim=-1) - ref_logits.log_softmax(dim=-1))).sum(dim=-1)
kl = torch.clamp(logprobs - ref_logprobs, min=0.0)
non_score_rewards = -self.kl_ctl.value * kl
shaped_rewards = non_score_rewards.clone()
# This introduces a small off-by-one indexing bug if pad_token_id == eos_token_id.
terminal_positions = (responses != self.tokenizer.pad_token_id).sum(dim=1) - 1
shaped_rewards[list(range(rewards.size(0))), terminal_positions] += rewards
return dict(shaped_rewards=shaped_rewards, non_score_rewards=non_score_rewards, kl=kl)
def _estimate_advantage(self, rewards: Tensor, values: Tensor) -> Dict[str, Tensor]:
"""Generalized advantage estimation.
Reference:
https://arxiv.org/abs/1506.02438
"""
if self.args.whiten_rewards:
rewards = torch_ops.whiten(rewards, shift_mean=False)
lastgaelam = 0
advantages_reversed = []
gen_length = self.args.response_len
for t in reversed(range(gen_length)):
nextvalues = values[:, t + 1] if t < gen_length - 1 else 0.0
delta = rewards[:, t] + self.args.gamma * nextvalues - values[:, t]
lastgaelam = delta + self.args.gamma * self.args.lam * lastgaelam
advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1], dim=1)
returns = advantages + values
advantages = torch_ops.whiten(advantages, shift_mean=True)
return dict(returns=returns, advantages=advantages)
@torch.inference_mode()
def rollout(self, queries_data) -> Dict[str, Tensor]:
"""Rollout trajectories with policy.
Args:
queries_data: Sequence of batches or DataLoader.
Each batch is a dict with keys 'queries' and 'query_attn_masks'.
Returns:
Dictionary with keys
'queries', 'query_attn_masks', 'responses',
'logprobs', 'ref_logprobs', 'values',
'rewards', 'non_score_rewards', 'shaped_rewards'.
"""
# Disable dropout throughout.
self.policy.eval()
self._make_fsdp_happy()
# `keep_fp32_wrapper` retains the autocast wrapper of model.forward created by accelerate:
# recall one sets mixed precision options with accelerator.
# The precise value of this arg doesn't matter here, since we use the unwrapped model only for respond.
# Generally, try to use the wrapped model as much as you can, since it's got the autocast/cast-back wrappers.
unwrapped_policy = self.accelerator.unwrap_model(self.policy, keep_fp32_wrapper=True)
self.ref_policy.eval()
self.reward_model.eval()
rollouts = []
for batch_idx, batch in tqdm.tqdm(
enumerate(queries_data),
disable=not self.accelerator.is_main_process,
desc="rollout",
):
# Sample rollouts.
queries, query_attn_masks = common.unpack_dict(
common.prepare_inputs(batch, device=self.accelerator.device),
keys=("queries", "query_attn_masks"),
)
respond_outputs = unwrapped_policy.respond(queries, query_attn_masks, temperature=self.args.temperature)
(responses,) = common.unpack_dict(respond_outputs, ("responses",))
# Evaluate logprobs of the samples.
rollouts_batch = {"queries": queries, "query_attn_masks": query_attn_masks, "responses": responses}
policy_outputs = self.policy(**rollouts_batch, temperature=self.args.temperature)
ref_policy_outputs = self.ref_policy(**rollouts_batch, temperature=self.args.temperature)
policy_outputs = common.unpack_dict(
policy_outputs, keys=("logprobs", "values", "entropies"), return_type=dict
)
ref_policy_outputs = common.unpack_dict(
ref_policy_outputs, keys=("logprobs", "entropies"), return_type=dict
)
rollouts_batch.update(policy_outputs)
rollouts_batch.update({f"ref_{key}": value for key, value in ref_policy_outputs.items()})
# Evaluate reward of the samples.
text_queries, text_responses = tuple(
self.tokenizer.batch_decode(tensor, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for tensor in (queries, responses)
)
del queries, responses # Prevent mistakes.
# We retokenize, since the policy and reward model might not have the same tokenizer.
# TODO(lxuechen): Avoid retokenization when policy and reward tokenizer are the same.
text_sequences = [q + r for q, r in utils.zip_(text_queries, text_responses)]
# TODO(lxuechen): This response retokenization has issues with OPT, since the tokenizer always prepends
# <bos_token>. But the issue is local to post_reward, which isn't an issue if we don't penalize.
sequences, responses = tuple(
self.tokenizer(text, return_tensors="pt", padding=True, truncation=True)
for text in (text_sequences, text_responses)
)
sequences, responses = common.prepare_inputs((sequences, responses), device=self.accelerator.device)
reward_outputs = self.reward_model(**sequences)
reward_outputs = self.post_reward(reward_outputs, responses.input_ids)
rollouts_batch.update(reward_outputs)
# Shape reward with KL penalty.
shape_reward_outputs = self._shape_reward(
rewards=rollouts_batch["rewards"],
responses=rollouts_batch["responses"],
logprobs=rollouts_batch["logprobs"],
ref_logprobs=rollouts_batch["ref_logprobs"],
)
rollouts_batch.update(shape_reward_outputs)
rollouts_batch_cpu = {key: value.cpu() for key, value in rollouts_batch.items()}
rollouts.append(rollouts_batch_cpu)
# Items in dict need to be of same shape.
rollouts = common.merge_dict(rollouts, merge_fn=torch.cat)
# Estimating advantages outside the loop gives more samples for reward normalization.
advantages = self._estimate_advantage(
rewards=rollouts["shaped_rewards"].to(self.accelerator.device),
values=rollouts["values"].to(self.accelerator.device),
)
advantages = {key: value.cpu() for key, value in advantages.items()}
return {**rollouts, **advantages}
def post_reward(self, reward_outputs: Dict[str, Tensor], responses: Tensor) -> Dict[str, Tensor]:
"""Assign bad reward values to sequences which didn't stop properly."""
if self.args.truncate_token_ids is None:
return reward_outputs
def get_validity_mask(sequences: Tensor, end_token_id: int) -> Tensor:
"""Mark a batch element as False if the sequence doesn't end with `end_token_id` after `truncate_after`."""
assert sequences.dim() == 2
validity_mask = []
for sequence in sequences:
(nonzeros,) = (sequence == end_token_id).nonzero(as_tuple=True)
if len(nonzeros) == 0:
validity_mask.append(False)
else:
validity_mask.append(
self.args.truncate_after is None
or
# Last occurrence of `end_token_id` is after `truncate_after`.
nonzeros[-1] > self.args.truncate_after
)
return torch.tensor(validity_mask, device=sequences.device)
validity_masks = [get_validity_mask(responses, end_token_id) for end_token_id in self.args.truncate_token_ids]
validity_mask = torch.stack(validity_masks).any(dim=0) # Sequence is valid if it ends with any end token.
rewards = reward_outputs["rewards"]
rewards[~validity_mask] = self.args.penalty_reward_value
return reward_outputs
def compute_loss(self, rollouts: Dict[str, Tensor]) -> Tuple[Tensor, Dict]:
values, old_logprob, returns, advantages, queries, query_attn_masks, responses = common.prepare_inputs(
common.unpack_dict(
rollouts,
keys=("values", "logprobs", "returns", "advantages", "queries", "query_attn_masks", "responses"),
),
device=self.accelerator.device,
)
outputs = self.policy(queries, query_attn_masks, responses, temperature=self.args.temperature)
vpred = outputs["values"]
vpredclipped = torch.clamp(
vpred,
min=values - self.args.cliprange_value,
max=values + self.args.cliprange_value,
)
vf_losses1 = (vpred - returns) ** 2.0
vf_losses2 = (vpredclipped - returns) ** 2.0
vf_loss = 0.5 * torch.maximum(vf_losses1, vf_losses2).mean()
vf_clipfrac = (vf_losses2 > vf_losses1).to(torch.get_default_dtype()).mean()
logprob = outputs["logprobs"]
ratio = torch.exp(logprob - old_logprob)
# When current policy is close to the old policy, the KL component of this advantage is approximately correct.
pg_losses = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, min=1.0 - self.args.cliprange, max=1.0 + self.args.cliprange)
pg_loss = torch.maximum(pg_losses, pg_losses2).mean()
pg_clipfrac = (pg_losses2 > pg_losses).to(torch.get_default_dtype()).mean() # noqa
loss = pg_loss + self.args.vf_coef * vf_loss
entropy = outputs["entropies"].mean()
approxkl = 0.5 * ((logprob - old_logprob) ** 2.0).mean()
return_mean, return_var = returns.mean(), returns.var(unbiased=False)
value_mean, value_var = values.mean(), values.var(unbiased=False)
stats = dict(
loss=dict(policy=pg_loss, value=vf_loss, total=loss),
policy=dict(entropy=entropy, approxkl=approxkl, clipfrac=pg_clipfrac),
returns=dict(mean=return_mean, var=return_var),
val=dict(
vpred=vpred.mean(),
error=((vpred - returns) ** 2).mean(),
clipfrac=vf_clipfrac,
mean=value_mean,
var=value_var,
),
)
return loss, common.flatten_dict(stats, sep="/", postprocess_fn=lambda x: x.detach())
def record_step_stats(self, train_stats, rollouts, step_idx, **kwargs):
kl = rollouts["kl"]
kl_sum_seq, kl_avg_seq = kl.sum(dim=1).mean(dim=0), kl.mean()
shaped_rewards = rollouts["shaped_rewards"].sum(dim=1).mean(dim=0)
non_score_rewards = rollouts["non_score_rewards"].sum(dim=1).mean(dim=0)
rewards = rollouts["rewards"].mean(dim=0)
stats = {
f"objective/kl_coef": kwargs["kl_coef"],
f"objective/kl_sum_seq": kl_sum_seq,
f"objective/kl_avg_seq": kl_avg_seq,
f"objective/shaped_rewards": shaped_rewards,
f"objective/non_score_rewards": non_score_rewards,
f"objective/rewards": rewards, # Original model reward.
f"objective/lr": self.optimizer.param_groups[0]["lr"],
f"objective/entropies": rollouts["entropies"].mean(),
f"objective/ref_entropies": rollouts["ref_entropies"].mean(),
}
for k, v in train_stats.items():
stats[f"ppo/{k}"] = v.mean(dim=0)
stats = {key: value.item() if torch.is_tensor(value) else value for key, value in stats.items()}
if self.accelerator.is_main_process:
self.accelerator.log(stats, step=step_idx)
if self.args.output_dir is not None:
# Store rollout data to disk to debug.
rollouts_to_disk = {
key: self.tokenizer.batch_decode(
tensor, skip_special_tokens=False, clean_up_tokenization_spaces=False
)
for key, tensor in common.unpack_dict(
rollouts, keys=("queries", "responses"), return_type=dict
).items()
}
rollouts_to_disk = pd.DataFrame(rollouts_to_disk).to_dict(orient="records")
utils.jdump(rollouts_to_disk, utils.join(self.args.output_dir, "rollouts", f"step_{step_idx}.json"))
return stats
@torch.inference_mode()
def save_model(self, output_dir: Optional[str] = None, give_rw_access=True, check_corrupted=True):
# We don't use the accelerator here because we want to be frugal and only store the policy.
# Moreover, we want easy loadability -- calling .from_pretrained on the folder. Full dump wouldn't allow this.
# Logic:
# 1. Retrieve the complete state dict of the wrapped model.
# (retrieving state dict of submodule can lead to loss of keys)
# 2. Remove keys that are part of the value network.
# 3. Rename keys that are part of the policy network, so that they match the naming standard.
output_dir = self.args.output_dir if output_dir is None else output_dir
utils.makedirs(output_dir)
model, tokenizer = self.policy, self.tokenizer
with FSDP.state_dict_type(
model, StateDictType.FULL_STATE_DICT, FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
):
logger.warning("Gathering full state_dict...")
state_dict = model.state_dict()
logger.warning("Finished gathering full state_dict...")
if self.accelerator.is_main_process:
# Retain and remap policy keys.
new_state_dict = dict()
prefix = "policy.base_model."
for key, value in state_dict.items():
if key.startswith(prefix):
new_state_dict[key[len(prefix) :]] = value
state_dict = new_state_dict
if check_corrupted: # Let the checks run on GPU.
is_corrupted = any(value.isnan().any().item() for value in state_dict.values())
logger.warning(f"Is there nans in the state_dict to be dumped? {is_corrupted}")
cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
del state_dict
unwrapped = unwrap_model(model).policy.base_model
assert isinstance(
unwrapped, (transformers.OPTForCausalLM, transformers.LlamaForCausalLM)
), f"Expected to save a generative policy, but found model to be of type: {type(unwrapped)}."
if hasattr(unwrapped, "_keys_to_ignore_on_save"):
logger.warning(f"keys to ignore on save: {unwrapped._keys_to_ignore_on_save}")
logger.warning(f"Saving model checkpoint to {output_dir}")
logger.warning(f"Saving {len(cpu_state_dict)} keys:\n{utils.jdumps(cpu_state_dict.keys())}")
unwrapped.save_pretrained(output_dir, state_dict=cpu_state_dict)
tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, constants.TRAINING_ARGS_NAME))
if give_rw_access:
try:
os.system(f"chmod -R a+xwr {output_dir}")
except Exception as e:
logger.fatal(f"Failed to give read-write access to {output_dir}: {e}")
def _make_left_padded_tokenizer(
model_name_or_path: AnyPath,
cache_dir: AnyPathOrNone = constants.DEFAULT_CACHE_DIR,
**kwargs,
) -> transformers.PreTrainedTokenizer:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name_or_path,
cache_dir=cache_dir,
padding_side="left",
**kwargs,
)
if tokenizer.pad_token is None:
tokenizer.add_special_tokens(dict(pad_token=constants.DEFAULT_PAD_TOKEN))
return tokenizer
def make_tokenizer(args):
# policy_tokenizer left pads, since the policy requires batch decoding.
policy_tokenizer = _make_left_padded_tokenizer(
args.policy_model_name_or_path, cache_dir=args.cache_dir, use_fast=args.use_fast_tokenizer
)
# reward_tokenizer left pads, since we need the embedding of the rightmost non-pad token.
reward_tokenizer = _make_left_padded_tokenizer(
args.reward_model_name_or_path, cache_dir=args.cache_dir, use_fast=args.use_fast_tokenizer
)
if policy_tokenizer.get_vocab() != reward_tokenizer.get_vocab():
raise ValueError("AlpacaFarm does not support different tokenizer for policy and reward models.")
return policy_tokenizer
def make_models(
tokenizer: transformers.PreTrainedTokenizer,
args,
accelerator: accelerate.Accelerator,
) -> dict:
def make_generative_policy():
base_model = common.make_generative_lm(
model_name_or_path=args.policy_model_name_or_path,
flash_attn=args.flash_attn,
mixed_precision=accelerator.mixed_precision,
cache_dir=args.cache_dir,
low_cpu_mem_usage=True,
device_map={"": accelerator.device},
)
utils.stable_resize_token_embeddings(base_model, len(tokenizer))
return base_model
def make_reward_model():
return reward_model_module.RewardModel.from_pretrained(
args.reward_model_name_or_path,
flash_attn=args.flash_attn,
mixed_precision=accelerator.mixed_precision,
cache_dir=args.cache_dir,
low_cpu_mem_usage=True,
device_map={"": accelerator.device},
)
# Model construction below seems convoluted, but it's made to trade time for RAM efficiency.
# For large models, object creation could be extremely RAM intensive.
# Especially so for multiple processes on a single node, each starting off with a copy of the model.
# The general strategy is to 1) create a model, 2) move it to the target device / shard it, 3) then start the next model,
# as opposed to creating all needed models on CPU first, and separately moving / sharding each.
policy = rl_models.make_policy_with_base_model(args, make_generative_policy(), tokenizer)
if args.init_value_with_reward:
# Initialize value from reward model a la OAI.
logger.warning("Initializing value model with reward model.")
value_model = rl_models.make_value_with_base_model(args, make_reward_model().backbone_model, tokenizer)
else:
logger.warning("Initializing value model with policy model.")
# Initialize value from policy. Works for sanity, but generally performs worse in instruction-following.
value_model = rl_models.make_value_with_base_model(args, make_generative_policy(), tokenizer)
actor_critic = rl_models.ActorCritic(policy=policy, value_model=value_model)
# We cast how respond should run. It's important the dtypes be consistent with training, since a bf16
# fine-tuned model might not work with fp16 inference.
# Cast step below must precede accelerator.prepare(), since wrapped model might not have `respond` method.
actor_critic = common.prepare_model_for_custom_fn(model=actor_critic, fn_name="respond", accelerator=accelerator)
actor_critic = accelerator.prepare(actor_critic) # noqa
ref_policy = rl_models.make_policy_with_base_model(args, make_generative_policy(), tokenizer)
ref_policy.requires_grad_(False)
ref_policy = accelerator.prepare(ref_policy) # noqa
reward_model = make_reward_model()
reward_model.requires_grad_(False)
reward_model = accelerator.prepare(reward_model)
# TODO: This is a hack to get FSDP running. Remove in the future when this is fixed.
if accelerator.distributed_type == accelerate.DistributedType.FSDP:
inputs = tokenizer("fsdp are you happy now??? :)" * 50, return_tensors="pt")
inputs = {key: value.to(accelerator.device) for key, value in inputs.items()}
actor_critic(inputs["input_ids"], inputs["attention_mask"], inputs["input_ids"])
return dict(policy=actor_critic, ref_policy=ref_policy, reward_model=reward_model)
| alpaca_farm-main | src/alpaca_farm/rl/ppo_trainer.py |
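A self-contained sketch (made-up numbers, whitening omitted) of the generalized advantage estimation recursion used in PPOTrainer._estimate_advantage above: delta_t = r_t + gamma * V_{t+1} - V_t and A_t = delta_t + gamma * lam * A_{t+1}, computed backwards over the response.
import torch
gamma, lam = 1.0, 1.0                      # the defaults used for PPO here
rewards = torch.tensor([[0.0, 0.0, 1.0]])  # shaped reward arrives at the last token
values = torch.tensor([[0.2, 0.5, 0.9]])
gen_length = rewards.size(1)
lastgaelam = 0.0
advantages_reversed = []
for t in reversed(range(gen_length)):
    nextvalues = values[:, t + 1] if t < gen_length - 1 else 0.0
    delta = rewards[:, t] + gamma * nextvalues - values[:, t]
    lastgaelam = delta + gamma * lam * lastgaelam
    advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1], dim=1)
returns = advantages + values
print(advantages)  # ~[[0.8, 0.5, 0.1]]
print(returns)     # ~[[1.0, 1.0, 1.0]]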
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from pathlib import Path
from typing import Callable, Dict, Optional, Sequence, Tuple
import torch
import torch.distributed as dist
import tqdm
import transformers
from accelerate import DistributedType
from accelerate.optimizer import AcceleratedOptimizer
from torch import nn
from torch.distributed.fsdp import ShardingStrategy
from torch.utils.data import DataLoader, TensorDataset
from transformers.trainer_utils import enable_full_determinism, set_seed
from .. import accelerate_patch, common, data_preprocessor, logging, trainer_utils, utils
from ..inference import decode, score
from ..types import LRScheduler, Tensor
from . import kl_controller
FIRST_STEP_IDX = 1
logger = logging.get_logger(__name__)
class RLTrainer(object):
def __init__(
self,
args,
train_dataset: data_preprocessor.QueryDataset,
eval_dataset: data_preprocessor.QueryDataset,
data_collator: Callable,
policy: nn.Module,
ref_policy: nn.Module,
reward_model: nn.Module,
tokenizer: transformers.PreTrainedTokenizer,
accelerator: accelerate_patch.MyAccelerator,
optimizer: Optional[torch.optim.Optimizer] = None,
lr_scheduler: Optional[LRScheduler] = None,
):
super(RLTrainer, self).__init__()
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.data_collator = data_collator
self.policy = policy
self.ref_policy = ref_policy
self.reward_model = reward_model
self.tokenizer = tokenizer
self.optimizer = optimizer
self.accelerator = accelerator
self.lr_scheduler = lr_scheduler
self.kl_ctl = kl_controller.make_kl_controller(args, self.accelerator)
self.log_history = []
self.args.set_truncate_token_ids(self.tokenizer)
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
@abc.abstractmethod
@torch.inference_mode()
def rollout(self, queries_data) -> Dict[str, Tensor]:
raise NotImplementedError
@abc.abstractmethod
def compute_loss(self, rollouts: Dict[str, Tensor]) -> Tuple[Tensor, Dict]:
raise NotImplementedError
@abc.abstractmethod
@torch.inference_mode()
def record_step_stats(self, train_stats, rollouts, step_idx, **kwargs):
raise NotImplementedError
@property
def optimizable_params(self):
return [p for p in self.policy.parameters() if p.requires_grad and p.grad is not None]
@torch.inference_mode()
def _compute_grad_norm(self):
grad_norm = torch.stack([p.grad.norm(2) for p in self.optimizable_params]).norm(2)
if (
self.accelerator.distributed_type == DistributedType.FSDP
and self.policy.sharding_strategy != ShardingStrategy.NO_SHARD
):
# When parameters are sharded, we need to gather each grad norm and then aggregate.
grad_norms = [torch.zeros_like(grad_norm) for _ in range(self.accelerator.num_processes)]
dist.all_gather(grad_norms, grad_norm)
grad_norm = torch.stack(grad_norms).norm(2)
return grad_norm
@torch.inference_mode()
def _compute_param_norm(self):
param_norm = torch.stack([p.norm(2) for p in self.optimizable_params]).norm(2)
if (
self.accelerator.distributed_type == DistributedType.FSDP
and self.policy.sharding_strategy != ShardingStrategy.NO_SHARD
):
# When parameters are sharded, we need to gather each param norm and then aggregate.
param_norms = [torch.zeros_like(param_norm) for _ in range(self.accelerator.num_processes)]
dist.all_gather(param_norms, param_norm)
param_norm = torch.stack(param_norms).norm(2)
return param_norm
def _make_fsdp_happy(self):
"""Simply do a forward pass with the wrapped model at first.
FSDP has some weird bugs; need this flush before running a non-forward method!
This function should assume grad context of caller and
not be wrapped with `torch.no_grad` or `torch.enable_grad`!!!
"""
if self.accelerator.distributed_type == DistributedType.FSDP:
inputs = self.tokenizer("fsdp are you happy now? :)" * 50, return_tensors="pt")
inputs = common.prepare_inputs(inputs, device=self.accelerator.device)
self.policy(inputs["input_ids"], inputs["attention_mask"], inputs["input_ids"])
def step_with_rollouts(self, rollouts):
"""Based on fixed rollouts, run PPO for multiple epochs."""
assert isinstance(self.optimizer, AcceleratedOptimizer), (
"`optimizer` must be pushed through `accelerator.prepare`. "
"Otherwise the `accelerator.accumulate` context manager won't correctly disable `zero_grad` or `step`."
)
rollouts_dataloader = self.get_rollouts_dataloader(rollouts=rollouts)
stats_list = []
for epoch_idx in range(self.args.noptepochs):
for batch_idx, rollouts_batch in tqdm.tqdm(
enumerate(rollouts_dataloader, 1), disable=not self.accelerator.is_main_process, desc="gradstep"
):
with self.accelerator.accumulate(self.policy):
ppo_loss, stats_for_this_step = self.compute_loss(rollouts_batch)
self.accelerator.backward(ppo_loss)
if self.accelerator.sync_gradients:
# Gradient norm almost blows up at some point, but stabilizes eventually, even w/o clipping.
if self.args.max_grad_norm is not None:
self.accelerator.clip_grad_norm_(self.policy.parameters(), self.args.max_grad_norm)
stats_for_this_step["loss/grad_norm"] = self._compute_grad_norm()
stats_list.append(stats_for_this_step)
self.optimizer.step()
self.optimizer.zero_grad(set_to_none=True)
return common.merge_dict(stats_list, torch.stack) # list of dict -> dict: str -> 1-D tensor
def step(self, train_dataloader, step_idx: int):
queries_batches = [next(train_dataloader) for _ in range(self.args.rollout_accumulation_steps)]
rollouts = self.rollout(queries_batches)
train_stats = self.step_with_rollouts(rollouts)
if self.lr_scheduler is not None:
self.lr_scheduler.step()
stats = self.record_step_stats(
rollouts=rollouts, train_stats=train_stats, step_idx=step_idx, kl_coef=self.kl_ctl.value
)
self.kl_ctl.step(stats["objective/kl_sum_seq"])
return stats
def create_optimizer_and_scheduler(self, num_training_steps: int):
optimizer = trainer_utils.create_optimizer(args=self.args, model=self.policy, optimizer=self.optimizer)
lr_scheduler = trainer_utils.create_scheduler(
args=self.args, optimizer=optimizer, lr_scheduler=self.lr_scheduler, num_training_steps=num_training_steps
)
self.optimizer, self.lr_scheduler = self.accelerator.prepare(optimizer, lr_scheduler)
self.accelerator.register_for_checkpointing(self.lr_scheduler) # LR scheduler needs another call to save.
return self.optimizer, self.lr_scheduler
def train(self):
"""Entry point for training."""
total_epochs = self.args.total_epochs
total_episodes = len(self.train_dataset) * total_epochs # noqa
total_steps = total_episodes // self.args.rollout_batch_size # noqa
logger.warning(
f"***Training starts***\n"
f"Total epochs: {total_epochs} => Total episodes: {total_episodes} => Total steps: {total_steps}"
)
self.create_optimizer_and_scheduler(total_steps)
infinite_train_dataloader = self.get_train_dataloader()
for step_idx in tqdm.tqdm(
range(FIRST_STEP_IDX, total_steps + FIRST_STEP_IDX),
disable=not self.accelerator.is_main_process,
desc="steps",
total=total_steps,
):
if step_idx % self.args.save_steps == 0 or step_idx in self.args.save_steps_extra_list:
self.save_model(utils.join(self.args.output_dir, f"checkpoint-{step_idx}"))
if self.args.eval_steps is not None and step_idx % self.args.eval_steps == 0:
self.evaluate(step_idx)
self.log_history.append(self.step(infinite_train_dataloader, step_idx))
return self.log_history
@torch.inference_mode()
def evaluate(self, step_idx: int, unwrapped_policy=None):
"""Evaluate by generating sequences with test prefixes.
FSDP compat: all devices should do the forward pass, since sharded params need to be summoned.
Only write results in the main process.
"""
# TODO: unhardcode inference args.
logger.warning(f"Start evaluation at step: {step_idx}", main_process_only=True)
prompts, list_dict_data = self.eval_dataset.prompts, self.eval_dataset.list_dict_data
if any(item is None for item in (prompts, list_dict_data)):
logger.warning("No evaluation data, skipping evaluation.", main_process_only=True)
return
# Constants.
model_name = Path(self.args.output_dir).stem # Don't use the helper in common, as no checkpoint is saved yet.
model_name_at_step = f"{model_name}_ckpt_{step_idx}"
temperature = 0.7
del model_name
# Start evaluation.
self.policy.eval()
self._make_fsdp_happy()
if unwrapped_policy is None:
unwrapped_policy = self.accelerator.unwrap_model(self.policy, keep_fp32_wrapper=True)
unwrapped_policy = unwrapped_policy.policy.base_model
outputs = decode.decode_prompts_with_huggingface_given_model(
model=unwrapped_policy,
tokenizer=self.tokenizer,
prompts=prompts,
decoding_args=decode.HFDecodingArguments(max_new_tokens=self.args.response_len, temperature=temperature),
per_device_batch_size=self.args.per_device_eval_batch_size,
divide_work=False,
)
sequences = [i + o for i, o in utils.zip_(prompts, outputs)]
rewards = score.score_sequences_with_huggingface_given_model(
model=self.reward_model,
tokenizer=self.tokenizer,
sequences=sequences,
per_device_batch_size=self.args.rollout_per_device_batch_size,
divide_work=False,
)
if self.accelerator.is_main_process:
results = [
{"reward": reward, model_name_at_step: output, **example}
for reward, output, example in utils.zip_(rewards, outputs, list_dict_data)
]
if self.args.output_dir is not None:
utils.jdump(results, utils.join(self.args.output_dir, f"eval_results_{step_idx}.json"))
logger.warning(f"End evaluation at step: {step_idx}. Processed {len(results)} examples")
@abc.abstractmethod
@torch.inference_mode()
def save_model(self, output_dir: Optional[str] = None):
raise NotImplementedError
def _log_batch_size(self, loader: DataLoader, loader_name):
batch = next(iter(loader))
if isinstance(batch, torch.Tensor):
batch_size = batch.shape[0]
elif isinstance(batch, (list, tuple)):
batch_size = batch[0].shape[0]
else:
tensor = list(batch.values())[0]
batch_size = tensor.size(0)
logger.warning(f"Batch size of {loader_name} dataloader: {batch_size}", main_process_only=True)
def get_train_dataloader(self):
logger.warning(f"Train dataset size: {len(self.train_dataset)}", main_process_only=True) # noqa
train_dataloader = DataLoader(
dataset=self.train_dataset,
collate_fn=self.data_collator,
batch_size=self.args.rollout_per_device_batch_size,
shuffle=True,
drop_last=True,
)
train_dataloader = self.accelerator.prepare(train_dataloader) # noqa
self._log_batch_size(train_dataloader, "train_dataloader")
return utils.InfiniteLoader(train_dataloader)
def get_rollouts_dataloader(self, rollouts: Dict[str, Tensor], shuffle=True, drop_last=True, keys=None):
if keys is None:
keys = tuple(rollouts.keys())
def collate_rollouts(instances: Sequence[tuple]):
return {key: torch.stack([instance[idx] for instance in instances]) for idx, key in enumerate(keys)}
rollouts_dataset = TensorDataset(*[rollouts[key] for key in keys])
rollouts_dataloader = DataLoader(
dataset=rollouts_dataset,
batch_size=self.args.step_per_device_batch_size,
collate_fn=collate_rollouts,
shuffle=shuffle,
drop_last=drop_last,
)
# Do not prepare, since we don't need to shard the rollouts sampled on each batch.
return rollouts_dataloader
| alpaca_farm-main | src/alpaca_farm/rl/rl_trainer.py |
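A small sketch of the episode/step accounting in RLTrainer.train above: each PPO step consumes exactly one full rollout batch, so the step count follows from dataset size, epochs, and rollout_batch_size. The dataset size used here is an assumed placeholder.
train_dataset_size = 52_000  # hypothetical number of unlabeled prompts
total_epochs = 10            # default in the RL training args
rollout_batch_size = 512     # default in the RL training args
total_episodes = train_dataset_size * total_epochs  # 520_000 sampled responses overall
total_steps = total_episodes // rollout_batch_size  # 1015 PPO steps
print(total_episodes, total_steps)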
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import transformers
from alpaca_farm import distributed_utils
from .. import constants, logging
logger = logging.get_logger(__name__)
@dataclass
class DataArguments:
dataset_path: str = field(default="tatsu-lab/alpaca_farm")
dataset_name: str = field(default="alpaca_instructions")
train_splits: List[str] = field(default_factory=lambda: ["unlabeled"])
eval_splits: List[str] = field(default_factory=lambda: ["val"])
prompt_dict_path: str = field(
default=None,
metadata={"help": "Path to the dictionary for the prompt to format examples."},
)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
wandb_project: str = field(default=constants.WANDB_PROJECT)
cache_dir: Optional[str] = field(default=constants.DEFAULT_CACHE_DIR)
flash_attn: bool = field(default=False)
optim: str = field(default="adamw_torch")
truncate_tokens: Optional[List[str]] = field(
default_factory=lambda: None,
metadata={
"help": "Tokens in strings to truncate at first occurrence. "
"This was used in original OAI summarization paper to avoid models returning incomplete sentences. "
},
)
truncate_after: Optional[int] = field(
default=None, metadata={"help": "Truncate after this number of tokens. Prevents early truncation."}
)
penalty_reward_value: float = field(
default=-1.0,
metadata={
"help": "Reward assigned to sequences that are truncated, "
"e.g., due to outputting incomplete sentences for given context window."
},
)
total_epochs: int = field(default=10)
rollout_batch_size: int = field(default=512)
step_batch_size: int = field(default=256)
rollout_per_device_batch_size: int = field(default=32)
step_per_device_batch_size: int = field(default=2)
noptepochs: int = field(default=2)
vf_coef: float = field(default=0.1)
cliprange: float = field(default=0.2)
cliprange_value: float = field(default=0.2)
gamma: float = field(default=1.0)
lam: float = field(default=1.0)
whiten_rewards: bool = field(default=True)
adam_epsilon: float = field(
default=1e-5,
metadata={
"help": "Epsilon for AdamW optimizer. "
"This is the default for OAI PPO code and UW Quark code. "
"This is not the Hugging Face default."
},
)
temperature: float = field(default=1.0)
kl_coef: float = field(default=0.2)
target_kl: float = field(default=6.0)
k_beta: float = field(default=0.1)
adaptive_kl: bool = field(default=False)
eval_batches: int = field(default=sys.maxsize, metadata={"help": "Maximum number of batches to evaluate on."})
init_value_with_reward: bool = field(
default=True, metadata={"help": "Initialize the value model with the reward model."}
)
save_steps_extra: Optional[str] = field(
default=None,
metadata={
"help": "A list of predetermined checkpoints to save, represented in the format 'no1__no2__no3'. "
"Parse this with str.split('__')."
},
)
query_len: int = field(default=192)
response_len: int = field(default=300)
policy_model_name_or_path: str = field(default=None)
reward_model_name_or_path: str = field(default=None)
use_fast_tokenizer: bool = field(
default=False,
metadata={
"help": "Use fast tokenizer if True. "
"Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. "
"Use fast tokenizer only if you can live with that."
},
)
def __post_init__(self):
# Super class' __post_init__ is very complicated; don't call super() for now, in case it messes something up.
# super().__post_init__()
if self.tf32: # super().__post_init__() actually does this.
torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = True # noqa
world_size = distributed_utils.get_world_size()
# Checks on rollout_batch_size only matter for PPO.
assert self.rollout_batch_size >= self.rollout_per_device_batch_size * world_size, (
"rollout_batch_size is smaller than rollout_per_device_batch_size * world_size. "
"Increase the former or decrease the latter to fix this."
)
assert (
self.rollout_batch_size % (self.rollout_per_device_batch_size * world_size) == 0
), "rollout_batch_size is not a multiple of rollout_per_device_batch_size * world_size. "
assert self.step_batch_size >= self.step_per_device_batch_size * world_size, (
"step_batch_size is smaller than step_per_device_batch_size * world_size. "
"Increase the former or decrease the latter to fix this."
)
assert (
self.step_batch_size % (self.step_per_device_batch_size * world_size) == 0
), "step_batch_size is not a multiple of step_per_device_batch_size * world_size. "
logger.warning(
f"Rollout stats:\n"
f"\trollout_batch_size: {self.rollout_batch_size}\n"
f"\trollout_per_device_batch_size: {self.rollout_per_device_batch_size}\n"
f"\tworld_size: {world_size}\n",
)
assert (self.rollout_batch_size // self.rollout_per_device_batch_size) % world_size == 0
self.rollout_accumulation_steps = self.rollout_batch_size // self.rollout_per_device_batch_size // world_size
logger.warning(
f"Step stats:\n"
f"\tstep_batch_size: {self.step_batch_size}\n"
f"\tstep_per_device_batch_size: {self.step_per_device_batch_size}\n"
f"\tworld_size: {world_size}\n",
)
assert (self.step_batch_size // self.step_per_device_batch_size) % world_size == 0
self.gradient_accumulation_steps = self.step_batch_size // self.step_per_device_batch_size // world_size
logger.warning(
f"Accumulation steps:\n"
f"\trollout_accumulation_steps: {self.rollout_accumulation_steps}\n"
f"\tgradient_accumulation_steps: {self.gradient_accumulation_steps}\n"
)
if self.save_steps_extra is not None:
self.save_steps_extra_list = [int(string) for string in self.save_steps_extra.split("__")]
else:
self.save_steps_extra_list = []
def set_truncate_token_ids(self, tokenizer: transformers.PreTrainedTokenizer):
"""Convert truncation token to token ids.
This is called in RLTrainer.
"""
truncate_tokens = self.truncate_tokens
if truncate_tokens is None:
truncate_token_ids = None
else:
truncate_token_ids = tokenizer.convert_tokens_to_ids(truncate_tokens)
self.truncate_token_ids = truncate_token_ids
| alpaca_farm-main | src/alpaca_farm/rl/ppo_utils.py |
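A hedged usage sketch (not from the repository) showing how the dataclasses above are typically parsed with transformers.HfArgumentParser, assuming a single-process run so the divisibility checks in __post_init__ pass with the defaults; the paths below are placeholders.
import transformers
parser = transformers.HfArgumentParser((DataArguments, TrainingArguments))
data_args, training_args = parser.parse_args_into_dataclasses(args=[
    "--output_dir", "/tmp/ppo_run",                            # required by transformers.TrainingArguments
    "--policy_model_name_or_path", "<path-to-sft-model>",      # placeholder
    "--reward_model_name_or_path", "<path-to-reward-model>",   # placeholder
    "--rollout_batch_size", "512",
    "--step_batch_size", "256",
    "--save_steps_extra", "1000__2000",
])
print(training_args.rollout_accumulation_steps)  # set by __post_init__ above
print(training_args.save_steps_extra_list)       # [1000, 2000]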