python_code | repo_name | file_path
---|---|---|
import re
import os
import sympy
import pandas as pd
from tasks.base import Task, DATA_PATH
from prompts.game24 import *
def get_current_numbers(y: str) -> str:
last_line = y.strip().split('\n')[-1]
return last_line.split('left: ')[-1].split(')')[0]
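# Illustrative note (not in the original file): get_current_numbers extracts the
# remaining numbers from the last "(left: ...)" annotation of a partial trajectory,
# e.g. get_current_numbers("1 + 2 = 3 (left: 3 3 4)") returns "3 3 4".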
class Game24Task(Task):
"""
Input (x) : a string of 4 numbers
Output (y) : a trajectory of 3 steps to reach 24
Reward (r) : 0 or 1, depending on whether the trajectory is correct
Input Example:
1 2 3 4
Output Example:
1 + 2 = 3 (left: 3 3 4)
3 + 3 = 6 (left: 4 6)
6 * 4 = 24 (left: 24)
(1 + 2 + 3) * 4 = 24
"""
def __init__(self, file='24.csv'):
"""
file: a csv file (fixed)
"""
super().__init__()
path = os.path.join(DATA_PATH, '24', file)
self.data = list(pd.read_csv(path)['Puzzles'])
self.value_cache = {}
self.steps = 4
self.stops = ['\n'] * 4
def __len__(self) -> int:
return len(self.data)
def get_input(self, idx: int) -> str:
return self.data[idx]
def test_output(self, idx: int, output: str):
expression = output.strip().split('\n')[-1].lower().replace('answer: ', '').split('=')[0]
numbers = re.findall(r'\d+', expression)
problem_numbers = re.findall(r'\d+', self.data[idx])
if sorted(numbers) != sorted(problem_numbers):
return {'r': 0}
try:
# print(sympy.simplify(expression))
return {'r': int(sympy.simplify(expression) == 24)}
except Exception:
# print(e)
return {'r': 0}
@staticmethod
def standard_prompt_wrap(x: str, y:str='') -> str:
return standard_prompt.format(input=x) + y
@staticmethod
def cot_prompt_wrap(x: str, y:str='') -> str:
return cot_prompt.format(input=x) + y
@staticmethod
def propose_prompt_wrap(x: str, y: str='') -> str:
current_numbers = get_current_numbers(y if y else x)
if current_numbers == '24':
prompt = cot_prompt.format(input=x) + 'Steps:' + y
# print([prompt])
else:
prompt = propose_prompt.format(input=current_numbers)
return prompt
@staticmethod
def value_prompt_wrap(x: str, y: str) -> str:
last_line = y.strip().split('\n')[-1]
if 'left: ' not in last_line: # last step
ans = last_line.lower().replace('answer: ', '')
# print([value_last_step_prompt.format(input=x, answer=ans)])
return value_last_step_prompt.format(input=x, answer=ans)
current_numbers = get_current_numbers(y)
return value_prompt.format(input=current_numbers)
@staticmethod
def value_outputs_unwrap(x: str, y: str, value_outputs: list) -> float:
if len(y.strip().split('\n')) == 4 and 'answer' not in y.lower():
return 0
value_names = [_.split('\n')[-1] for _ in value_outputs]
value_map = {'impossible': 0.001, 'likely': 1, 'sure': 20} # TODO: ad hoc
value = sum(value * value_names.count(name) for name, value in value_map.items())
return value | tree-of-thoughts-main | experiements/tree-of-thought-llm/tasks/game24.py |
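# Illustrative sketch (not one of the repository files above or below): how
# Game24Task.value_outputs_unwrap aggregates value-prompt completions into a scalar
# score using the ad-hoc value_map ('impossible' -> 0.001, 'likely' -> 1, 'sure' -> 20).
# Assumes the tasks package above is importable; the mock outputs are hypothetical.
if __name__ == "__main__":
    from tasks.game24 import Game24Task
    mock_value_outputs = [
        "10 + 14 = 24\nsure",
        "numbers are within a reasonable range\nlikely",
        "all too small\nimpossible",
    ]
    partial_trajectory = "4 + 8 = 12 (left: 4 6 12)\n"
    score = Game24Task.value_outputs_unwrap("4 4 6 8", partial_trajectory, mock_value_outputs)
    print(score)  # 20*1 + 1*1 + 0.001*1 = 21.001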
import os
import re
from tasks.base import Task, DATA_PATH
from prompts.text import *
from models import gpt
class TextTask(Task):
"""
Input (x) : a text instruction
Output (y) : a text generation
Reward (r) : # TODO
Input Example:
Output Example:
"""
def __init__(self, file='data_100_random_text.txt'):
"""
file: a text file, each line is some sentences
"""
super().__init__()
path = os.path.join(DATA_PATH, 'text', file)
self.data = open(path).readlines()
self.steps = 2
self.stops = ['\nPassage:\n', None]
def __len__(self) -> int:
return len(self.data)
def get_input(self, idx: int) -> str:
return self.data[idx]
def test_output(self, idx: int, output: str):
output = output.split('Passage:\n')[-1]
prompt = score_prompt + output
score_outputs = gpt(prompt, n=5, model='gpt-4')
scores = []
for score_output in score_outputs:
# print(score_output)
pattern = r".*coherency score is (\d+).*"
match = re.match(pattern, score_output, re.DOTALL)
if match:
score = int(match.groups()[0])
scores.append(score)
else:
print(f'------------------score no match: {[score_output]}')
print(scores)
# print('------------')
info = {'rs': scores, 'r': sum(scores) / len(scores) if scores else 0}
return info
@staticmethod
def standard_prompt_wrap(x: str, y:str='') -> str:
return standard_prompt.format(input=x) + y
@staticmethod
def cot_prompt_wrap(x: str, y:str='') -> str:
return cot_prompt.format(input=x) + y
@staticmethod
def vote_prompt_wrap(x: str, ys: list) -> str:
prompt = vote_prompt
for i, y in enumerate(ys, 1):
# y = y.replace('Plan:\n', '')
# TODO: truncate the plan part?
prompt += f'Choice {i}:\n{y}\n'
return prompt
@staticmethod
def vote_outputs_unwrap(vote_outputs: list, n_candidates: int) -> list:
vote_results = [0] * n_candidates
for vote_output in vote_outputs:
pattern = r".*best choice is .*(\d+).*"
match = re.match(pattern, vote_output, re.DOTALL)
if match:
vote = int(match.groups()[0]) - 1
if vote in range(n_candidates):
vote_results[vote] += 1
else:
print(f'vote no match: {[vote_output]}')
return vote_results
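    # Illustrative note (not in the original file): a vote output ending in
    # "The best choice is 2" increments vote_results[1]; outputs that do not match
    # the pattern are skipped with a warning.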
@staticmethod
def compare_prompt_wrap(x: str, ys: list) -> str:
assert len(ys) == 2, 'compare prompt only supports 2 candidates'
ys = [y.split('Passage:\n')[-1] for y in ys]
prompt = compare_prompt + f'Passage 1:\n{ys[0]}\n\nPassage 2:\n{ys[1]}\n'
return prompt
@staticmethod
def compare_output_unwrap(compare_output: str):
if 'more coherent passage is 1' in compare_output:
return 0
elif 'more coherent passage is 2' in compare_output:
return 1
elif 'two passages are similarly coherent' in compare_output:
return 0.5
else:
print(f'-----------------compare no match: {[compare_output]}')
return -1 | tree-of-thoughts-main | experiements/tree-of-thought-llm/tasks/text.py |
import re
import json
from tasks.base import Task
from prompts.crosswords import *
from models import gpt
class MiniCrosswordsEnv:
def __init__(self, file='mini0505.json'):
self.file = f'data/crosswords/{file}'
self.file = json.load(open(self.file))
self.n = len(self.file)
self.cache = {}
self.idx = None
self.times = 0
self.prompt_status_cache = {}
def __len__(self):
return self.n
def reset(self, idx, board=None, status=None, steps=None):
self.idx = idx
self.data, self.board_gt = self.file[idx]
self.board = ['_'] * 25
self.ans = ['_____'] * 10
self.ans_gt = self.get_ans(self.board_gt)
self.steps = 0
self.status = [0] * 10 # 0: unfilled; 1: filled; 2: filled then changed
if board is not None:
self.board = board
self.ans = self.get_ans(self.board)
if status is not None:
self.status = status
if steps is not None:
self.steps = steps
return self.render()
def prompt_status(self):
count = {'sure': 0, 'maybe': 0, 'impossible': 0}
for ans, data, status in zip(self.ans, self.data, self.status):
# if status != 0: continue
if ans.count('_') >= 4: continue
ans = ' '.join(ans.lower())
line = f'{data}: {ans}'
prompt = value_prompt.format(input=line)
if prompt in self.prompt_status_cache:
res = self.prompt_status_cache[prompt]
else:
res = gpt(prompt)[0]
self.prompt_status_cache[prompt] = res
# print(line)
# print(res)
# print()
res = res.split('\n')[-1].strip()
if res in count: count[res] += 1
# print(count)
return count
def render_gt_board(self):
s = "GT Board:\n"
for i in range(5):
s += ' '.join(self.board_gt[i*5:(i+1)*5]) + '\n'
return s
def render_board(self):
s = "Current Board:\n"
for i in range(5):
s += ''.join(self.board[i*5:(i+1)*5]) + '\n'
return s
def render_clues(self, status=None):
s = ""
# s += "Horizontal:\n"
for i in range(5):
if status is None or self.status[i] == status:
s += 'h' + str(i+1) + '. ' + self.data[i] + '\n'
# s += "Vertical:\n"
for i in range(5, 10):
if status is None or self.status[i] == status:
s += 'v' + str(i-5+1) + '. ' + self.data[i] + '\n'
return s
def render_ans(self, status=None):
s = ""
# s += "Horizontal:\n"
for i in range(5):
if status is None or self.status[i] == status:
s += 'h' + str(i+1) + '. ' + self.data[i] + ': ' + self.ans[i] + '\n'
# s += "Vertical:\n"
for i in range(5, 10):
if status is None or self.status[i] == status:
s += 'v' + str(i-5+1) + '. ' + self.data[i] + ': ' + self.ans[i] + '\n'
return s
def render_gt_ans(self, status=None):
s = ""
# s += "Horizontal:\n"
for i in range(5):
if status is None or self.status[i] == status:
s += 'h' + str(i+1) + '. ' + self.data[i] + ': ' + self.ans_gt[i] + '\n'
# s += "Vertical:\n"
for i in range(5, 10):
if status is None or self.status[i] == status:
s += 'v' + str(i-5+1) + '. ' + self.data[i] + ': ' + self.ans_gt[i] + '\n'
return s
def render(self, status=True):
if status:
return self.render_board() + '\nUnfilled:\n' + self.render_ans(status=0) + '\nFilled:\n' + self.render_ans(status=1) + '\nChanged:\n' + self.render_ans(status=2)
else:
return self.render_board() + '\n' + self.render_ans()
def get_ans(self, board):
ans = [''] * 10
for i in range(5):
ans[i] = ''.join(board[i*5:(i+1)*5])
for i in range(5):
ans[i+5] = ''.join(board[i::5])
return ans
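    # Illustrative note (not in the original file): the board is a flat list of 25
    # cells in row-major order, so board[i*5:(i+1)*5] is horizontal word i+1 and
    # board[i::5] is vertical word i+1; get_ans returns [h1..h5, v1..v5].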
def step(self, action):
self.steps += 1
action = action.split('\n')[-1]
action = action.split('. ')
if len(action) != 2:
return 'Invalid! Format should be like "h1. apple"', 0, False, {}
pos, word = action
if len(word) != 5:
return 'Invalid! Word should have 5 letters.', 0, False, {}
if pos.startswith('h'):
idx = int(pos[1:]) - 1
self.board[idx*5:(idx+1)*5] = list(word.upper())
elif pos.startswith('v'):
idx = int(pos[1:]) - 1
self.board[idx::5] = list(word.upper())
idx += 5 # for later status update
else:
return 'Invalid! Position should be h1-h5 or v1-v5', 0, False, {}
self.new_ans = self.get_ans(self.board)
# self.status = [2 if (status == 1 and ans != new_ans) else status for status, ans, new_ans in zip(self.status, self.ans, self.new_ans)]
self.status = [2 if any(letter != new_letter and letter != '_' for letter, new_letter in zip(ans, new_ans)) else status for status, ans, new_ans in zip(self.status, self.ans, self.new_ans)]
self.status[idx] = 1
self.ans = self.new_ans
r_all = (self.board == self.board_gt)
r_letter = sum(a == b for a, b in zip(self.board, self.board_gt)) / 25
r_word = sum(a == b for a, b in zip(self.ans, self.ans_gt)) / 10
return self.render(), r_all, (r_all or self.steps >= 20), {'r_letter': r_letter, 'r_word': r_word, 'r_game': r_all}
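    # Illustrative note (not in the original file): an action such as "h1. skald"
    # writes S K A L D into row 1, marks previously filled words whose letters it
    # overwrites as "changed" (status 2), and returns (rendered status view,
    # solved flag, done flag, {'r_letter', 'r_word', 'r_game'}).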
class MiniCrosswordsTask(Task):
"""
    Input (x) : Description of a 5x5 mini crossword
Output (y) : List of 10 words to fill in the crossword
Reward (r) : word level and game level
Input Example:
Output Example:
"""
def __init__(self, file):
"""
        file: a json file (fixed)
"""
super().__init__()
self.env = MiniCrosswordsEnv(file) # use it as a stateless tool
self.xs = []
for idx in range(len(self.env)):
self.env.reset(idx)
self.xs.append(self.env.render_clues())
self.steps = 10 # TODO: variable steps??
self.cache_proposals = {}
def __len__(self) -> int:
return len(self.env)
def get_input(self, idx: int) -> str:
self.env.reset(idx)
return self.env.render_clues()
# def test_output(self, idx: int, output: str): # TODO: r_word for now
# self.env.reset(idx)
# info = {'r_word': 0}
# for line in output.split('\n'):
# if line.startswith('h') or line.startswith('v'):
# _, _, _, info = self.env.step(line)
# return info['r_word']
def test_output(self, idx: int, output: str):
self.env.reset(idx)
output = output.split('Output:\n')[-1]
info = {'r_word': 0, 'r_letter': 0, 'r_game': 0}
for i, line in enumerate(output.strip().split('\n')[-5:], 1):
letters = line.split(' ')[:5]
word = ''.join(letters)
word = word + '_' * (5 - len(word))
action = f'h{i}. {word}'
# print(action)
_, _, _, info = self.env.step(action)
info['r'] = info['r_word']
return info
def set_status(self, x: str, y: str):
idx = self.xs.index(x)
self.test_output(idx, y) # update self.env
@staticmethod
def standard_prompt_wrap(x: str, y:str='') -> str:
return standard_prompt.format(input=x) + y
@staticmethod
def cot_prompt_wrap(x: str, y:str='') -> str:
return cot_prompt.format(input=x) + y
def propose_prompt_wrap(self, x: str, y: str='') -> str:
self.set_status(x, y)
return propose_prompt.format(input=self.env.render())
def propose_outputs_unwrap(self, x: str, y: str, outputs: list, n_max_propose: int) -> list:
confidence_to_value = {'certain': 1, 'high': 0.5, 'medium': 0.2, 'low': 0.1} # TODO: ad hoc
proposals_to_scores = {}
for output in outputs:
lines = output.split('\n')
pattern = r'^([hv][1-5])\. ([a-zA-Z]{5,5}) \((certain|high|medium|low)\).*$'
for line in lines:
match = re.match(pattern, line)
if match:
parts = [match.group(1), match.group(2), match.group(3)]
proposal = parts[0].lower() + '. ' + parts[1].lower()
score = confidence_to_value.get(parts[2], 0)
proposals_to_scores[proposal] = proposals_to_scores.get(proposal, 0) + score
proposals = sorted(proposals_to_scores.items(), key=lambda x: x[1], reverse=True)
if n_max_propose != -1:
proposals = proposals[:n_max_propose]
proposals = [y + proposal[0] + '\n' for proposal in proposals]
self.cache_proposals[(x, y, n_max_propose)] = proposals
return proposals
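    # Illustrative note (not in the original file): a proposal line such as
    # "h2. water (high)" matches the regex above and contributes 0.5 to the total
    # score of the candidate "h2. water"; candidates are then sorted by score.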
def evaluate(self, x: str, y: str, n_evaluate_sample: int) -> int:
self.set_status(x, y)
assert n_evaluate_sample == 1 # TODO: ad hoc
count = {'sure': 0, 'maybe': 0, 'impossible': 0}
for ans, data, status in zip(self.env.ans, self.env.data, self.env.status):
if ans.count('_') >= 4: continue
ans = ' '.join(ans.lower())
line = f'{data}: {ans}'
prompt = value_prompt.format(input=line)
res = gpt(prompt)[0]
print(line)
print(res)
print()
res = res.split('\n')[-1].strip()
if res in count: count[res] += 1
print(count)
return count | tree-of-thoughts-main | experiements/tree-of-thought-llm/tasks/crosswords.py |
DATA_PATH = './data'
class Task:
def __init__(self):
pass
def __len__(self) -> int:
pass
def get_input(self, idx: int) -> str:
pass
def test_output(self, idx: int, output: str):
pass | tree-of-thoughts-main | experiements/tree-of-thought-llm/tasks/base.py |
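# Illustrative sketch (not one of the repository files): the Task interface above is
# effectively duck-typed, so a minimal subclass only needs __len__, get_input and
# test_output. EchoTask and its data are hypothetical.
from tasks.base import Task

class EchoTask(Task):
    def __init__(self):
        super().__init__()
        self.data = ["say hello", "say goodbye"]

    def __len__(self) -> int:
        return len(self.data)

    def get_input(self, idx: int) -> str:
        return self.data[idx]

    def test_output(self, idx: int, output: str):
        # reward 1 if the requested word appears in the output, 0 otherwise
        return {'r': int(self.data[idx].split()[-1] in output.lower())}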
# 5-shot
standard_prompt = '''Use numbers and basic arithmetic operations (+ - * /) to obtain 24.
Input: 4 4 6 8
Answer: (4 + 8) * (6 - 4) = 24
Input: 2 9 10 12
Answer: 2 * 12 * (10 - 9) = 24
Input: 4 9 10 13
Answer: (13 - 9) * (10 - 4) = 24
Input: 1 4 8 8
Answer: (8 / 4 + 1) * 8 = 24
Input: 5 5 5 9
Answer: 5 + 5 + 5 + 9 = 24
Input: {input}
'''
# 5-shot
cot_prompt = '''Use numbers and basic arithmetic operations (+ - * /) to obtain 24. Each step, you are only allowed to choose two of the remaining numbers to obtain a new number.
Input: 4 4 6 8
Steps:
4 + 8 = 12 (left: 4 6 12)
6 - 4 = 2 (left: 2 12)
2 * 12 = 24 (left: 24)
Answer: (6 - 4) * (4 + 8) = 24
Input: 2 9 10 12
Steps:
12 * 2 = 24 (left: 9 10 24)
10 - 9 = 1 (left: 1 24)
24 * 1 = 24 (left: 24)
Answer: (12 * 2) * (10 - 9) = 24
Input: 4 9 10 13
Steps:
13 - 10 = 3 (left: 3 4 9)
9 - 3 = 6 (left: 4 6)
4 * 6 = 24 (left: 24)
Answer: 4 * (9 - (13 - 10)) = 24
Input: 1 4 8 8
Steps:
8 / 4 = 2 (left: 1 2 8)
1 + 2 = 3 (left: 3 8)
3 * 8 = 24 (left: 24)
Answer: (1 + 8 / 4) * 8 = 24
Input: 5 5 5 9
Steps:
5 + 5 = 10 (left: 5 9 10)
10 + 5 = 15 (left: 9 15)
15 + 9 = 24 (left: 24)
Answer: ((5 + 5) + 5) + 9 = 24
Input: {input}
'''
# 1-shot
propose_prompt = '''Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: {input}
Possible next steps:
'''
value_prompt = '''Evaluate if given numbers can reach 24 (sure/likely/impossible)
10 14
10 + 14 = 24
sure
11 12
11 + 12 = 23
12 - 11 = 1
11 * 12 = 132
11 / 12 = 0.91
impossible
4 4 10
4 + 4 + 10 = 8 + 10 = 18
4 * 10 - 4 = 40 - 4 = 36
(10 - 4) * 4 = 6 * 4 = 24
sure
4 9 11
9 + 11 + 4 = 20 + 4 = 24
sure
5 7 8
5 + 7 + 8 = 12 + 8 = 20
(8 - 5) * 7 = 3 * 7 = 21
I cannot obtain 24 now, but numbers are within a reasonable range
likely
5 6 6
5 + 6 + 6 = 17
(6 - 5) * 6 = 1 * 6 = 6
I cannot obtain 24 now, but numbers are within a reasonable range
likely
10 10 11
10 + 10 + 11 = 31
(11 - 10) * 10 = 10
10 10 10 are all too big
impossible
1 3 3
1 * 3 * 3 = 9
(1 + 3) * 3 = 12
1 3 3 are all too small
impossible
{input}
'''
value_last_step_prompt = '''Use numbers and basic arithmetic operations (+ - * /) to obtain 24. Given an input and an answer, give a judgement (sure/impossible) if the answer is correct, i.e. it uses each input exactly once and no other numbers, and reach 24.
Input: 4 4 6 8
Answer: (4 + 8) * (6 - 4) = 24
Judge:
sure
Input: 2 9 10 12
Answer: 2 * 12 * (10 - 9) = 24
Judge:
sure
Input: 4 9 10 13
Answer: (13 - 9) * (10 - 4) = 24
Judge:
sure
Input: 4 4 6 8
Answer: (4 + 8) * (6 - 4) + 1 = 25
Judge:
impossible
Input: 2 9 10 12
Answer: 2 * (12 - 10) = 24
Judge:
impossible
Input: 4 9 10 13
Answer: (13 - 4) * (10 - 9) = 24
Judge:
impossible
Input: {input}
Answer: {answer}
Judge:''' | tree-of-thoughts-main | experiements/tree-of-thought-llm/prompts/game24.py |
standard_prompt = '''
Write a coherent passage of 4 short paragraphs. The end sentence of each paragraph must be: {input}
'''
cot_prompt = '''
Write a coherent passage of 4 short paragraphs. The end sentence of each paragraph must be: {input}
Make a plan then write. Your output should be of the following format:
Plan:
Your plan here.
Passage:
Your passage here.
'''
vote_prompt = '''Given an instruction and several choices, decide which choice is most promising. Analyze each choice in detail, then conclude in the last line "The best choice is {s}", where s the integer id of the choice.
'''
compare_prompt = '''Briefly analyze the coherency of the following two passages. Conclude in the last line "The more coherent passage is 1", "The more coherent passage is 2", or "The two passages are similarly coherent".
'''
score_prompt = '''Analyze the following passage, then at the last line conclude "Thus the coherency score is {s}", where s is an integer from 1 to 10.
''' | tree-of-thoughts-main | experiements/tree-of-thought-llm/prompts/text.py |
# 5 shot
standard_prompt = '''
Solve 5x5 mini crosswords. Given an input of 5 horizontal clues and 5 vertical clues, generate an output of 5 rows, where each row is 5 letter separated by space.
Input:
h1. A lunar valley
h2. A fatty oil
h3. To entice
h4. To lower; to reduce
h5. A solitary person
v1. According to the roster
v2. Another name for Port-Francqui
v3. An illicit lover; a European lake
v4. To lisp
v5. To come in
Output:
R I L L E
O L E I N
T E M P T
A B A S E
L O N E R
Input:
h1. One who saws
h2. A fungus genus
h3. An assessor
h4. Pasture land
h5. Receiving by the ear
v1. To swell; to increase
v2. The Brazilian macaw; an Australian bird
v3. A Timorese island
v4. Excessive fluid accumulation
v5. Dewy; roscid
Output:
S A W E R
U R E D O
R A T E R
G R A M A
E A R A L
Input:
h1. Dandruff; scum; the bull-trout
h2. One who greets; to vacillate; a British river
h3. A Turkish written decree
h4. Mignon; petty; little
h5. A bishop's permission for a priest to leave a diocese
v1. To steal; to brush across
v2. A sedge (a primitive three-sided grass)
v3. Grape jam
v4. A flatworm larva
v5. Ore refuse; to prepare material for glass by heat
Output:
S C U R F
W A V E R
I R A D E
P E T I T
E X E A T
Input:
h1. Presented; revealed
h2. An interjection expressing sorrow
h3. Benefit; result
h4. A cigarette
h5. Chased up a tree
v1. Swarthy; tawny
v2. An apiarist or bee keeper
v3. To speak formally
v4. To indite; to scribble
v5. An insecticide
Output:
S H O W N
W I R R A
A V A I L
R E T T E
T R E E D
Input:
h1. Scald; an ancient Scandinavian bard
h2. H2O; to irrigate
h3. The companion to an "intro", a postscript or exit piece
h4. An artificial fabric
h5. Deep religious feeling
v1. To rush; to stoop; a descent
v2. A New Zealand fir tree
v3. Mine refuse
v4. The garden dormouse
v5. Like a drone; humming
Output:
S K A L D
W A T E R
O U T R O
O R L O N
P I E T Y
Input:
{input}
Output:
'''
cot_prompt = '''Solve 5x5 mini crosswords. Given an input of 5 horizontal clues and 5 vertical clues, generate thoughts about which 5-letter word fits each clue, then an output of 5 rows, where each row is 5 letter separated by space.
Input:
h1. A lunar valley
h2. A fatty oil
h3. To entice
h4. To lower; to reduce
h5. A solitary person
v1. According to the roster
v2. Another name for Port-Francqui
v3. An illicit lover; a European lake
v4. To lisp
v5. To come in
Thoughts:
h1. A lunar valley: RILLE
h2. A fatty oil: OLEIN
h3. To entice: TEMPT
h4. To lower; to reduce: ABASE
h5. A solitary person: LONER
v1. According to the roster: ROTAL
v2. Another name for Port-Francqui: ILEBO
v3. An illicit lover; a European lake: LEMAN
v4. To lisp: LIPSE
v5. To come in: ENTER
Output:
R I L L E
O L E I N
T E M P T
A B A S E
L O N E R
Input:
h1. One who saws
h2. A fungus genus
h3. An assessor
h4. Pasture land
h5. Receiving by the ear
v1. To swell; to increase
v2. The Brazilian macaw; an Australian bird
v3. A Timorese island
v4. Excessive fluid accumulation
v5. Dewy; roscid
Thoughts:
h1. One who saws: SAWER
h2. A fungus genus: UREDO
h3. An assessor: RATER
h4. Pasture land: GRAMA
h5. Receiving by the ear: EARAL
v1. To swell; to increase: SURGE
v2. The Brazilian macaw; an Australian bird: ARARA
v3. A Timorese island: WETAR
v4. Excessive fluid accumulation: EDEMA
v5. Dewy; roscid: RORAL
Output:
S A W E R
U R E D O
R A T E R
G R A M A
E A R A L
Input:
h1. Dandruff; scum; the bull-trout
h2. One who greets; to vacillate; a British river
h3. A Turkish written decree
h4. Mignon; petty; little
h5. A bishop's permission for a priest to leave a diocese
v1. To steal; to brush across
v2. A sedge (a primitive three-sided grass)
v3. Grape jam
v4. A flatworm larva
v5. Ore refuse; to prepare material for glass by heat
Thoughts:
h1. Dandruff; scum; the bull-trout: SCURF
h2. One who greets; to vacillate; a British river: WAVER
h3. A Turkish written decree: IRADE
h4. Mignon; petty; little: PETIT
h5. A bishop's permission for a priest to leave a diocese: EXEAT
v1. To steal; to brush across: SWIPE
v2. A sedge (a primitive three-sided grass): CAREX
v3. Grape jam: UVATE
v4. A flatworm larva: REDIA
v5. Ore refuse; to prepare material for glass by heat: FRETT
Output:
S C U R F
W A V E R
I R A D E
P E T I T
E X E A T
Input:
h1. Presented; revealed
h2. An interjection expressing sorrow
h3. Benefit; result
h4. A cigarette
h5. Chased up a tree
v1. Swarthy; tawny
v2. An apiarist or bee keeper
v3. To speak formally
v4. To indite; to scribble
v5. An insecticide
Thoughts:
h1. Presented; revealed: SHOWN
h2. An interjection expressing sorrow: WIRRA
h3. Benefit; result: AVAIL
h4. A cigarette: RETTE
h5. Chased up a tree: TREED
v1. Swarthy; tawny: SWART
v2. An apiarist or bee keeper: HIVER
v3. To speak formally: ORATE
v4. To indite; to scribble: WRITE
v5. An insecticide: NALED
Output:
S H O W N
W I R R A
A V A I L
R E T T E
T R E E D
Input:
h1. Scald; an ancient Scandinavian bard
h2. H2O; to irrigate
h3. The companion to an "intro", a postscript or exit piece
h4. An artificial fabric
h5. Deep religious feeling
v1. To rush; to stoop; a descent
v2. A New Zealand fir tree
v3. Mine refuse
v4. The garden dormouse
v5. Like a drone; humming
Thoughts:
h1. Scald; an ancient Scandinavian bard: SKALD
h2. H2O; to irrigate: WATER
h3. The companion to an "intro", a postscript or exit piece: OUTRO
h4. An artificial fabric: ORLON
h5. Deep religious feeling: PIETY
v1. To rush; to stoop; a descent: SWOOP
v2. A New Zealand fir tree: KAURI
v3. Mine refuse: ATTLE
v4. The garden dormouse: LEROT
v5. Like a drone; humming: DRONY
Output:
S K A L D
W A T E R
O U T R O
O R L O N
P I E T Y
Input:
{input}
'''
propose_prompt = '''Let's play a 5 x 5 mini crossword, where each word should have exactly 5 letters.
{input}
Given the current status, list all possible answers for unfilled or changed words, and your confidence levels (certain/high/medium/low), using the format "h1. apple (medium)". Use "certain" cautiously and only when you are 100% sure this is the correct word. You can list more then one possible answer for each word.
'''
value_prompt = '''Evaluate if there exists a five letter word of some meaning that fit some letter constraints (sure/maybe/impossible).
Incorrect; to injure: w _ o _ g
The letter constraint is: 5 letters, letter 1 is w, letter 3 is o, letter 5 is g.
Some possible words that mean "Incorrect; to injure":
wrong (w r o n g): 5 letters, letter 1 is w, letter 3 is o, letter 5 is g. fit!
sure
A person with an all-consuming enthusiasm, such as for computers or anime: _ _ _ _ u
The letter constraint is: 5 letters, letter 5 is u.
Some possible words that mean "A person with an all-consuming enthusiasm, such as for computers or anime":
geek (g e e k): 4 letters, not 5
otaku (o t a k u): 5 letters, letter 5 is u
sure
Dewy; roscid: r _ _ _ l
The letter constraint is: 5 letters, letter 1 is r, letter 5 is l.
Some possible words that mean "Dewy; roscid":
moist (m o i s t): 5 letters, letter 1 is m, not r
humid (h u m i d): 5 letters, letter 1 is h, not r
I cannot think of any words now. Only 2 letters are constrained, it is still likely
maybe
A woodland: _ l _ d e
The letter constraint is: 5 letters, letter 2 is l, letter 4 is d, letter 5 is e.
Some possible words that mean "A woodland":
forest (f o r e s t): 6 letters, not 5
woods (w o o d s): 5 letters, letter 2 is o, not l
grove (g r o v e): 5 letters, letter 2 is r, not l
I cannot think of any words now. 3 letters are constrained, and _ l _ d e seems a common pattern
maybe
An inn: _ d _ w f
The letter constraint is: 5 letters, letter 2 is d, letter 4 is w, letter 5 is f.
Some possible words that mean "An inn":
hotel (h o t e l): 5 letters, letter 2 is o, not d
lodge (l o d g e): 5 letters, letter 2 is o, not d
I cannot think of any words now. 3 letters are constrained, and it is extremely unlikely to have a word with pattern _ d _ w f to mean "An inn"
impossible
Chance; a parasitic worm; a fish: w r a k _
The letter constraint is: 5 letters, letter 1 is w, letter 2 is r, letter 3 is a, letter 4 is k.
Some possible words that mean "Chance; a parasitic worm; a fish":
fluke (f l u k e): 5 letters, letter 1 is f, not w
I cannot think of any words now. 4 letters are constrained, and it is extremely unlikely to have a word with pattern w r a k _ to mean "Chance; a parasitic worm; a fish"
impossible
{input}
''' | tree-of-thoughts-main | experiements/tree-of-thought-llm/prompts/crosswords.py |
| tree-of-thoughts-main | experiements/extremely_experimental/reinforcement/v1.py |
# give a topic [e.g. "What are quantum field theorem proofs? respond in math notation"] -> generate 100 questions with an external model -> run tree of thoughts on each question
# give a dataset -> ask questions about each example and fine-tune on the result, like the Alpaca dataset
import json
from tree_of_thoughts.treeofthoughts import OptimizedTreeofThoughts
from tree_of_thoughts.treeofthoughts import OptimizedOpenAILanguageModel
k = 5
T = 3
b = 5
vth = 0.5
timeout = 10
confidence = 1.0 # model is confident in its performance
max_iterations = 40 #tree branch nodes
convergence_threshold = 0.01
convergence_count = 5
class DatasetGenerator:
    def __init__(self, openai_language_model, tree_of_thoughts):
        self.openai_language_model = openai_language_model
        self.tree_of_thoughts = tree_of_thoughts
def generate_questions(self, topic, n_questions=100):
prompt=f"Generate {n_questions} unique questions related to the topic '{topic}':"
response = self.openai_language_model.openai_api_call_handler(prompt, 50 * n_questions, 0.5, 1)
questions_text = self.openai_language_model.openai_choice2text_handler(response.choices[0])
questions = questions_text.split('\n')[:n_questions]
return questions
    def generate_dataset(self, topic, n_questions=1000):
questions = self.generate_questions(topic, n_questions)
dataset = []
for question in questions:
            # solution = self.tree_of_thoughts.solve(question)
            solution = self.tree_of_thoughts.solve(question, k, T, b, vth, timeout, confidence_threshold=confidence, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
dataset_entry = {
"question": question,
"solution": solution
}
dataset.append(dataset_entry)
return dataset
openai_api_key = ""
openai_language_model = OptimizedOpenAILanguageModel(openai_api_key, api_model="gpt-3.5-turbo")
tree_of_thoughts = OptimizedTreeofThoughts(search_algorithm="DFS")
dataset_generator = DatasetGenerator(openai_language_model, tree_of_thoughts)
topic = "Artificial Intelligence"
dataset = dataset_generator.generate_dataset(topic)
# Save the dataset to a JSON file
with open("tot_dataset.json", "w") as f:
json.dump(dataset, f, indent=4)
# def generate_dataset(self, topic, n_questions=100):
# questions = self.generate_questions(topic, n_questions)
# dataset = []
# for question in questions:
# thoughts_and_evals = []
# state = [question]
# solution_found = False
# while not solution_found:
# thoughts = self.guidance_language_model.generate_thoughts(state, k)
# state_values = self.guidance_language_model.evaluate_state(thoughts)
# best_thought = self.select_best_thought(thoughts, state_values[best_thought])
# thoughts_and_evals.append((best_thought, state_values[best_thought]))
# state.append(best_thought)
# if self.is_solution(best_thought):
# solution_found = True
# dataset_entry = {
# "question": question,
# "instructions": thoughts_and_evals,
# "solution": best_thought
# }
# dataset.append(dataset_entry)
# return dataset
# def is_solution(self, thought):
# #implement the logic to determine if the thought is a solution
# pass | tree-of-thoughts-main | experiements/extremely_experimental/generate_dataset/main.py |
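# Illustrative sketch (not one of the repository files): reading the dataset written
# by the script above back in for inspection; assumes tot_dataset.json exists.
if __name__ == "__main__":
    import json
    with open("tot_dataset.json") as f:
        tot_dataset = json.load(f)
    for entry in tot_dataset[:3]:
        print(entry["question"], "->", entry["solution"])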
from abc import abstractmethod, ABC
from langchain import OpenAI
from langchain.agents import initialize_agent
from langchain.agents import AgentType
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class LangchainCustomLanguageModel(AbstractLanguageModel):
def __init__(self, api_key):
# self.model = model
# docstore = DocstoreExplorer()
model = OpenAI(temperature=0.5, openai_api_key=api_key)
# tools = [
# Tool(
# name="Search",
# func=docstore.search,
# description="useful for when you need to ask with search"
# ),
# Tool(
# name="Lookup",
# func=docstore.lookup,
# description="useful for when you need to ask with lookup"
# )
# ]
self.agent = initialize_agent(llm=model, agent=AgentType.REACT_DOCSTORE, verbose=True)
def generate_thoughts(self, state, k):
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', generate {k} coherent thoughts to continue the reasoning process:"
        response = self.agent.run(prompt)
thoughts = response.strip().split('\n')
return thoughts
def evaluate_states(self, states):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the following states of reasoning, vote for the best state:\n{state_text}\n\nVote, and NOTHING ELSE:"
            response = self.agent.run(prompt)
try:
value = float(response)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values | tree-of-thoughts-main | experiements/extremely_experimental/prompting/LangChain_model.py |
import concurrent.futures
from abc import ABC, abstractmethod
import openai
import os
import guidance
import time
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=False):
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
self.use_chat_api = 'gpt' in self.api_model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = "Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx'."
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def openai_api_call_handler(self, prompt, max_tokens, temperature, k=1, stop=None):
while True:
try:
if self.use_chat_api:
messages = [
{
"role": "user",
"content": prompt
}
]
response = openai.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
else:
response = openai.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
)
return response
except openai.error.RateLimitError as e:
                sleep_duration = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
                print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
                time.sleep(sleep_duration)
def openai_choice2text_handler(self, choice):
if self.use_chat_api:
text = choice['message']['content']
else:
text = choice.text.strip()
return text
def generate_thoughts(self, state, k):
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', generate {1} coherent thoughts to continue the reasoning process:"
prompt += self.ReAct_prompt
if self.use_chat_api:
new_prompt_success = False
"""
# Try prompt and parse in a single shot to save tokens (but if we fail, we end up spending more tokens)
new_prompt = prompt + "Thought string should be output in a format that can be parsed into python array in format [xxx,xxx,xxx]"
response = self.openai_api_call_handler(new_prompt, 100 * k, 0.5, 1)
text = self.openai_choice2text_handler(response.choices[0])
re_parse = re.search(r'\[(.*?)\]', text)
if re_parse:
thoughts_str = re_parse.group(1)
if thoughts_str:
thoughts = thoughts_str.split(',')
new_prompt_success = len(thoughts) == k
if not new_prompt_success:
print(f"Fall back to multi-prompt for chat-completion due to parse fail {text}")
"""
if not new_prompt_success:
thoughts = []
for _ in range(k):
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
text = self.openai_choice2text_handler(response.choices[0])
thoughts += [text]
else:
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
thoughts = [self.openai_choice2text_handler(choice) for choice in response.choices]
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def evaluate_states(self, states):
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1, and NOTHING ELSE:"
response = self.openai_api_call_handler(prompt, 10, 1)
try:
value_text = self.openai_choice2text_handler(response.choices[0])
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Given the following states of reasoning, vote for the best state:\n{states_text}\n\nVote, and NOTHING ELSE:"
response = self.openai_api_call_handler(prompt, 50, 1)
best_state_text = self.openai_choice2text_handler(response.choices[0])
print(f"Best state text: {best_state_text}")
best_state = tuple(best_state_text.split())
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
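    # Illustrative note (not in the original file): with evaluation_strategy='value'
    # each state is scored independently as a float in [0, 1] (defaulting to 0 when
    # parsing fails); with 'vote' a single completion names the best state, which
    # receives weight 1 while all other states receive 0.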
class OptimizedOpenAILanguageModel(OpenAILanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", cache_enabled=True, api_base="", api_model="", enable_ReAct_prompting=False):
super().__init__(api_key, strategy, evaluation_strategy, api_base, api_model, enable_ReAct_prompting)
self.cache_enabled = cache_enabled
self.thought_cache = {}
self.state_evaluation_cache = {}
def parallel_generate_thoughts(self, states, k):
with concurrent.futures.ThreadPoolExecutor() as executor:
thoughts = list(executor.map(lambda state: self.generate_thoughts(state, k), states))
print(f"Parallel generated thoughts: {thoughts}")
return thoughts
def parallel_evaluate_states(self, states):
with concurrent.futures.ThreadPoolExecutor() as executor:
state_values = list(executor.map(self.evaluate_states, states))
print(f"Parallel evaluated state values: {state_values}")
return state_values
class GuidanceLanguageModel(AbstractLanguageModel):
def __init__(self, model, strategy="cot", evaluation_strategy="value", enable_ReAct_prompting=False):
# gpt4 = guidance.llms.OpenAI("gpt-4")
# vicuna = guidance.llms.transformers.Vicuna("your_path/vicuna_13B", device_map="auto")
self.model = model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = '''{{#assistant~}}
{{gen 'Observation' temperature=0.5 max_tokens=50}}
{{~/assistant}}'''
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
self.thoughts_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Generate {{k}} coherent thoughts as short as possible to continue the reasoning process.
Don't answer the question yet.
{{~/user}}
%s
{{#assistant~}}
{{gen 'Thoughts' temperature=0.5 max_tokens=50}}
{{~/assistant}}
''' % self.ReAct_prompt, llm=self.model)
self.value_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Evaluate its value as a float between 0 and 1, and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Value' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
self.vote_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the following states of reasoning, vote for the best state:
{{states_text}}
Give the index of your voted best state(the 1st state has index 0), and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Vote' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
def model_response_handler(self, program, **kargs):
print("Calling guidance model(Modify Me to handle specific LLM response excpetions!)")
reponse = program(**kargs)
return reponse
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
state_text = ' '.join(state)
thoughts = []
for _ in range(k):
response = self.model_response_handler(self.thoughts_program, state_text=state_text, k=1)
text = response['Thoughts']
thoughts += [text]
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def evaluate_states(self, states):
#implement state evaluation logic using self.model
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
response = self.model_response_handler(self.value_program, state_text=state_text)
try:
value_text = response['Value']
print(f"Value text {value_text}")
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
response = self.model_response_handler(self.vote_program, states_text=states_text)
best_state_text = response['Vote']
print(f"Best state text: {best_state_text}")
best_state = int(best_state_text)
return {state: 1 if i == best_state else 0 for i in range(len(states))}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class GuidanceOpenAILanguageModel(GuidanceLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=False):
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
super().__init__(guidance.llms.OpenAI(self.api_model), strategy, evaluation_strategy, enable_ReAct_prompting)
def model_response_handler(self, program, **kargs):
error_msg = ''
while True:
try:
program.llm.max_retries = 60
guidance.llms.OpenAI.cache.clear()
response = program(**kargs)
return response
except openai.error.RateLimitError as e:
                sleep_duration = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
                print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
                time.sleep(sleep_duration)
            except Exception as e:
                if str(e) == f'''Too many (more than {program.llm.max_retries}) OpenAI API RateLimitError's in a row!''':
                    sleep_duration = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
                    print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
                    time.sleep(sleep_duration)
else:
error_msg = str(e)
break
raise Exception(error_msg)
class TreeofThoughts:
"""
    1. Thought Decomposition --> based on problem properties
    2. Thought Generator -> create a thought generator function G(p0, s, k) with 2 strategies: a. sample i.i.d. thoughts from a CoT prompt b. propose thoughts
    sequentially using a propose prompt
    3. Create a state evaluator function V(p0, S) with 2 strategies: a. value each state independently b. vote across states
    4. Choose a search algorithm based on the tree structure [BFS or DFS]
    Implement the chosen search algorithm, for BFS (algo 1):
        init S0 with the input x
        for t = 1 to T (step limit):
            generate candidate thoughts for each state in St-1
            evaluate the candidate states using the state evaluator V
            select the b most promising states for St
        return the final output by generating the thought for the best state in St
    for DFS (algo 2):
        define a recursive DFS function with the current state s, step t, and other required params
        if t > T, record the output by generating the thought for the current state s
        for each candidate state s' in the sorted list of generated thoughts for s:
            if the evaluated value of s' is greater than the threshold vth, call the DFS function recursively
            with s' and t + 1
    execute the chosen search algorithm with the input problem, thought generator, state evaluator, and other required params
"""
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
def solve(self, x, k, T, b, vth, timeout=None):
start_time = time.time()
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
if result:
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth)
if result:
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = {(*s, z) for s in S0 for z in self.model.generate_thoughts(s, k)}
Vt = self.model.evaluate_states(S0_t)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
def tot_dfs(self, x, k, T, vth, pruning_threshold=0.5, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
output = []
iteration_count = 0
consecutive_convergence_count = 0
prev_best_value = None
def dfs(s, t):
nonlocal consecutive_convergence_count, prev_best_value, iteration_count
if t > T:
thought = self.model.generate_thoughts(s, 1)
value = self.model.evaluate_states({s})[s]
output.append((thought, value))
if confidence_threshold is not None and value >= confidence_threshold:
return True
if prev_best_value is not None and convergence_threshold is not None:
if abs(value - prev_best_value) < convergence_threshold:
consecutive_convergence_count += 1
else:
consecutive_convergence_count = 0
prev_best_value = value
iteration_count += 1
if (max_iterations is not None and iteration_count >= max_iterations) or (convergence_count is not None and consecutive_convergence_count >= convergence_count):
return True
return False
for s_prime in sorted(self.model.generate_thoughts(s, k)):
state_value = self.model.evaluate_states({s_prime})[s_prime]
if state_value > vth and (pruning_threshold is None or state_value >= pruning_threshold):
if dfs((*s, s_prime), t + 1):
return True
return False
dfs(x, 1)
return max(output, key=lambda x: x[1]) if output else None
class OptimizedTreeofThoughts(TreeofThoughts):
def solve(self, x, k=5, T=3, b=5, vth=0.5, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
if result:
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth, confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
if result:
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
api_key = "api key"
language_model = GuidanceOpenAILanguageModel(api_key)
search_algorithm = "DFS"
#init optimized tree of thoughts
tree_of_thoughts = OptimizedTreeofThoughts(language_model, search_algorithm)
#define the inital state
# Set the input problem and parameters for the Tree of Thoughts algorithm
input_problem = "What are next generation reasoning methods for Large Language Models"
k = 5
T = 3
b = 5
vth = 0.5
timeout = 10
confidence = 1.0
max_iterations = 40
convergence_threshold = 0.01
convergence_count = 5
# Run the Tree of Thoughts algorithm
solution = tree_of_thoughts.solve(input_problem, k, T, b, vth, timeout, confidence_threshold=confidence, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
# Print the solution
print(f"Solution: {solution}") | tree-of-thoughts-main | experiements/extremely_experimental/prompting/guidancePrompt.py |
from tree_of_thoughts.models.openai_models import OpenAILanguageModel
from tree_of_thoughts.treeofthoughts import TreeofThoughtsDFS
#
api_model= "gpt-3.5-turbo"
model = OpenAILanguageModel(api_key='api key', api_model=api_model)
#choose search algorithm('BFS' or 'DFS')
search_algorithm = "BFS"
# value or vote
evaluation_strategy = "value"
tree_of_thoughts= TreeofThoughtsDFS(model) #search_algorithm)
# Note to reproduce the same results from the tree of thoughts paper if not better,
# craft an 1 shot chain of thought prompt for your task below
input_problem = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
num_thoughts = 1
max_steps= 3
max_states = 3
value_threshold= 0.5
# call the solve method with the input problem and other params
solution = tree_of_thoughts.solve(
    input_problem,
    num_thoughts=num_thoughts,
    max_steps=max_steps,
    value_threshold=value_threshold,
)
#use the solution in your production environment
print(f"solution: {solution}")
| tree-of-thoughts-main | examples/example_totdfs.py |
from tree_of_thoughts.models.openai_models import OpenAILanguageModel
from tree_of_thoughts.treeofthoughts import MonteCarloTreeofThoughts
api_model= "gpt-3.5-turbo"
model = OpenAILanguageModel(api_key='api key', api_model=api_model)
# Initialize the MonteCarloTreeofThoughts class with the model
tree_of_thoughts = MonteCarloTreeofThoughts(model)
# Note to reproduce the same results from the tree of thoughts paper if not better,
# craft an 1 shot chain of thought prompt for your task below
initial_prompt = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
num_thoughts = 1
max_steps = 3
max_states = 4
pruning_threshold = 0.5
solution = tree_of_thoughts.solve(
initial_prompt=initial_prompt,
num_thoughts=num_thoughts,
max_steps=max_steps,
max_states=max_states,
pruning_threshold=pruning_threshold,
# sleep_time=sleep_time
)
print(f"Solution: {solution}") | tree-of-thoughts-main | examples/montecarlo_example.py |
from tree_of_thoughts.treeofthoughts import TreeofThoughts, HuggingLanguageModel, MonteCarloTreeofThoughts
model_name="gpt"
model = HuggingLanguageModel(model_name,
model_tokenizer=model_name,
verbose=True)
# Initialize the MonteCarloTreeofThoughts class with the model
tree_of_thoughts = MonteCarloTreeofThoughts(model)
# Note to reproduce the same results from the tree of thoughts paper if not better,
# craft an 1 shot chain of thought prompt for your task below
initial_prompt = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
num_thoughts = 1
max_steps = 3
max_states = 4
pruning_threshold = 0.5
solution = tree_of_thoughts.solve(
initial_prompt=initial_prompt,
num_thoughts=num_thoughts,
max_steps=max_steps,
max_states=max_states,
pruning_threshold=pruning_threshold,
# sleep_time=sleep_time
)
print(f"Solution: {solution}") | tree-of-thoughts-main | examples/huggingface_example.py |
from tree_of_thoughts.models.openai_models import OpenAILanguageModel
from tree_of_thoughts.treeofthoughts import TreeofThoughts2
#
api_model= "gpt-3.5-turbo"
model = OpenAILanguageModel(api_key='api key', api_model=api_model)
tree_of_thoughts= TreeofThoughts2(model) #search_algorithm)
# Note to reproduce the same results from the tree of thoughts paper if not better,
# craft an 1 shot chain of thought prompt for your task below
input_problem = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
# Solve a problem with the TreeofThoughts
num_thoughts = 1
max_steps = 2
pruning_threshold = 0.5
solution = tree_of_thoughts.solve(input_problem, num_thoughts, max_steps, pruning_threshold)
print(f"solution: {solution}")
| tree-of-thoughts-main | examples/example_tot2.py |
from tree_of_thoughts.models.openai_models import OpenAILanguageModel
from tree_of_thoughts.treeofthoughts import TreeofThoughtsASearch
#
api_model= "gpt-4"
model = OpenAILanguageModel(api_key='api key', api_model=api_model)
tree_of_thoughts= TreeofThoughtsASearch(model) #search_algorithm)
# Note to reproduce the same results from the tree of thoughts paper if not better,
# craft an 1 shot chain of thought prompt for your task below
input_problem = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
solution = tree_of_thoughts.solve(input_problem)
print(f"solution: {solution}")
| tree-of-thoughts-main | examples/example_totA.py |
from tree_of_thoughts.treeofthoughts import HFPipelineModel, MonteCarloTreeofThoughts
model_name="gpt2"
gpt2_pipeline_model = HFPipelineModel(model_name)
tree_of_thoughts = MonteCarloTreeofThoughts(gpt2_pipeline_model)
#
initial_prompt = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
num_thoughts = 1
max_steps = 3
max_states = 4
pruning_threshold = 0.5
solution = tree_of_thoughts.solve(
initial_prompt=initial_prompt,
num_thoughts=num_thoughts,
max_steps=max_steps,
max_states=max_states,
pruning_threshold=pruning_threshold,
# sleep_time=sleep_time
)
print(f"Solution: {solution}") | tree-of-thoughts-main | examples/pipelinehuggingface.py |
#thought -> evaluated value (0.4, This solution is invalid because x) -> thought prompt + this solution is invalid because + better eval
import json
import os
import time
DATA_PATH = './data'
import logging
import concurrent.futures
from queue import PriorityQueue
from typing import Any, Dict, Union
import numpy as np
from tree_of_thoughts.models.abstract_language_model import AbstractLanguageModel
from tree_of_thoughts.text_generation_web_ui import (
build_text_generation_web_ui_client_llm,
ui_default_parameters,
)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class TreeofThoughts:
def __init__(self, model):
self.model = model
self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {
"nodes": {},
}
self.best_state = None
self.best_value = float("-inf")
        self.history = []  # added line: initialize history
def save_tree_to_json(self, file_name):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as json_file:
json.dump(self.tree, json_file, indent=4)
def logNewState(self, state, evaluation):
if not (type(state) == str):
state = " | ".join(state)
if state in self.tree['nodes']:
self.tree['nodes'][state]['thoughts'].append(evaluation)
else:
self.tree['nodes'][state] = {'thoughts': [evaluation]}
def adjust_pruning_threshold_precentile(self, evaluated_thoughts, percentile):
values = np.array(list(evaluated_thoughts.values()))
if values.size == 0:
return 0
return max(np.percentile(values, percentile), 0.1)
def adjust_pruning_threshold_moving_average(self, evaluated_thoughts, window_size):
values = list(evaluated_thoughts.values())
if len(values) < window_size:
return np.mean(values) if values else 0
else:
return max(np.mean(values[-window_size:]), 0.1)
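    # Illustrative note (not in the original file): for evaluated values
    # [0.2, 0.6, 0.9] and window_size=5, the moving-average helper returns their plain
    # mean (~0.57) because fewer values than window_size are available; once the window
    # is full it returns max(mean(last window_size values), 0.1). The percentile helper
    # returns max(np.percentile(values, percentile), 0.1), e.g. 0.6 for percentile=50.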
######################
class TreeofThoughtsBFS(TreeofThoughts):
def solve(self, initial_prompt, num_thoughts, max_steps, max_states, value_threshold, pruning_threshold=0.5):
current_states = [initial_prompt]
state_values = {}
dynamic_pruning_threshold = pruning_threshold
try:
with concurrent.futures.ThreadPoolExecutor() as executor:
for step in range(1, max_steps + 1):
selected_states = []
for state in current_states:
thoughts = self.model.generate_thoughts(state, num_thoughts, initial_prompt)
futures = [executor.submit(self.model.evaluate_states, {thought: 0}, initial_prompt) for thought in thoughts]
concurrent.futures.wait(futures)
evaluated_thoughts = {thought: fut.result() for thought, fut in zip(thoughts, futures) if isinstance(fut.result(), (int, float))} # check if result is a number
if evaluated_thoughts: # only adjust if you have evaluated thoughts
dynamic_pruning_threshold = self.adjust_pruning_threshold_moving_average(evaluated_thoughts, 5)
for thought, value in evaluated_thoughts.items():
flattened_state = (state, thought) if isinstance(state, str) else (*state, thought)
selected_states.append((flattened_state, value))
selected_states.sort(key=lambda x: x[1], reverse=True)
selected_states = selected_states[:max_states] # Select only the top states
for state, value in selected_states:
if value >= dynamic_pruning_threshold:
state_values[state] = value
self.logNewState(state, value)
logger.debug(f"State Values: {state_values}")
# if state_values:
# highest_rated_solution = max(state_values.items(), key=lambda x: x[1])
# print(f"highest rated solution: {highest_rated_solution}")
# highest_rated_state = highest_rated_solution[0] # Use a different name to avoid confusion
# print(f'highest rated state: {highest_rated_state}')
# try:
# solution = self.model.generate_solution(initial_prompt, highest_rated_state)
# except Exception as e:
# logger.error(f"Error in generating solution: {e}")
# solution = None # Set a fallback value for solution
# return solution if solution is not None else highest_rated_state # Return highest rated state if solution is None
if state_values:
highest_rated_solution = max(state_values.items(), key=lambda x: x[1])
highest_rated_state = highest_rated_solution[0]
solution = self.model.generate_solution(initial_prompt, highest_rated_state)
print(f"Highest_rated solution: {highest_rated_solution} highest_rated_solution: {highest_rated_solution} Solution: {solution}")
return solution if solution else highest_rated_state
else:
return None
except Exception as e:
logger.error(f"Error in tot_bfs: {e}")
return None
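    # --- Usage sketch (illustrative, not part of the original class) ---
    # Assuming a model that implements AbstractLanguageModel (e.g. OpenAILanguageModel
    # from this package, configured with a valid API key), BFS search can be driven
    # roughly like this; the numbers are illustrative defaults, not tuned settings:
    #
    #   model = OpenAILanguageModel(api_key="...")
    #   tot = TreeofThoughtsBFS(model)
    #   solution = tot.solve(
    #       initial_prompt="use the numbers 2 8 8 14 and basic arithmetic to obtain 24",
    #       num_thoughts=2,
    #       max_steps=3,
    #       max_states=4,
    #       value_threshold=0.5,
    #   )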
###########
class TreeofThoughtsDFS(TreeofThoughts):
def solve(self, initial_prompt, num_thoughts, max_steps, value_threshold, pruning_threshold=0.5):
output = []
def dfs(state, step):
nonlocal output
if step > max_steps:
thought = self.model.generate_thoughts(state, 1, initial_prompt)
                value = self.model.evaluate_states({state: 0}, initial_prompt)[state]
output.append((thought, value))
return
            thoughts = self.model.generate_thoughts(state, num_thoughts, initial_prompt)
evaluated_thoughts = self.model.evaluate_states({thought: 0 for thought in thoughts}, initial_prompt)
            filtered_thoughts = [thought for thought in thoughts if evaluated_thoughts[thought] >= pruning_threshold]
for next_state in filtered_thoughts:
state_value = self.model.evaluate_states({next_state: 0}, initial_prompt)[next_state]
                if state_value > value_threshold:
child = (state, next_state) if isinstance(state, str) else (*state, next_state)
dfs(child, step + 1)
try:
dfs(initial_prompt, 1)
best_state, _ = max(output, key=lambda x: x[1])
solution = self.model.generate_solution(initial_prompt, best_state)
return solution if solution else best_state
except Exception as e:
logger.error(f"Error in tot_dfs: {e}")
return None
#v2 => best first search => explores state space of the quality of the states
#priority que or greedy BFS
class TreeofThoughtsBEST:
def __init__(self, model):
self.model = model
self.tree = {"nodes": {}}
def save_tree_to_json(self, file_name):
        os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as json_file:
json.dump(self.tree, json_file, indent=4)
def log_new_state(self, state, evaluation):
state_key = " | ".join(state) if isinstance(state, tuple) else state
if state_key in self.tree["nodes"]:
self.tree["nodes"][state_key]['thoughts'].append(evaluation)
else:
            self.tree['nodes'][state_key] = {'thoughts': [evaluation]}
def solve(self, initial_prompt, num_thoughts, max_steps, pruning_threshold):
visited_states = set()
state_queue = PriorityQueue()
state_queue.put((0, initial_prompt))
for _ in range(max_steps):
if state_queue.empty():
break
_, state = state_queue.get()
if state in visited_states:
continue
visited_states.add(state)
thoughts = self.model.generate_thoughts(state, num_thoughts, initial_prompt)
evaluated_thoughts = {thought: self.model.evaluate_states({thought: 0}, initial_prompt)[thought] for thought in thoughts}
for thought, value in evaluated_thoughts.items():
if value >= pruning_threshold:
new_state = (state, thought) if isinstance(state, str) else (*state, thought)
                    state_queue.put((-value, new_state))  # negate so higher-value states are expanded first
self.log_new_state(new_state, value)
        best_state = max(visited_states, key=lambda s: self.model.evaluate_states({s: 0}, initial_prompt)[s])
solution = self.model.generate_solution(initial_prompt, best_state)
print(f"Highest_rated solution: {best_state} Solution: {solution}")
return solution if solution else best_state
#A* search algorithm
class TreeofThoughtsASearch:
def __init__(self, model):
self.model = model
def solve(self, initial_prompt, num_thoughts=5, max_steps=30, pruning_threshold=0.4):
        # the open set is implemented as a priority queue where the priority is -f_score
open_set = PriorityQueue()
open_set.put((0, 0, initial_prompt))
#the set of visited_states
visited_states = set()
#the g_scores and f-scores are stored as dictionaries
g_scores = {initial_prompt: 0}
f_scores = {initial_prompt: self.model.evaluate_states({initial_prompt: 0}, initial_prompt)[initial_prompt]}
#the parent of each state is stored in a dictionary
came_from = {}
for _ in range(max_steps):
if open_set.empty():
break
_, _, current_state = open_set.get()
if self.is_goal(current_state, f_scores[current_state]):
return self.reconstruct_path(came_from, current_state, initial_prompt)
thoughts = self.model.generate_thoughts(current_state, num_thoughts, initial_prompt)
evaluated_thoughts = {thought: self.model.evaluate_states({thought: 0}, initial_prompt)[thought] for thought in thoughts}
for thought, value in evaluated_thoughts.items():
if value < pruning_threshold or thought in visited_states:
continue
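                # A* bookkeeping: each step costs 1/value (well-rated thoughts are cheap to reach),
                # g is the accumulated path cost, and f = g + h with the evaluated value as the
                # heuristic h; f is pushed negated because PriorityQueue pops the smallest item first.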
tentative_g_score = g_scores[current_state] + 1 / value
if thought not in g_scores or tentative_g_score < g_scores[thought]:
came_from[thought] = current_state
g_scores[thought] = tentative_g_score
f_scores[thought] = tentative_g_score + value
open_set.put((-f_scores[thought], g_scores[thought], thought))
return self.reconstruct_path(came_from, current_state, initial_prompt)
def is_goal(self, state, score):
#if eval state is above 0.9
return score >= 0.9
    def reconstruct_path(self, came_from, current_state, initial_prompt):
        path = [current_state]
        while current_state in came_from:
            current_state = came_from[current_state]
            path.append(current_state)
        path.reverse()
        solution = self.model.generate_solution(initial_prompt, path)
        print(f"Path: {path} solution: {solution}")
        return solution if solution else path
class MonteCarloTreeofThoughts(TreeofThoughts):
def __init__(self, model, objective="balance"):
super().__init__(model)
self.objective = objective
self.solution_found = False
self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {
"nodes": {},
"metrics": {"thoughts": {}, "evaluations": {}},
}
def optimize_params(self, num_thoughts, max_steps, max_states):
if self.objective == 'speed':
num_thoughts = max(1, num_thoughts - 1)
max_steps = max(1, max_steps - 1)
max_states = max(1, max_states - 1)
elif self.objective == 'reliability':
num_thoughts += 1
max_steps += 1
max_states += 1
        elif self.objective == 'balance':
if self.solution_found:
num_thoughts = max(1, num_thoughts - 1)
max_steps = max(1, max_steps - 1)
max_states = max(1, max_states - 1)
else:
num_thoughts += 1
max_steps += 1
max_states += 1
return num_thoughts, max_steps, max_states
def solve(self,
initial_prompt: str,
num_thoughts: int,
max_steps: int,
max_states: int,
pruning_threshold: float,
# sleep_time: float,
):
self.file_name = "logs/tree_of_thoughts_output_montecarlo.json"
return self.monte_carlo_search(
initial_prompt,
num_thoughts,
max_steps,
max_states,
pruning_threshold,
# sleep_time,
)
#v3
def monte_carlo_search(self,
initial_prompt: str,
num_thoughts: int,
max_steps: int,
max_states: int,
pruning_threshold: float,
):
current_states = [initial_prompt]
state_values = {}
visit_counts = {initial_prompt: 0}
transposition_table = {}
best_state = None
best_value = float('-inf')
for step in range(1, max_steps + 1):
selected_states = []
            for state in current_states:
                if state in transposition_table:
                    # Reuse cached evaluations for states that were already expanded
                    evaluated_thoughts = transposition_table[state]
                else:
                    time.sleep(1)
                    thoughts = self.model.generate_thoughts(state, num_thoughts, initial_prompt)
                    time.sleep(1)
                    evaluated_thoughts = self.model.evaluate_states(thoughts, initial_prompt)
                    transposition_table[state] = evaluated_thoughts
for thought, value in evaluated_thoughts.items():
flattened_state = (state, thought) if isinstance(state, str) else (*state, thought)
if flattened_state not in visit_counts:
visit_counts[flattened_state] = 0
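                    # UCB1 selection: exploit the evaluated value and add an exploration bonus
                    # sqrt(2 * ln(parent visits) / child visits) so rarely-visited children
                    # still get considered.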
                    if visit_counts[state] > visit_counts[flattened_state] and visit_counts[flattened_state] > 0:
                        ucb1_value = value + np.sqrt(2 * np.log(visit_counts[state]) / visit_counts[flattened_state])
                    else:
                        # No visit statistics for this child yet, so fall back to its raw value
                        ucb1_value = value
                    if ucb1_value >= pruning_threshold:
                        selected_states.append(flattened_state)
                        state_values[flattened_state] = value
                        # Update the best state if the current state value is greater than the best value
                        if value > best_value:
                            best_state = flattened_state
                            best_value = value
visit_counts[state] += 1
            if selected_states:
                current_states = selected_states[:max_states]
self.save_tree_to_json(self.file_name)
# if best_state is not None:
# solution = self.model.generate_solution(initial_prompt, best_state)
# return solution
# else:
# solution = None
# return None
        if best_state is None:
            return None
        solution = self.model.generate_solution(initial_prompt, best_state)
        return solution if solution else best_state
# #does not output state after each thought --- idk why -- needs work
# class OptimizedTreeofThoughts(TreeofThoughts):
# def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# start_time = time.time()
# print(f'Start time {start_time}')
# if self.search_algorithm == 'BFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_bfs(x, k, T, b, pruning_threshold=0.5)
# print(f'result in optimized tree of thoughts: {result}')
# if result:
# return result
# elif self.search_algorithm == 'DFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_dfs(x, k, T, vth, confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
# if result:
# return result
# else:
# raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
class TextGenerationWebUILanguageModel(AbstractLanguageModel):
def __init__(self, strategy="cot", evaluation_strategy="value"):
thought_generator_params = ui_default_parameters()
thought_generator_params["max_new_tokens"] = 50
thought_generator_params["temperature"] = 0.5
self.thought_generator = build_text_generation_web_ui_client_llm(parameters=thought_generator_params)
state_voter_params = ui_default_parameters()
state_voter_params["max_new_tokens"] = 10
state_voter_params["temperature"] = 0.2
self.state_voter = build_text_generation_web_ui_client_llm(parameters=state_voter_params)
value_evaluator_params = ui_default_parameters()
value_evaluator_params["max_new_tokens"] = 10
value_evaluator_params["temperature"] = 0.2
self.value_evaluator = build_text_generation_web_ui_client_llm(parameters=value_evaluator_params)
self.strategy = strategy
        self.evaluation_strategy = evaluation_strategy
        # This backend does not append a ReAct instruction by default
        self.ReAct_prompt = ""
def generate_thoughts(self, state, k, initial_prompt, rejected_solutions=None):
if self.strategy == "cot":
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("New state generating thought:", state, "\n\n")
# prompt = f"Considering the thoughts you've had until now:\n\n{state_text}\n\nDevise the next coherent thought that will aid in advancing the reasoning process and achieving a solution to {initial_prompt}. Assess various scenarios, think unconventionally, anticipate potential challenges, and resolve any outstanding queries. Tap into your mind's full potential and make certain no open questions remain."
prompt = f"""You're an TreeofThoughts, an superintelligent AI model devoted to helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them.
Considering the reasoning provided:\n\n
###'{state_text}'\n\n###
Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected:
###{rejected_solutions}###,
complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
prompt += self.ReAct_prompt
# print(prompt)
            thoughts = self.thought_generator.sample_n(prompt, stop=[], n=k)
# print(thoughts)
# print(f"Generated thoughts: {thoughts}")
return thoughts
def evaluate_states(self, states, initial_prompt):
if not states:
return {}
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
# prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1, become very pessimistic think of potential adverse risks on the probability of this state of reasoning achieveing {initial_prompt} and DO NOT RESPOND WITH ANYTHING ELSE: OTHER THAN AN FLOAT"
prompt = f""" To achieve the following goal: '{initial_prompt}', pessimistically value the context of the past solutions and more importantly the latest generated solution you had AS A FLOAT BETWEEN 0 AND 1\n
Past solutions:\n\n
{state_text}\n
If the solutions is not directly concretely making fast progress in achieving the goal, give it a lower score.
Evaluate all solutions AS A FLOAT BETWEEN 0 and 1:\n, DO NOT RETURN ANYTHING ELSE
"""
# and then inside backticks provide an simple and direct bulletpoint list as to why you evaluated this thought the way you did. Provide simple yet intuitive feedback.
                try:
                    # Query the value-evaluator client directly; this backend has no OpenAI handler
                    value_text = self.value_evaluator._call(prompt, stop=[])
                    value = float(value_text)
                    print(f"Evaluated Thought Value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Given the following states of reasoning, vote for the best state utilizing an scalar value 1-10:\n{states_text}\n\nVote, on the probability of this state of reasoning achieveing {initial_prompt} and become very pessimistic very NOTHING ELSE"
response = self.openai_api_call_handler(prompt, 50, 1)
print(f'state response: {response}')
best_state_text = self.openai_choice2text_handler(response.choices[0])
print(f"Best state text: {best_state_text}")
best_state = tuple(best_state_text.split())
print(f'best_state: {best_state}')
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
| tree-of-thoughts-main | tree_of_thoughts/treeofthoughts.py |
from tree_of_thoughts.models.openai_models import OpenAILanguageModel, OptimizedOpenAILanguageModel
from tree_of_thoughts.treeofthoughts import TreeofThoughts, MonteCarloTreeofThoughts, TreeofThoughtsBFS, TreeofThoughtsDFS, TreeofThoughtsBEST, TreeofThoughtsASearch
from tree_of_thoughts.models.abstract_language_model import AbstractLanguageModel
from tree_of_thoughts.models.huggingface_model import HuggingLanguageModel, HFPipelineModel
| tree-of-thoughts-main | tree_of_thoughts/__init__.py |
from typing import List, Mapping, Union, Any, Callable
from typing import Dict
import requests
from copy import deepcopy
from dataclasses import dataclass
def _default_extractor(json_response: Dict[str, Any], stop_parameter_name) -> str:
"""
    Extract the generated text from the JSON response using the default "response" key.
    Parameters:
        json_response (dict): The JSON response to be extracted.
        stop_parameter_name (str): Unused by the default extractor; kept for signature compatibility with custom extractors.
Returns:
str: The extracted response.
"""
return json_response["response"]
@dataclass
class _HTTPBaseLLM:
prompt_url: str
parameters: Dict[str, Union[float, int, str, bool, List[str]]] = None
response_extractor: Callable[[Dict[str, Any]], str] = _default_extractor
stop_parameter_name: str = "stop"
@property
def _llm_type(self) -> str:
return "custom"
def sample_n(self, prompt, stop, n):
samples = []
for _ in range(n):
samples.append(self._call(prompt, stop))
return samples
def _call(self, prompt: str, stop: List[str]) -> str:
# Merge passed stop list with class parameters
stop_list = list(
set(stop).union(set(self.parameters[self.stop_parameter_name]))
)
params = deepcopy(self.parameters)
params[self.stop_parameter_name] = stop_list
response = requests.post(
self.prompt_url,
json={
"prompt": prompt,
**params,
},
)
response.raise_for_status()
return self.response_extractor(
response.json(), params[self.stop_parameter_name]
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
def set_parameter(self, parameter_name, parameter_value):
self.parameters[parameter_name] = parameter_value
def ui_default_parameters():
return {
"max_new_tokens": 1024,
"do_sample": True,
"temperature": 0.001,
"top_p": 0.3,
"typical_p": 1,
"repetition_penalty": 1.2,
"top_k": 30,
"min_length": 0,
"no_repeat_ngram_size": 0,
"num_beams": 1,
"penalty_alpha": 0,
"length_penalty": 1.5,
"early_stopping": False,
"seed": -1,
"add_bos_token": True,
"truncation_length": 2048,
"ban_eos_token": False,
"skip_special_tokens": True,
"stopping_strings": [],
}
def _response_extractor(json_response, stopping_strings):
"""Extract relevant information from the given JSON response."""
result = json_response["results"][0]["text"]
for stop_string in stopping_strings:
# The stop strings from text-generation-webui come back without the last char
ss = stop_string[0:-1]
if ss in result:
cut_result = result.split(ss)[0]
return cut_result
return result
def build_text_generation_web_ui_client_llm(
prompt_url="http://0.0.0.0:5000/api/v1/generate", parameters=None
):
"""
    This function builds a text generation web UI client for an LLM (Large Language Model) API.
It takes a URL for the LLM server and optional parameters as inputs.
If parameters are not provided, default ones will be used. The function returns an HTTP client that can generate text based on user input.
Parameters:
prompt_url (str): URL of the LLM server.
parameters (Optional[dict]): Optional parameters to pass to the LLM API.
Returns:
An HTTP client object that can generate text based on user input.
"""
if parameters is None:
parameters = ui_default_parameters()
return _HTTPBaseLLM(
prompt_url=prompt_url,
parameters=parameters,
stop_parameter_name="stopping_strings",
response_extractor=_response_extractor,
)
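
if __name__ == "__main__":
    # Example (sketch): assumes a text-generation-webui server is running locally and
    # exposing the default API endpoint used above; the prompt and token budget are
    # illustrative only.
    params = ui_default_parameters()
    params["max_new_tokens"] = 64
    client = build_text_generation_web_ui_client_llm(parameters=params)
    # sample_n issues n independent completions for the same prompt
    completions = client.sample_n("Explain breadth-first search in one sentence.", stop=["\n"], n=2)
    print(completions)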
| tree-of-thoughts-main | tree_of_thoughts/text_generation_web_ui.py |
import re
from typing import Any, Callable, Optional, Tuple, Union
from langchain.llms import OpenAI
from langchain_experimental.tot.checker import ToTChecker
from langchain_experimental.tot.thought import ThoughtValidity
class LangchainTOT:
def __init__(self,
problem_description: Optional[str] = None,
checker_class: Optional[Any] = None):
self.llm = OpenAI(temperature=1, max_tokens=512, model="text-davinci-003")
self.problem_description = problem_description
self.checker_class = checker_class if checker_class else ToTChecker
self.thoughts = []
def set_problem_description(self, problem_description: str):
self.problem_description = problem_description
def set_checker_class(self, checker_class: Any):
self.checker_class = checker_class
def add_thought(self, thought: str):
self.thoughts.append(thought)
def check_thoughts(self) -> ThoughtValidity:
if not self.thoughts:
raise ValueError("No thoughts have been added.")
if not self.problem_description:
raise ValueError("Problem description is not set.")
checker = self.checker_class()
return checker.evaluate(self.problem_description, tuple(self.thoughts))
class MyChecker(ToTChecker):
def __init__(self, validate_fn: Callable[[str, Tuple[str, ...]], ThoughtValidity]):
self.validate_fn = validate_fn
def evaluate(self, problem_description: str, thoughts: Tuple[str, ...] = ()) -> ThoughtValidity:
return self.validate_fn(problem_description, thoughts)
def validate_sudoku(problem_description: str, thoughts: Tuple[str, ...], sudoku_solution: str) -> ThoughtValidity:
last_thought = thoughts[-1]
clean_solution = last_thought.replace(" ", "").replace('"', "")
regex_solution = clean_solution.replace("*", ".").replace("|", "\\|")
if sudoku_solution in clean_solution:
return ThoughtValidity.VALID_FINAL
elif re.search(regex_solution, sudoku_solution):
return ThoughtValidity.VALID_INTERMEDIATE
else:
return ThoughtValidity.INVALID
sudoku_solution = "3,4,1,2|1,2,3,4|2,1,4,3|4,3,2,1"
my_checker = MyChecker(validate_fn=lambda p, t: validate_sudoku(p, t, sudoku_solution))
problem_description = """
3,*,*,2|1,*,3,*|*,1,*,3|4,*,*,1
- This is a 4x4 Sudoku puzzle.
- The * represents a cell to be filled.
- The | character separates rows.
- At each step, replace one or more * with digits 1-4.
- There must be no duplicate digits in any row, column or 2x2 subgrid.
- Keep the known digits from previous valid thoughts in place.
- Each thought can be a partial or the final solution.
""".strip()
langchain_tot = LangchainTOT(problem_description=problem_description, checker_class=lambda: my_checker)
langchain_tot.add_thought("3,*,*,2|1,*,3,*|*,1,*,3|4,*,*,1")
print(langchain_tot.check_thoughts())
| tree-of-thoughts-main | tree_of_thoughts/langchain_tot.py |
tree-of-thoughts-main | tree_of_thoughts/models/__init__.py |
|
import guidance
from tree_of_thoughts.models.abstract_language_model import AbstractLanguageModel
import time
import os
import openai
class GuidanceLanguageModel(AbstractLanguageModel):
def __init__(self, model, strategy="cot", evaluation_strategy="value", enable_ReAct_prompting=False):
# gpt4 = guidance.llms.OpenAI("gpt-4")
# vicuna = guidance.llms.transformers.Vicuna("your_path/vicuna_13B", device_map="auto")
self.model = model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = '''{{#assistant~}}
{{gen 'Observation' temperature=0.5 max_tokens=50}}
{{~/assistant}}'''
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
self.thoughts_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Generate {{k}} coherent thoughts as short as possible to continue the reasoning process.
Don't answer the question yet.
{{~/user}}
%s
{{#assistant~}}
{{gen 'Thoughts' temperature=0.5 max_tokens=50}}
{{~/assistant}}
''' % self.ReAct_prompt, llm=self.model)
self.value_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Evaluate its value as a float between 0 and 1, and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Value' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
self.vote_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the following states of reasoning, vote for the best state:
{{states_text}}
Give the index of your voted best state(the 1st state has index 0), and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Vote' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
def model_response_handler(self, program, **kargs):
print("Calling guidance model(Modify Me to handle specific LLM response excpetions!)")
reponse = program(**kargs)
return reponse
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
state_text = ' '.join(state)
thoughts = []
for _ in range(k):
response = self.model_response_handler(self.thoughts_program, state_text=state_text, k=1)
text = response['Thoughts']
thoughts += [text]
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def evaluate_states(self, states):
#implement state evaluation logic using self.model
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
response = self.model_response_handler(self.value_program, state_text=state_text)
try:
value_text = response['Value']
print(f"Value text {value_text}")
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
response = self.model_response_handler(self.vote_program, states_text=states_text)
best_state_text = response['Vote']
print(f"Best state text: {best_state_text}")
            best_state = int(best_state_text)
            return {state: 1 if i == best_state else 0 for i, state in enumerate(states)}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class GuidanceOpenAILanguageModel(GuidanceLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=False):
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
super().__init__(guidance.llms.OpenAI(self.api_model), strategy, evaluation_strategy, enable_ReAct_prompting)
def model_response_handler(self, program, **kargs):
error_msg = ''
while True:
try:
program.llm.max_retries = 60
guidance.llms.OpenAI.cache.clear()
response = program(**kargs)
return response
except openai.error.RateLimitError as e:
                sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
                print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
                time.sleep(sleep_duration)
except Exception as e:
                if str(e) == f'''Too many (more than {program.llm.max_retries}) OpenAI API RateLimitError's in a row!''':
                    sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
                    print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
                    time.sleep(sleep_duration)
else:
error_msg = str(e)
break
raise Exception(error_msg)
| tree-of-thoughts-main | tree_of_thoughts/models/guidance_model.py |
import os
import openai
import time
from tree_of_thoughts.models.abstract_language_model import AbstractLanguageModel
import concurrent.futures
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=True):
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
self.use_chat_api = 'gpt' in self.api_model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = "Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx'."
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def openai_api_call_handler(self, prompt, max_tokens, temperature, k=1, stop=None):
while True:
try:
if self.use_chat_api:
messages = [
{
"role": "user",
"content": prompt
}
]
response = openai.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
else:
response = openai.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
)
with open("openai.logs", 'a') as log_file:
log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n")
return response
except openai.error.RateLimitError as e:
                sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
                print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
                time.sleep(sleep_duration)
def openai_choice2text_handler(self, choice):
if self.use_chat_api:
text = choice['message']['content']
else:
text = choice.text.strip()
return text
def generate_text(self, prompt, k):
if self.use_chat_api:
thoughts = []
for _ in range(k):
response = self.openai_api_call_handler(prompt, 400, 0.5, k)
text = self.openai_choice2text_handler(response.choices[0])
thoughts += [text]
# print(f'thoughts: {thoughts}')
return thoughts
else:
response = self.openai_api_call_handler(prompt, 300, 0.5, k)
thoughts = [self.openai_choice2text_handler(choice) for choice in response.choices]
return thoughts
def generate_thoughts(self, state, k, initial_prompt, rejected_solutions=None):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("New state generating thought:", state, "\n\n")
# prompt = f"Considering the thoughts you've had until now:\n\n{state_text}\n\nDevise the next coherent thought that will aid in advancing the reasoning process and achieving a solution to {initial_prompt}. Assess various scenarios, think unconventionally, anticipate potential challenges, and resolve any outstanding queries. Tap into your mind's full potential and make certain no open questions remain."
prompt = f"""You're an TreeofThoughts, an superintelligent AI model devoted to helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them.
Considering the reasoning provided:\n\n
###'{state_text}'\n\n###
Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected:
###{rejected_solutions}###,
complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
prompt += self.ReAct_prompt
# print(prompt)
thoughts = self.generate_text(prompt, k)
# print(thoughts)
# print(f"Generated thoughts: {thoughts}")
return thoughts
def generate_solution(self, initial_prompt, state, rejected_solutions=None):
try:
if isinstance(state, list):
state_text = '\n'.join(state)
else:
state_text = state
prompt = f"""You're an TreeofThoughts, an superintelligent AI model devoted to helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them.
Considering the reasoning provided:\n\n
###'{state_text}'\n\n###
Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected:
###{rejected_solutions}###,
complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
answer = self.generate_text(prompt, 1)
            print(f'Answer: {answer}')
# print(thoughts)
# print(f"General Solution : {answer}")
return answer
except Exception as e:
logger.error(f"Error in generate_solutions: {e}")
return None
def evaluate_states(self, states, initial_prompt):
if not states:
return {}
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
# prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1, become very pessimistic think of potential adverse risks on the probability of this state of reasoning achieveing {initial_prompt} and DO NOT RESPOND WITH ANYTHING ELSE: OTHER THAN AN FLOAT"
prompt = f""" To achieve the following goal: '{initial_prompt}', pessimistically value the context of the past solutions and more importantly the latest generated solution you had AS A FLOAT BETWEEN 0 AND 1\n
Past solutions:\n\n
{state_text}\n
If the solutions is not directly concretely making fast progress in achieving the goal, give it a lower score.
Evaluate all solutions AS A FLOAT BETWEEN 0 and 1:\n, DO NOT RETURN ANYTHING ELSE
"""
# and then inside backticks provide an simple and direct bulletpoint list as to why you evaluated this thought the way you did. Provide simple yet intuitive feedback.
response = self.openai_api_call_handler(prompt, 10, 1)
try:
value_text = self.openai_choice2text_handler(response.choices[0])
# print(f'state: {value_text}')
value = float(value_text)
print(f"Evaluated Thought Value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Given the following states of reasoning, vote for the best state utilizing an scalar value 1-10:\n{states_text}\n\nVote, on the probability of this state of reasoning achieveing {initial_prompt} and become very pessimistic very NOTHING ELSE"
response = self.openai_api_call_handler(prompt, 50, 1)
print(f'state response: {response}')
best_state_text = self.openai_choice2text_handler(response.choices[0])
print(f"Best state text: {best_state_text}")
best_state = tuple(best_state_text.split())
print(f'best_state: {best_state}')
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
# def solution(self, states, initial_prompt):
class OptimizedOpenAILanguageModel(OpenAILanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", cache_enabled=True, api_base="", api_model="", enable_ReAct_prompting=False):
super().__init__(api_key, strategy, evaluation_strategy, api_base, api_model, enable_ReAct_prompting)
self.cache_enabled = cache_enabled
self.thought_cache = {}
self.state_evaluation_cache = {}
    def parallel_generate_thoughts(self, states, k, initial_prompt=None):
        with concurrent.futures.ThreadPoolExecutor() as executor:
            thoughts = list(executor.map(lambda state: self.generate_thoughts(state, k, initial_prompt), states))
        print(f"Parallel generated thoughts: {thoughts}")
        return thoughts
    def parallel_evaluate_states(self, states, initial_prompt):
        with concurrent.futures.ThreadPoolExecutor() as executor:
            state_values = list(executor.map(lambda state: self.evaluate_states({state: 0}, initial_prompt), states))
        print(f"Parallel evaluated state values: {state_values}")
        return state_values
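
if __name__ == "__main__":
    # Example (sketch): requires an OpenAI API key in the OPENAI_API_KEY environment
    # variable; the puzzle, k, and goal text below are illustrative, not module defaults.
    model = OpenAILanguageModel(api_key=os.environ.get("OPENAI_API_KEY", ""))
    thoughts = model.generate_thoughts(
        state="2 8 8 14",
        k=2,
        initial_prompt="use the numbers 2 8 8 14 and basic arithmetic operations to obtain 24",
    )
    scores = model.evaluate_states({t: 0 for t in thoughts}, "obtain 24 from 2 8 8 14")
    print(scores)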
| tree-of-thoughts-main | tree_of_thoughts/models/openai_models.py |
import requests
import os
class Anthropic:
"""Anthropic large language models."""
def __init__(self, model="claude-2", max_tokens_to_sample=256, temperature=None, top_k=None, top_p=None, streaming=False, default_request_timeout=None):
self.model = model
self.max_tokens_to_sample = max_tokens_to_sample
self.temperature = temperature
self.top_k = top_k
self.top_p = top_p
self.streaming = streaming
self.default_request_timeout = default_request_timeout or 600
self.anthropic_api_url = os.getenv("ANTHROPIC_API_URL", "https://api.anthropic.com")
self.anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
def _default_params(self):
"""Get the default parameters for calling Anthropic API."""
d = {
"max_tokens_to_sample": self.max_tokens_to_sample,
"model": self.model,
}
if self.temperature is not None:
d["temperature"] = self.temperature
if self.top_k is not None:
d["top_k"] = self.top_k
if self.top_p is not None:
d["top_p"] = self.top_p
return d
    def _call(self, prompt, stop=None):
        """Call out to Anthropic's completion endpoint."""
        stop = stop or []
        params = self._default_params()
        # Anthropic authenticates with an "x-api-key" header and requires a version header
        headers = {
            "x-api-key": self.anthropic_api_key,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json",
        }
        data = {
            "prompt": prompt,
            "stop_sequences": stop,
            **params
        }
        response = requests.post(f"{self.anthropic_api_url}/v1/complete", headers=headers, json=data, timeout=self.default_request_timeout)
        response.raise_for_status()
        return response.json().get("completion")
| tree-of-thoughts-main | tree_of_thoughts/models/anthropic.py |
from abc import ABC, abstractmethod
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
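
# Minimal illustrative implementation of the interface (a sketch, not part of the
# original package): it echoes states back and scores everything 0.5, which is enough
# to exercise thought generation and evaluation without calling a real model.
class EchoLanguageModel(AbstractLanguageModel):
    def generate_thoughts(self, state, k, initial_prompt=None):
        state_text = state if isinstance(state, str) else " ".join(state)
        return [f"{state_text} -> thought {i}" for i in range(k)]
    def evaluate_states(self, states, initial_prompt=None):
        return {state: 0.5 for state in states}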
| tree-of-thoughts-main | tree_of_thoughts/models/abstract_language_model.py |
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline
from tree_of_thoughts.models.abstract_language_model import AbstractLanguageModel
class HuggingLanguageModel(AbstractLanguageModel):
def __init__(self, model_name, model_tokenizer=None, verbose=False):
self.model = AutoModelForCausalLM.from_pretrained(model_name)
self.tokenizer = AutoTokenizer.from_pretrained(model_tokenizer or model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
state_text = ' '.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve {state_text}"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
            inputs = self.tokenizer(prompt, return_tensors="pt")
            # do_sample is required when requesting multiple return sequences, and
            # max_new_tokens bounds only the generated continuation, not prompt + output
            outputs = self.model.generate(**inputs, max_new_tokens=max_length, do_sample=True, num_return_sequences=k)
thoughts = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, initial_prompt, max_length=10):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', pessimitically evaluate its value as a float between 0 and 1 based on it's potential to achieve {initial_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
                inputs = self.tokenizer(prompt, return_tensors="pt")
                outputs = self.model.generate(**inputs, num_return_sequences=1, max_new_tokens=max_length)
value_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
value = float(value_text)
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
class HFPipelineModel(AbstractLanguageModel):
def __init__(self, model_name, verbose=False):
self.model_name = model_name
self.pipeline = pipeline("text-generation", model=model_name)
self.verbose = verbose
    def generate_thoughts(self, state, k, max_length=100):
        state_text = ' '.join(state)
        prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve"
        if self.verbose:
            print(f"Generating thoughts for state: {state_text}")
        try:
            # The text-generation pipeline handles tokenization and generation in one call
            generated = self.pipeline(prompt, max_new_tokens=max_length, num_return_sequences=k, do_sample=True)
            thoughts = [output["generated_text"] for output in generated]
        except Exception as e:
            if self.verbose:
                print(f"Error generating thoughts for state: {state_text}")
                print(f"Error: {e}")
            thoughts = []
        return thoughts
def evaluate_states(self, states, initial_prompt, max_length=10):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', pessimistically evaluate its value as a float between 0 and 1 based on its potential to achieve {initial_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
                generated_outputs = self.pipeline(prompt, max_new_tokens=max_length, num_return_sequences=1)
value_text = generated_outputs[0]["generated_text"]
value = float(value_text)
print(f'value {value}')
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
@staticmethod
def load(model_name, verbose=False):
return HFPipelineModel(model_name, verbose)
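
if __name__ == "__main__":
    # Example (sketch): downloads a small model from the Hugging Face Hub on first run;
    # "gpt2" and the state text below are illustrative choices, not project defaults.
    pipeline_model = HFPipelineModel.load("gpt2", verbose=True)
    thoughts = pipeline_model.generate_thoughts(state=["2 8 8 14"], k=1, max_length=40)
    print(thoughts)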
| tree-of-thoughts-main | tree_of_thoughts/models/huggingface_model.py |
from setuptools import setup, find_packages
setup(
name = 'omnimorph',
packages = find_packages(exclude=[]),
version = '0.0.7',
license='MIT',
description = 'OmniMorph - Pytorch',
author = 'Agora',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/OmniMorph',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers'
],
install_requires=[
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | OmniMorph-master | setup.py |
import torch
import torch.nn as nn
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class AudioEmbedding(nn.Module):
def __init__(self, in_channels, embed_dim):
super().__init__()
        self.conv = nn.Conv1d(in_channels, embed_dim, kernel_size=1)  # audio input is (batch, channels, time), so a 1D conv is appropriate
def forward(self, x, **kwargs):
return self.conv(x)
class OmniMorph(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self._embedding_registry = {}
self._embedding_instances = {}
self._fusion_techniques = {}
        # Pre-register and instantiate the embedding functions
self.register_and_instantiate('text', TextEmbedding, num_embeddings=10000, embedding_dim=768)
self.register_and_instantiate('vision', VisionEmbedding, img_size=224, patch_size=16, in_chans=3, embed_dim=768)
self.register_and_instantiate('audio', AudioEmbedding, in_channels=128, embed_dim=768)
# self.register_and_instantiate('video', VideoEmbedding, num_channels=3, time_dim=10, height=224, width=224, embed_dim=768)
# Instantiate VisionLanguageEmbedding with visionembeddings and textembeddings instances
vision_embed_instance = self._embedding_instances.get('vision')
text_embed_instance = self._embedding_instances.get('text')
self.vision_language_embedding = VisionLanguageEmbedding(text_embed_instance, vision_embed_instance)
def register_and_instantiate(self, modality_type, embedding_class, **kwargs):
self.register_embedding(modality_type, embedding_class)
self.instantiate_embedding(modality_type, **kwargs)
def register_embedding(self, modality_type, embedding_class):
self._embedding_registry[modality_type] = embedding_class
def instantiate_embedding(self, modality_type, embedding_class=None, *args, **kwargs):
if embedding_class is None:
embedding_class = self._embedding_registry.get(modality_type)
if embedding_class is not None:
self._embedding_instances[modality_type] = embedding_class(*args, **kwargs)
else:
raise ValueError(f"Unsupported modality type: {modality_type}")
def forward(self, input_data, modality_type=None, fusion_technique=None, file_extension=None, user_defined_modality=None, custom_modality_fn=None, **kwargs):
if modality_type is None:
modality_type = self.detect_modality(input_data, file_extension=file_extension, user_defined_modality=user_defined_modality, custom_modality_fn=custom_modality_fn)
print(modality_type)
embedding_instance = self._embedding_instances.get(modality_type)
if embedding_instance is not None:
embedding = embedding_instance(input_data, **kwargs)
print(embedding)
if fusion_technique:
fusion_fn = self._fusion_techniques.get(fusion_technique)
if fusion_fn:
embedding = fusion_fn(embedding)
print(embedding)
else:
raise ValueError(f"Unsupported fusion technique: {fusion_technique}")
return embedding
else:
raise ValueError(f"Embedding for modality type {modality_type} not instantiated")
def detect_modality(self, input_data, file_extension=None, user_defined_modality=None, custom_modality_fn=None):
if user_defined_modality:
return user_defined_modality
if custom_modality_fn:
return custom_modality_fn(input_data)
if file_extension:
extension_mapping = {
'.txt': 'text', '.json': 'text',
'.jpg': 'vision', '.png': 'vision',
'.mp3': 'audio', '.wav': 'audio',
'.mp4': 'video', '.avi': 'video',
}
return extension_mapping.get(file_extension.lower())
# Existing modality detection logic
if len(input_data.shape) == 2 and input_data.dtype == torch.int64:
return 'text'
elif len(input_data.shape) == 4:
return 'vision'
elif len(input_data.shape) == 3:
return 'audio'
elif len(input_data.shape) == 5:
return 'video'
else:
raise ValueError("Unable to detect input data modality")
def register_fusion_technique(self, technique_name, fusion_fn):
self._fusion_techniques[technique_name] = fusion_fn
omni_morph = OmniMorph()
text_input = torch.randint(0, 10000, (1, 50))
# vision_input = torch.randn(1, 3, 224, 224)
# audio_input = torch.randn(1, 128, 100)
# audio_input = audio_input.unsqueeze(1) # Add a new dimension for channels
text_embedding = omni_morph(text_input, user_defined_modality='text') # modality_type is automatically detected
# vision_embedding = omni_morph(vision_input) # modality_type is automatically detected
# audio_embedding = omni_morph(audio_input) # modality_type is automatically detected
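# --- Example (sketch): registering a custom fusion technique ---
# register_fusion_technique is part of the class above; the mean-pooling function
# below is illustrative only, not something shipped with OmniMorph.
def mean_pool_fusion(embedding):
    # Collapse the sequence dimension to a single vector per example
    return embedding.mean(dim=1)

omni_morph.register_fusion_technique('mean_pool', mean_pool_fusion)
pooled_text_embedding = omni_morph(text_input, user_defined_modality='text', fusion_technique='mean_pool')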
| OmniMorph-master | OmniMorph.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class PositionalEmbedding(nn.Embedding):
def forward(
self,
x,
positions=None,
**kwargs,
):
if positions is None:
# being consistent with Fairseq, which starts from 2.
positions = (
torch.arange(2, x.size(1) + 2, device=x.device).long().unsqueeze(0)
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
class AudioEmbedding(nn.Module):
def __init__(self, in_channels, embed_dim):
super().__init__()
        self.conv = nn.Conv1d(in_channels, embed_dim, kernel_size=1)  # audio input is (batch, channels, time), so a 1D conv is appropriate
def forward(self, x, **kwargs):
return self.conv(x)
class OmniModalityEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed, audio_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
self.audio_embed = audio_embed
def forward(self, input_data, modality_type, **kwargs):
if modality_type == "text":
return self.text_embed(input_data, **kwargs)
elif modality_type == "vision":
return self.vision_embed(input_data, **kwargs)
elif modality_type == "audio":
return self.audio_embed(input_data, **kwargs)
else:
raise ValueError(f"Unsupported modality type {modality_type}")
#instantiate the embedding module
text_embed = TextEmbedding(num_embeddings=10000, embedding_dim=768)
vision_embed = VisionEmbedding(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
audio_embed = AudioEmbedding(in_channels=128, embed_dim=768)
#create the omnimodality embedding instance
OmniMorph = OmniModalityEmbedding(text_embed, vision_embed, audio_embed)
#example usage for different modalities
text_input = torch.randint(0, 10000, (1, 50))
vision_input = torch.randn(1, 3, 224, 224)
audio_input = torch.randn(1, 128, 100)
text_embedding = OmniMorph(text_input, "text")
vision_embedding = OmniMorph(vision_input, 'vision')
audio_embedding = OmniMorph(audio_input, 'audio')
| OmniMorph-master | iterations/OMNI.py |
import torch
import torch.nn as nn
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class AudioEmbedding(nn.Module):
def __init__(self, in_channels, embed_dim):
super().__init__()
        self.conv = nn.Conv1d(in_channels, embed_dim, kernel_size=1)  # audio input is (batch, channels, time), so a 1D conv is appropriate
def forward(self, x, **kwargs):
return self.conv(x)
class OmniMorph(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self._embedding_registry = {}
self._embedding_instances = {}
self._fusion_techniques = {}
        # Pre-register and instantiate the embedding functions
self.register_and_instantiate('text', TextEmbedding, num_embeddings=10000, embedding_dim=768)
self.register_and_instantiate('vision', VisionEmbedding, img_size=224, patch_size=16, in_chans=3, embed_dim=768)
self.register_and_instantiate('audio', AudioEmbedding, in_channels=128, embed_dim=768)
# self.register_and_instantiate('video', VideoEmbedding, num_channels=3, time_dim=10, height=224, width=224, embed_dim=768)
        # Instantiate VisionLanguageEmbedding with the vision and text embedding instances
vision_embed_instance = self._embedding_instances.get('vision')
text_embed_instance = self._embedding_instances.get('text')
self.vision_language_embedding = VisionLanguageEmbedding(text_embed_instance, vision_embed_instance)
def register_and_instantiate(self, modality_type, embedding_class, **kwargs):
self.register_embedding(modality_type, embedding_class)
self.instantiate_embedding(modality_type, **kwargs)
def register_embedding(self, modality_type, embedding_class):
self._embedding_registry[modality_type] = embedding_class
def instantiate_embedding(self, modality_type, embedding_class=None, *args, **kwargs):
if embedding_class is None:
embedding_class = self._embedding_registry.get(modality_type)
if embedding_class is not None:
self._embedding_instances[modality_type] = embedding_class(*args, **kwargs)
else:
raise ValueError(f"Unsupported modality type: {modality_type}")
def forward(self, input_data, modality_type=None, fusion_technique=None, **kwargs):
if modality_type is None:
modality_type = self.detect_modality(input_data)
print(modality_type)
embedding_instance = self._embedding_instances.get(modality_type)
if embedding_instance is not None:
embedding = embedding_instance(input_data, **kwargs)
print(embedding)
if fusion_technique:
fusion_fn = self._fusion_techniques.get(fusion_technique)
if fusion_fn:
embedding = fusion_fn(embedding)
print(embedding)
else:
raise ValueError(f"Unsupported fusion technique: {fusion_technique}")
return embedding
else:
raise ValueError(f"Embedding for modality type {modality_type} not instantiated")
def detect_modality(self, input_data):
if len(input_data.shape) == 2 and input_data.dtype == torch.int64:
return 'text'
elif len(input_data.shape) == 4:
return 'vision'
elif len(input_data.shape) == 3:
return 'audio'
elif len(input_data.shape) == 5:
return 'video'
else:
raise ValueError("Unable to detect input data modality")
def register_fusion_technique(self, technique_name, fusion_fn):
self._fusion_techniques[technique_name] = fusion_fn
omni_morph = OmniMorph()
text_input = torch.randint(0, 10000, (1, 50))
# vision_input = torch.randn(1, 3, 224, 224)
# audio_input = torch.randn(1, 128, 100)
# audio_input = audio_input.unsqueeze(1) # Add a new dimension for channels
text_embedding = omni_morph(text_input) # modality_type is automatically detected
# vision_embedding = omni_morph(vision_input) # modality_type is automatically detected
# audio_embedding = omni_morph(audio_input) # modality_type is automatically detected
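# Sketch of the fusion hook: register_fusion_technique stores a callable that
# forward() applies to the embedding when fusion_technique is passed. The
# 'mean_pool' name and the pooling lambda below are illustrative assumptions,
# not part of the original example.
omni_morph.register_fusion_technique('mean_pool', lambda emb: emb.mean(dim=1, keepdim=True))
pooled_text = omni_morph(text_input, fusion_technique='mean_pool')  # roughly (1, 1, 768)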
| OmniMorph-master | iterations/OMNI4.py |
import torch
import torch.nn as nn
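# Note (assumption): TextEmbedding, VisionEmbedding, AudioEmbedding and
# VideoEmbedding are not defined in this file; they are assumed to be imported
# from the sibling OMNI modules in this repo before this script is run.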
class OmniMorph(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self._embedding_registry = {}
self._embedding_instances = {}
def register_embedding(self, modality_type, embedding_class):
self._embedding_registry[modality_type] = embedding_class
def instantiate_embedding(self, modality_type, embedding_class=None, *args, **kwargs):
if embedding_class is None:
embedding_class = self._embedding_registry.get(modality_type)
if embedding_class is not None:
self._embedding_instances[modality_type] = embedding_class(*args, **kwargs)
else:
raise ValueError(f"Unsupported modality type: {modality_type}")
def forward(self, input_data, modality_type=None, **kwargs):
if modality_type is None:
modality_type = self.detect_modality(input_data)
embedding_instance = self._embedding_instances.get(modality_type)
if embedding_instance is not None:
return embedding_instance(input_data, **kwargs)
else:
raise ValueError(f"Embedding for modality type {modality_type} not instantiated")\
def detect_modality(self, input_data):
if len(input_data.shape) == 2 and input_data.dtype == torch.int64:
return 'text'
elif len(input_data.shape) == 4:
return 'vision'
elif len(input_data.shape) == 3:
return 'audio'
elif len(input_data.shape) == 5:
return 'video'
else:
raise ValueError("Unable to detect input data modality")
omni_morph = OmniMorph()
# Register and instantiate embeddings
omni_morph.register_embedding('text', TextEmbedding)
omni_morph.instantiate_embedding('text', num_embeddings=10000, embedding_dim=768)
omni_morph.register_embedding('vision', VisionEmbedding)
omni_morph.instantiate_embedding('vision', img_size=224, patch_size=16, in_chans=3, embed_dim=768)
omni_morph.register_embedding('audio', AudioEmbedding)
omni_morph.instantiate_embedding('audio', in_channels=128, embed_dim=768)
omni_morph.register_embedding('video', VideoEmbedding)
omni_morph.instantiate_embedding('video', num_channels=3, time_dim=10, height=224, width=224, embed_dim=768)
# Example usage for different modalities
text_input = torch.randint(0, 10000, (1, 50))
vision_input = torch.randn(1, 3, 224, 224)
audio_input = torch.randn(1, 128, 100)
video_input = torch.randn(1, 3, 10, 224, 224)
text_embedding = omni_morph(text_input) # modality_type is automatically detected
vision_embedding = omni_morph(vision_input) # modality_type is automatically detected
audio_embedding = omni_morph(audio_input) # modality_type is automatically detected
video_embedding = omni_morph(video_input) # modality_type is automatically detected
| OmniMorph-master | iterations/OMNI3.py |
import torch
import torch.nn as nn
import torch.nn.functional as F  # used by PositionalEmbedding.forward
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
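# Usage sketch (assumption, not executed here): given a text embedder and a
# vision embedder that share embed_dim, VisionLanguageEmbedding concatenates
# their outputs along the sequence axis, e.g. (1, 50, 768) text tokens plus
# (1, 196, 768) image patches -> a (1, 246, 768) joint sequence.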
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class PositionalEmbedding(nn.Embedding):
def forward(
self,
x,
positions=None,
**kwargs,
):
if positions is None:
# being consistent with Fairseq, which starts from 2.
positions = (
torch.arange(2, x.size(1) + 2, device=x.device).long().unsqueeze(0)
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
class OmniMorph(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self._embedding_registry = {}
self._embedding_instances = {}
def register_embedding(self, modality_type, embedding_class):
self._embedding_registry[modality_type] = embedding_class
def instantiate_embedding(self, modality_type, *args, **kwargs):
embedding_class = self._embedding_registry.get(modality_type)
if embedding_class is not None:
self._embedding_instances[modality_type] = embedding_class(*args, **kwargs)
self.add_module(f"{modality_type}_embedding", self._embedding_instances[modality_type])
else:
raise ValueError(f"Unsupported modality type: {modality_type}")
def forward(self, input_data, modality_type=None, **kwargs):
if modality_type is None:
modality_type = self.detect_modality(input_data)
embedding_instance = self._embedding_instances.get(modality_type)
if embedding_instance is not None:
return embedding_instance(input_data, **kwargs)
else:
raise ValueError(f"Embedding for modality type {modality_type} not instantiated.")
def detect_modality(self, input_data):
# Implement heuristics to automatically detect input data modality
# For example:
if len(input_data.shape) == 2 and input_data.dtype == torch.int64:
return 'text'
elif len(input_data.shape) == 4:
return 'vision'
elif len(input_data.shape) == 3:
return 'audio'
else:
raise ValueError("Unable to detect input data modality.")
class AudioEmbedding(nn.Module):
    def __init__(self, in_channels, embed_dim):
        super().__init__()
        # 1x1 convolution over the channel dimension; accepts (batch, in_channels, time) inputs
        self.conv = nn.Conv1d(in_channels, embed_dim, kernel_size=1)
    def forward(self, x, **kwargs):
        return self.conv(x)
# Instantiate OmniMorph
omni_morph = OmniMorph()
# Register and instantiate embeddings
omni_morph.register_embedding('text', TextEmbedding)
omni_morph.instantiate_embedding('text', num_embeddings=10000, embedding_dim=768)
omni_morph.register_embedding('vision', VisionEmbedding)
omni_morph.instantiate_embedding('vision', img_size=224, patch_size=16, in_chans=3, embed_dim=768)
omni_morph.register_embedding('audio', AudioEmbedding)
omni_morph.instantiate_embedding('audio', in_channels=128, embed_dim=768)
# Example usage for different modalities
text_input = torch.randint(0, 10000, (1, 50))
vision_input = torch.randn(1, 3, 224, 224)
audio_input = torch.randn(1, 128, 100)
text_embedding = omni_morph(text_input) # modality_type is automatically detected
vision_embedding = omni_morph(vision_input) # modality_type is automatically detected
audio_embedding = omni_morph(audio_input) # modality_type is automatically detected | OmniMorph-master | iterations/OMNI2.py |
from setuptools import setup, find_packages
setup(
name = 'blockwise-parallel-transformer',
packages = find_packages(exclude=[]),
version = '0.1.2',
license='MIT',
  description = '32x Faster Attention',
author = 'Kye Gomez',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/Blockwise-Parallel-Transformer',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers',
"Prompt Engineering"
],
install_requires=[
'jax',
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | Blockwise-Parallel-Transformer-main | setup.py |
from jax import random
from blockwise_parallel import BlockwiseParallelTransformerAttention
from torch.nn import Embedding
#hyperparams
input_size = 512
num_heads = 8
hidden_size = 512
num_layers = 6
max_seq_len = 1024
block_size = 64
#create random input sequence
key = random.PRNGKey(0)
x = random.normal(key, (1, max_seq_len, input_size))
#create instance
attention = BlockwiseParallelTransformerAttention(input_size,
num_heads,
hidden_size,
num_layers,
max_seq_len,
block_size)
# compute the output of the attention
output = attention(x)
#print the shape of the output
print(output.shape) | Blockwise-Parallel-Transformer-main | example.py |
Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/__init__.py |
|
# coding=utf-8
# Copyright 2021 The EleutherAI and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Optional, Tuple
import json
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax
from flax.linen import partitioning as nn_partitioning
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
from transformers.generation.flax_logits_process import FlaxLogitsProcessorList
from transformers import AutoTokenizer
from jax.sharding import PartitionSpec as PS
from ml_collections import ConfigDict
from ml_collections.config_dict import config_dict
from bpt.tools.utils import function_args_to_config, load_pickle, open_file
from bpt.tools.jax_utils import (
with_sharding_constraint, get_jax_mesh, get_gradient_checkpoint_policy
)
from bpt.blocks.memeff import AttentionBlock as MemEffAttentionBlock
from bpt.blocks.blockwise_parallel_v1 import AttentionBlock as BPAttentionBlock_v1
from bpt.blocks.blockwise_parallel import AttentionBlock as BPAttentionBlock, Blockwise_LM_Head
from bpt.blocks.vanilla import AttentionBlock as VanillaAttentionBlock
GPT_STANDARD_CONFIGS = {
# 1.3B
'1b': {
'vocab_size': 50432,
'n_embd': 2048,
'n_inner': 8192,
'n_layer': 24,
'n_head': 16,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 2.7B
'3b': {
'vocab_size': 50432,
'n_embd': 2560,
'n_inner': 10240,
'n_layer': 32,
'n_head': 32,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 80,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 6.7B
'7b': {
'vocab_size': 50432,
'n_embd': 4096,
'n_inner': 16384,
'n_layer': 32,
'n_head': 32,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 13B
'13b': {
'vocab_size': 50432,
'n_embd': 5120,
'n_inner': 20480,
'n_layer': 40,
'n_head': 40,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 30B
'30b': {
'vocab_size': 50432,
'n_embd': 7168,
'n_inner': 28672,
'n_layer': 48,
'n_head': 56,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 70B
'70b': {
'vocab_size': 50432,
'n_embd': 8192,
'n_inner': 32768,
'n_layer': 80,
'n_head': 64,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
'debug': { # A small model for debugging
'vocab_size': 50432,
'n_embd': 128,
'n_inner': 256,
'n_layer': 2,
'n_head': 4,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 32,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
}
class GPTConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GPTModel`]. It is used to instantiate a GPT-J
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the GPT-J
[EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from
[`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50432):
Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GPTModel`].
n_positions (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 4096):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 28):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
rotary_dim (`int`, *optional*, defaults to 64):
Number of dimensions in the embedding that Rotary Position Embedding is applied to.
n_inner (`int`, *optional*, defaults to 0):
Dimensionality of the inner feed-forward layers. 0 will set it to 4 times n_embd
activation_function (`str`, *optional*, defaults to `"gelu_new"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`int`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by sqrt(hidden_size).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Example:
```python
>>> from transformers import GPTModel, GPTConfig
>>> # Initializing a GPT-J 6B configuration
>>> configuration = GPTConfig()
>>> # Initializing a model from the configuration
>>> model = GPTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "gpt"
attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(
self,
vocab_size=50432,
n_positions=2048,
n_embd=4096,
n_layer=28,
n_head=16,
rotary_dim=64,
n_inner=None,
activation_function="gelu_new",
resid_pdrop=0.0,
embd_pdrop=0.0,
attn_pdrop=0.0,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
scale_attn_weights=True,
use_cache=True,
bos_token_id=50256,
eos_token_id=50256,
tie_word_embeddings=False,
gradient_checkpointing='nothing_saveable',
n_real_tokens=50257,
fcm_min_ratio=0.0,
fcm_max_ratio=0.0,
causal=True,
attn_type='dot',
q_chunk_size=1024,
k_chunk_size=2048,
scan_layers=True,
param_scan_axis=0,
float32_logits=False,
**kwargs
):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.rotary_dim = rotary_dim
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.n_real_tokens = n_real_tokens
self.fcm_min_ratio = fcm_min_ratio
self.fcm_max_ratio = fcm_max_ratio
self.causal = causal
self.attn_type = attn_type
self.q_chunk_size = q_chunk_size
self.k_chunk_size = k_chunk_size
self.scan_layers = scan_layers
self.param_scan_axis = param_scan_axis
self.float32_logits = float32_logits
if self.n_real_tokens is None:
self.n_real_tokens = self.vocab_size
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
)
@classmethod
def get_default_config(cls, updates=None):
none_arg_types = dict(
n_inner=int,
rotary_dim=int,
)
config = function_args_to_config(cls.__init__, none_arg_types=none_arg_types)
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@staticmethod
def get_jax_mesh(axis_dims):
return get_jax_mesh(axis_dims, ('dp', 'fsdp', 'mp'))
@staticmethod
def get_partition_rules(scan_layers=False):
""" Parition rules for GPT. Note that these rules are orderd, so that
the beginning rules match first. It is important to use
PartitionSpec() instead of None here because JAX does not treat
None as a pytree leaf.
"""
if scan_layers:
return (
('transformer/wte/embedding', PS('mp', 'fsdp')),
('attn/(k_proj|q_proj|v_proj)/kernel', PS(None, 'fsdp', 'mp')),
('attn/out_proj/kernel', PS(None, 'mp', 'fsdp')),
('attn/fc_in/kernel', PS(None, 'fsdp', 'mp')),
('attn/fc_in/bias', PS(None, 'mp')),
('attn/fc_out/kernel', PS(None, 'mp', 'fsdp')),
('attn/fc_out/bias', PS(None, None)),
('ln_[0-9]+/bias', PS(None, None)),
('[0-9]+/ln_[0-9]+/scale', PS(None, None)),
('ln_f/bias', PS(None)),
('ln_f/scale', PS(None)),
('lm_head/kernel', PS('fsdp', 'mp')),
('lm_head/bias', PS('mp')),
('.*', PS(None)),
)
else:
return (
('transformer/wte/embedding', PS('mp', 'fsdp')),
('attn/(k_proj|q_proj|v_proj)/kernel', PS('fsdp', 'mp')),
('attn/out_proj/kernel', PS('mp', 'fsdp')),
('attn/fc_in/kernel', PS('fsdp', 'mp')),
('attn/fc_in/bias', PS('mp')),
('attn/fc_out/kernel', PS('mp', 'fsdp')),
('attn/fc_out/bias', PS(None)),
('ln_[0-9]+/bias', PS(None)),
('[0-9]+/ln_[0-9]+/scale', PS(None)),
('ln_f/bias', PS(None)),
('ln_f/scale', PS(None)),
('lm_head/kernel', PS('fsdp', 'mp')),
('lm_head/bias', PS('mp')),
('.*', PS(None)),
)
@staticmethod
def get_weight_decay_exclusions():
return (
'ln_[0-9]+/bias', 'ln_[0-9]+/scale', 'ln_f/bias', 'ln_f/scale',
'bias'
)
@staticmethod
def rng_keys():
return ('params', 'dropout', 'fcm')
@staticmethod
def get_tokenizer_config(updates=None):
config = ConfigDict()
config.name = 'EleutherAI/gpt-j-6B'
config.bos_token = '<|endoftext|>'
config.eos_token = '<|endoftext|>'
config.pad_token = '<|extratoken_40|>'
config.cls_token = '<|extratoken_41|>'
config.mask_token = '<|extratoken_42|>'
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_tokenizer(cls, config, padding_side='left', truncation_side='right'):
config = cls.get_tokenizer_config(config)
return AutoTokenizer.from_pretrained(
config.name,
bos_token=config.bos_token,
eos_token=config.eos_token,
pad_token=config.pad_token,
cls_token=config.cls_token,
mask_token=config.mask_token,
padding_side=padding_side,
truncation_side=truncation_side,
)
@staticmethod
def load_pretrained(name, dtype=jnp.float32):
with jax.default_device(jax.devices("cpu")[0]):
params = FlaxGPTForCausalLM.from_pretrained(
name, _do_init=False, dtype=dtype
)[1]
params = freeze({'params': params})
return jax.device_get(params)
@classmethod
def load_config(cls, path):
if path in GPT_STANDARD_CONFIGS:
return cls.from_dict(GPT_STANDARD_CONFIGS[path])
load_type, load_path = path.split('::', 1)
if load_type == 'pickle':
return cls.from_dict(load_pickle(load_path)['gpt_config'])
elif load_type == 'json':
with open_file(load_path, 'r') as fin:
raw_config = fin.read()
return cls.from_dict(json.loads(raw_config))
elif load_type == 'huggingface':
return cls.from_pretrained(load_path)
else:
raise ValueError(f'Unsupported load config type: {load_type}')
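# Usage sketch for GPTConfig.load_config above: a bare size key selects one of
# the presets, e.g. GPTConfig.load_config('13b'), while prefixed paths load from
# files, e.g. 'json::/path/to/config.json', 'pickle::/path/to/checkpoint.pkl' or
# 'huggingface::EleutherAI/gpt-j-6B' (the paths and names here are illustrative).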
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "gpt"
_CONFIG_FOR_DOC = "GPTConfig"
GPT_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a Flax Linen
[flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`GPTConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
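# The three helpers above implement the rotary position embedding (RoPE) scheme
# used by GPT-J: create_sinusoidal_positions precomputes per-position sin/cos
# tables, rotate_every_two swaps and negates adjacent feature pairs
# (x1, x2, ...) -> (-x2, x1, ...), and apply_rotary_pos_emb combines the two to
# rotate each feature pair by a position-dependent angle.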
class FlaxGPTBlock(nn.Module):
config: GPTConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
hidden_size = self.config.hidden_size
inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size
attention_blocks = {
# default vanilla transformer (Vaswani et al).
'vanilla': VanillaAttentionBlock,
# default memory efficient transformer (Rabe et al and Dao et al).
'memeff': MemEffAttentionBlock,
# default blockwise parallel transformer (Liu et al).
'blockwise_parallel': BPAttentionBlock,
            # earlier, less polished blockwise parallel implementation used in the paper.
'blockwise_parallel_v1': BPAttentionBlock_v1,
}
if self.config.attn_type in attention_blocks:
Block = attention_blocks[self.config.attn_type]
else:
raise ValueError(f"Unknown attention type {self.config.attn_type}")
self.attn = Block(
self.config.q_chunk_size,
self.config.k_chunk_size,
self.config.hidden_size,
self.config.num_attention_heads,
self.config.rotary_dim,
inner_dim,
self.config.layer_norm_epsilon,
self.config.activation_function,
self.config.attn_pdrop,
self.config.resid_pdrop,
self.config.max_position_embeddings,
self.dtype,
self.config.causal,
policy=self.config.gradient_checkpointing,
prevent_cse=not self.config.scan_layers,
float32_logits=self.config.float32_logits,
)
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
fcm_mask=None,
):
attn_outputs = self.attn(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
deterministic=deterministic,
init_cache=init_cache,
)
attn_weights = None
if self.config.scan_layers: # NOTE: this is a hack to work with scan_layers
outputs = attn_outputs, None
else:
outputs = (attn_outputs, attn_weights) if output_attentions else (attn_outputs,)
return outputs
class FlaxGPTPreTrainedModel(FlaxPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPTConfig
base_model_prefix = "transformer"
module_class: nn.Module = None
def __init__(
self,
config: GPTConfig,
input_shape: Tuple = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
_do_init: bool = True,
**kwargs,
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
if self.config.add_cross_attention:
encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,))
encoder_attention_mask = attention_mask
module_init_outputs = self.module.init(
rngs,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states,
encoder_attention_mask,
return_dict=False,
)
else:
module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
random_params = module_init_outputs["params"]
if params is not None:
random_params = flatten_dict(unfreeze(random_params))
params = flatten_dict(unfreeze(params))
for missing_key in self._missing_keys:
params[missing_key] = random_params[missing_key]
self._missing_keys = set()
return freeze(unflatten_dict(params))
else:
return random_params
def init_cache(self, batch_size, max_length):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
"""
# init input variables to retrieve cache
input_ids = jnp.ones((batch_size, max_length))
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
init_variables = self.module.init(
jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
)
return init_variables["cache"]
def _get_logits_processor(self,*args, **kwargs) -> FlaxLogitsProcessorList:
processors = super()._get_logits_processor(*args, **kwargs)
def squash_extra_tokens(input_ids, scores, cur_len):
return scores.at[:, self.config.n_real_tokens:].set(-float('inf'))
processors.append(squash_extra_tokens)
return processors
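    # squash_extra_tokens masks the padded vocabulary entries during generation:
    # indices >= n_real_tokens (50432 total vs 50257 real tokens in the default
    # configs) only exist to round the embedding table up to a convenient size,
    # so their logits are forced to -inf and can never be sampled.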
@add_start_docstrings_to_model_forward(GPT_INPUTS_DOCSTRING)
def __call__(
self,
input_ids,
attention_mask=None,
position_ids=None,
params: dict = None,
past_key_values: dict = None,
dropout_rng: jax.random.PRNGKey = None,
train: bool = False,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
batch_size, sequence_length = input_ids.shape
if position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
if attention_mask is None:
attention_mask = jnp.ones((batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
        # If past_key_values are passed, the cache is already initialized; a private flag init_cache
        # has to be passed down to ensure the cache is used, and the cache must be marked as mutable
        # so that it can be changed by the attention module.
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
outputs = self.module.apply(
inputs,
jnp.array(input_ids, dtype="i4"),
jnp.array(attention_mask, dtype="i4"),
jnp.array(position_ids, dtype="i4"),
not train,
False,
output_attentions,
output_hidden_states,
return_dict,
rngs=rngs,
mutable=mutable,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past_key_values = outputs
outputs["past_key_values"] = unfreeze(past_key_values["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past_key_values = outputs
outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
return outputs
class FlaxGPTBlockCollection(nn.Module):
config: GPTConfig
dtype: jnp.dtype = jnp.float32
@nn.compact
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
if not deterministic and self.config.fcm_max_ratio > 0:
# Apply forgetful causal mask
batch_size, seq_length = hidden_states.shape[0], hidden_states.shape[1]
fcm_ratio = jax.random.uniform(
self.make_rng('fcm'), shape=(batch_size, 1, 1, 1),
minval=self.config.fcm_min_ratio,
maxval=self.config.fcm_max_ratio
)
fcm_mask = jax.random.uniform(
self.make_rng('fcm'),
shape=(batch_size, 1, seq_length, seq_length)
) > fcm_ratio
fcm_mask = fcm_mask.at[:, :, :, 0].set(True)
fcm_mask = fcm_mask.astype('bool')
else:
fcm_mask = None
block = FlaxGPTBlock
if self.config.gradient_checkpointing != '':
FlaxGPT2CheckpointBlock = nn.remat(
block, static_argnums=(3, 4, 5, 6),
prevent_cse=not self.config.scan_layers,
policy=get_gradient_checkpoint_policy(self.config.gradient_checkpointing)
)
block = FlaxGPT2CheckpointBlock
if self.config.scan_layers:
initializing = self.is_mutable_collection('params')
params_spec = (
self.config.param_scan_axis if initializing else
nn_partitioning.ScanIn(self.config.param_scan_axis))
cache_spec = 0
hidden_states, _ = nn.scan(
block,
variable_axes={
'params': params_spec,
'cache': cache_spec,
'intermediates': 0
},
split_rngs={
'params': True,
'dropout': True
},
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast),
length=self.config.num_hidden_layers,
metadata_params={nn.PARTITION_NAME: 'scan_decoder_layer'},
)(config=self.config, name='scan_decoder', dtype=self.dtype)(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
else:
blocks = [
block(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
]
for block in blocks:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = block(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions += (layer_outputs[1],)
# this contains possible `None` values - `FlaxGPTModule` will filter them out
outputs = (hidden_states, all_hidden_states, all_attentions)
return outputs
class FlaxGPTModule(nn.Module):
config: GPTConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.embed_dim = self.config.hidden_size
self.wte = nn.Embed(
self.config.vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
)
self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
self.h = FlaxGPTBlockCollection(self.config, dtype=self.dtype)
self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
deterministic=True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
input_embeds = self.wte(input_ids.astype("i4"))
hidden_states = self.dropout(input_embeds, deterministic=deterministic)
outputs = self.h(
hidden_states,
attention_mask,
position_ids=position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
all_hidden_states = outputs[1] + (hidden_states,)
outputs = (hidden_states, all_hidden_states) + outputs[2:]
else:
outputs = (hidden_states,) + outputs[1:]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=outputs[1],
attentions=outputs[-1],
)
@add_start_docstrings(
"The bare GPT Model transformer outputting raw hidden-states without any specific head on top.",
GPT_START_DOCSTRING,
)
class FlaxGPTModel(FlaxGPTPreTrainedModel):
module_class = FlaxGPTModule
append_call_sample_docstring(
FlaxGPTModel,
_CHECKPOINT_FOR_DOC,
FlaxCausalLMOutput,
_CONFIG_FOR_DOC,
)
class FlaxGPTForCausalLMModule(nn.Module):
config: GPTConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.transformer = FlaxGPTModule(self.config, dtype=self.dtype)
if self.config.attn_type == 'blockwise_parallel' or self.config.attn_type == 'blockwise_parallel_v1':
self.lm_head = Blockwise_LM_Head(self.config.vocab_size,
self.config.q_chunk_size, dtype=self.dtype,
prevent_cse=not self.config.scan_layers)
else:
self.lm_head = nn.Dense(
self.config.vocab_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
def __call__(
self,
input_ids,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
batch_size, seq_length = input_ids.shape
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
position_ids = jnp.broadcast_to(
jnp.clip(jnp.cumsum(attention_mask, axis=-1) - 1, a_min=0),
(batch_size, seq_length)
)
outputs = self.transformer(
input_ids,
attention_mask,
position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
else:
lm_logits = self.lm_head(hidden_states)
if not return_dict:
return (lm_logits,) + outputs[1:]
return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings(
"""
The GPT Model transformer with a language modeling head on top.
""",
GPT_START_DOCSTRING,
)
class FlaxGPTForCausalLM(FlaxGPTPreTrainedModel):
module_class = FlaxGPTForCausalLMModule
def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jnp.DeviceArray] = None):
# initializing the cache
batch_size, seq_length = input_ids.shape
past_key_values = self.init_cache(batch_size, max_length)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
# But since GPT uses a causal mask, those positions are masked anyways.
# Thus we can create a single static attention_mask here, which is more efficient for compilation
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if attention_mask is not None:
position_ids = attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
else:
position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
return {
"past_key_values": past_key_values,
"attention_mask": extended_attention_mask,
"position_ids": position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
return model_kwargs
append_call_sample_docstring(
FlaxGPTForCausalLM,
_CHECKPOINT_FOR_DOC,
FlaxCausalLMOutput,
_CONFIG_FOR_DOC,
)
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/model.py |
import dataclasses
import pprint
from functools import partial
import re
from tqdm import tqdm, trange
import numpy as np
import bpt.tools.utils as utils
import jax
import jax.numpy as jnp
from jax.experimental.pjit import pjit
from jax.sharding import PartitionSpec as PS
import flax
from flax import linen as nn
from flax.jax_utils import prefetch_to_device
from flax.training.train_state import TrainState
import optax
from bpt.data import Dataset, TextProcessor
from bpt.tools.checkpoint import StreamingCheckpointer
from bpt.tools.optimizers import OptimizerFactory
from bpt.tools.jax_utils import (
JaxRNG, next_rng, match_partition_rules,
cross_entropy_loss_and_accuracy, named_tree_map, global_norm,
set_random_seed, average_metrics, get_weight_decay_mask,
make_shard_and_gather_fns, with_sharding_constraint, tree_apply, get_metrics,
)
from bpt.model import GPTConfig, FlaxGPTForCausalLMModule
from bpt.blocks.blockwise_parallel import blockwise_cross_entropy
FLAGS, FLAGS_DEF = utils.define_flags_with_default(
seed=42,
initialize_jax_distributed=False,
mesh_dim='1,-1,1',
total_steps=10000,
load_gpt_config='',
update_gpt_config='',
load_checkpoint='',
load_dataset_state='',
log_freq=50,
save_model_freq=0,
save_milestone_freq=0,
eval_steps=0,
tokenizer=GPTConfig.get_tokenizer_config(),
text_processor=TextProcessor.get_default_config(),
train_dataset=Dataset.get_default_config(),
eval_dataset=Dataset.get_default_config(),
optimizer=OptimizerFactory.get_default_config(),
checkpointer=StreamingCheckpointer.get_default_config(),
gpt=GPTConfig.get_default_config(),
logger=utils.WandBLogger.get_default_config(),
log_all_worker=False,
profile_steps=0,
stop_after_profile=True,
)
def main(argv):
if FLAGS.initialize_jax_distributed:
jax.distributed.initialize()
variant = utils.get_user_flags(FLAGS, FLAGS_DEF)
flags_config_dict = utils.user_flags_to_config_dict(FLAGS, FLAGS_DEF)
logger = utils.WandBLogger(
config=FLAGS.logger,
variant=variant,
enable=FLAGS.log_all_worker or (jax.process_index() == 0),
)
set_random_seed(FLAGS.seed)
if FLAGS.load_dataset_state != '':
dataset = utils.load_pickle(FLAGS.load_dataset_state)
else:
tokenizer = GPTConfig.get_tokenizer(FLAGS.tokenizer)
text_processor = TextProcessor(FLAGS.text_processor, tokenizer)
dataset = Dataset(FLAGS.train_dataset, tokenizer, text_processor)
if FLAGS.eval_steps > 0:
eval_dataset = Dataset(
FLAGS.eval_dataset, dataset.tokenizer, dataset.text_processor,
)
eval_iterator = iter(eval_dataset.val_iter())
seq_length = dataset.seq_length
if FLAGS.load_gpt_config != '':
gpt_config = GPTConfig.load_config(FLAGS.load_gpt_config)
update_gpt_config = GPTConfig(**FLAGS.gpt)
gpt_config.update(dict(
q_chunk_size=update_gpt_config.q_chunk_size,
k_chunk_size=update_gpt_config.k_chunk_size,
attn_type=update_gpt_config.attn_type,
n_positions=update_gpt_config.n_positions,
gradient_checkpointing=update_gpt_config.gradient_checkpointing,
scan_layers=update_gpt_config.scan_layers,
param_scan_axis=update_gpt_config.param_scan_axis,
))
else:
gpt_config = GPTConfig(**FLAGS.gpt)
if FLAGS.update_gpt_config != '':
gpt_config.update(dict(eval(FLAGS.update_gpt_config)))
gpt_config.update(dict(
bos_token_id=dataset.tokenizer.bos_token_id,
eos_token_id=dataset.tokenizer.eos_token_id,
))
if gpt_config.vocab_size < dataset.vocab_size:
gpt_config.update(dict(vocab_size=dataset.vocab_size))
model = FlaxGPTForCausalLMModule(gpt_config)
optimizer, optimizer_info = OptimizerFactory.get_optimizer(
FLAGS.optimizer,
get_weight_decay_mask(GPTConfig.get_weight_decay_exclusions()),
)
def create_trainstate_from_params(params):
return TrainState.create(params=params, tx=optimizer, apply_fn=None)
def init_fn(rng):
rng_generator = JaxRNG(rng)
params = model.init(
input_ids=jnp.zeros((4, seq_length), dtype=jnp.int32),
position_ids=jnp.zeros((4, seq_length), dtype=jnp.int32),
attention_mask=jnp.ones((4, seq_length), dtype=jnp.int32),
rngs=rng_generator(gpt_config.rng_keys()),
)
return TrainState.create(params=params, tx=optimizer, apply_fn=None)
if FLAGS.gpt.attn_type == 'blockwise_parallel' or FLAGS.gpt.attn_type == 'blockwise_parallel_v1':
cross_entropy_loss_and_accuracy_fn = partial(blockwise_cross_entropy,
policy=FLAGS.gpt.gradient_checkpointing,
chunk_size=FLAGS.gpt.q_chunk_size,
prevent_cse=not FLAGS.gpt.scan_layers,)
else:
cross_entropy_loss_and_accuracy_fn = cross_entropy_loss_and_accuracy
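    # Sketch of the intent here (based on the blockwise-parallel design): the
    # blockwise loss is meant to pair with Blockwise_LM_Head in the model,
    # processing the sequence in q_chunk_size chunks with gradient checkpointing
    # ('policy', 'prevent_cse') to keep peak memory manageable for long contexts.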
def train_step(train_state, rng, batch):
rng_generator = JaxRNG(rng)
input_tokens = with_sharding_constraint(batch['input_tokens'], PS(('dp', 'fsdp')))
output_tokens = with_sharding_constraint(batch['output_tokens'], PS(('dp', 'fsdp')))
loss_masks = with_sharding_constraint(batch['loss_masks'], PS(('dp', 'fsdp')))
def loss_and_accuracy(params):
logits = model.apply(
params,
input_tokens,
deterministic=False,
rngs=rng_generator(gpt_config.rng_keys()),
).logits
return cross_entropy_loss_and_accuracy_fn(logits, output_tokens, loss_masks)
grad_fn = jax.value_and_grad(loss_and_accuracy, has_aux=True)
(loss, accuracy), grads = grad_fn(train_state.params)
train_state = train_state.apply_gradients(grads=grads)
metrics = dict(
loss=loss,
accuracy=accuracy,
learning_rate=optimizer_info['learning_rate_schedule'](train_state.step),
gradient_norm=global_norm(grads),
param_norm=global_norm(train_state.params),
)
return train_state, rng_generator(), metrics
def eval_step(train_state, rng, batch):
rng_generator = JaxRNG(rng)
input_tokens = with_sharding_constraint(batch['input_tokens'], PS(('dp', 'fsdp')))
output_tokens = with_sharding_constraint(batch['output_tokens'], PS(('dp', 'fsdp')))
loss_masks = with_sharding_constraint(batch['loss_masks'], PS(('dp', 'fsdp')))
logits = model.apply(
train_state.params,
input_tokens,
deterministic=True,
rngs=rng_generator(gpt_config.rng_keys()),
).logits
loss, accuracy = cross_entropy_loss_and_accuracy_fn(logits, output_tokens, loss_masks)
metrics = dict(
loss=loss,
accuracy=accuracy,
)
return rng_generator(), metrics
train_state_shapes = jax.eval_shape(init_fn, next_rng())
train_state_partition = match_partition_rules(
GPTConfig.get_partition_rules(FLAGS.gpt.scan_layers), train_state_shapes
)
num_params = sum(x.size for x in jax.tree_leaves(train_state_shapes.params))
num_nonembed_params = num_params - gpt_config.vocab_size * gpt_config.n_embd
param_stats = {"num_params": num_params,"num_nonembed_params": num_nonembed_params}
logger.log(param_stats)
tqdm.write("\n" + pprint.pformat(param_stats) + "\n")
shard_fns, gather_fns = make_shard_and_gather_fns(
train_state_partition, train_state_shapes
)
checkpointer = StreamingCheckpointer(
FLAGS.checkpointer, logger.output_dir,
enable=jax.process_index() == 0,
)
sharded_init_fn = pjit(
init_fn,
in_shardings=PS(),
out_shardings=train_state_partition
)
sharded_create_trainstate_from_params = pjit(
create_trainstate_from_params,
in_shardings=(train_state_partition.params, ),
out_shardings=train_state_partition,
donate_argnums=(0, ),
)
sharded_train_step = pjit(
train_step,
in_shardings=(train_state_partition, PS(), PS()),
out_shardings=(train_state_partition, PS(), PS()),
donate_argnums=(0, 1),
)
sharded_eval_step = pjit(
eval_step,
in_shardings=(train_state_partition, PS(), PS()),
out_shardings=(PS(), PS()),
donate_argnums=(1,),
)
def save_checkpoint(train_state, milestone=False):
step = int(jax.device_get(train_state.step))
metadata = dict(
step=step,
variant=variant,
flags=flags_config_dict,
gpt_config=gpt_config.to_dict(),
)
checkpointer.save_all(
train_state=train_state,
gather_fns=gather_fns,
metadata=metadata,
dataset=dataset.get_state_dict(),
milestone=milestone,
)
if FLAGS.profile_steps > 0:
import os
os.makedirs(logger.profile_dir, exist_ok=True)
mesh = GPTConfig.get_jax_mesh(FLAGS.mesh_dim)
with mesh:
train_state, restored_params = None, None
if train_state is None and restored_params is None:
# Initialize from scratch
train_state = sharded_init_fn(next_rng())
elif train_state is None and restored_params is not None:
# Restore from params but initialize train_state
train_state = sharded_create_trainstate_from_params(restored_params)
del restored_params
sharded_rng = next_rng()
# warmup
for batch, dataset_metrics in dataset:
train_state, sharded_rng, metrics = sharded_train_step(
train_state, sharded_rng, batch
)
break
# profile
jax.profiler.start_trace(logger.profile_dir)
for step, (batch, dataset_metrics) in zip(trange(FLAGS.profile_steps), dataset):
train_state, sharded_rng, metrics = sharded_train_step(
train_state, sharded_rng, batch
)
jax.block_until_ready(train_state)
jax.profiler.save_device_memory_profile(f'{logger.profile_dir}/memory{step}.prof')
jax.profiler.stop_trace()
if FLAGS.stop_after_profile:
exit()
mesh = GPTConfig.get_jax_mesh(FLAGS.mesh_dim)
with mesh:
train_state, restored_params = None, None
if FLAGS.load_checkpoint != '':
load_type, load_path = FLAGS.load_checkpoint.split('::', 1)
if load_type == 'huggingface':
restored_params = tree_apply(
shard_fns.params, gpt_config.load_pretrained(load_path)
)
train_state = None
else:
train_state, restored_params = checkpointer.load_trainstate_checkpoint(
FLAGS.load_checkpoint, train_state_shapes, shard_fns
)
if train_state is None and restored_params is None:
# Initialize from scratch
train_state = sharded_init_fn(next_rng())
elif train_state is None and restored_params is not None:
# Restore from params but initialize train_state
train_state = sharded_create_trainstate_from_params(restored_params)
del restored_params
start_step = int(jax.device_get(train_state.step))
if FLAGS.save_model_freq > 0:
save_checkpoint(train_state)
sharded_rng = next_rng()
step_counter = trange(start_step, FLAGS.total_steps, ncols=0)
def run_eval(sharded_rng, eval_fn, batch, eval_steps, eval_name):
eval_metric_list = []
for _ in range(eval_steps):
sharded_rng, eval_metrics = eval_fn(
train_state, sharded_rng, batch
)
eval_metric_list.append(eval_metrics)
log_metrics = get_metrics(eval_metric_list, stack=True)
mean_metrics = {
f"{eval_name}/{k}": np.mean(v)
for k, v in log_metrics.items()
}
mean_metrics["step"] = step
logger.log(mean_metrics)
tqdm.write("\n" + pprint.pformat(mean_metrics) + "\n")
return sharded_rng
for step, (batch, dataset_metrics) in zip(step_counter, dataset):
train_state, sharded_rng, metrics = sharded_train_step(
train_state, sharded_rng, batch
)
if step % FLAGS.log_freq == 0:
if FLAGS.eval_steps > 0:
batch, _ = next(eval_iterator)
sharded_rng = run_eval(sharded_rng, sharded_eval_step,
batch, FLAGS.eval_steps, "val")
log_metrics = {"step": step}
log_metrics.update(metrics)
log_metrics.update(dataset_metrics)
log_metrics = jax.device_get(log_metrics)
logger.log(log_metrics)
tqdm.write("\n" + pprint.pformat(log_metrics) + "\n")
if FLAGS.save_milestone_freq > 0 and (step + 1) % FLAGS.save_milestone_freq == 0:
save_checkpoint(train_state, milestone=True)
elif FLAGS.save_model_freq > 0 and (step + 1) % FLAGS.save_model_freq == 0:
save_checkpoint(train_state)
if FLAGS.save_model_freq > 0:
save_checkpoint(train_state)
if __name__ == "__main__":
utils.run(main)
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/train.py |
import dataclasses
import pprint
import time
from functools import partial
import json
from multiprocessing import Pool
import h5py
import bpt.tools.utils as utils
from ml_collections.config_dict import config_dict
from ml_collections import ConfigDict
from tqdm import tqdm, trange
import numpy as np
from datasets import load_dataset
class TextProcessor(object):
""" Example processor that converts a dictionary of texts into tokens. """
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.fields_from_example = ''
config.fields = ''
config.subfield_separator = ' '
config.add_eos_token = True
config.prepend_text = ''
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, tokenizer):
self.config = self.get_default_config(config)
assert self.config.fields != '' or self.config.fields_from_example != '', (
'Either fields or fields_from_example must be specified.'
)
self.tokenizer = tokenizer
def __call__(self, example, has_aux=False):
if has_aux:
example, *aux = example
else:
aux = tuple()
token_buffer = []
loss_mask_buffer = []
if self.config.fields_from_example != '':
fields = example[self.config.fields_from_example].split(',')
else:
fields = self.config.fields.split(',')
for i, field in enumerate(fields):
if field.startswith('[') and field.endswith(']'):
# No loss for this field.
field = field[1:-1]
mask = 0.0
else:
mask = 1.0
if field == '<|bos|>':
token_buffer.append(self.tokenizer.bos_token_id)
loss_mask_buffer.append(mask)
elif field == '<|eos|>':
token_buffer.append(self.tokenizer.eos_token_id)
loss_mask_buffer.append(mask)
else:
subfields = field.split('+')
text = self.config.subfield_separator.join(
[example[subfield] for subfield in subfields]
)
if i == 0:
text = self.config.prepend_text + text
tokens = self.tokenizer.encode(text)
token_buffer.extend(tokens)
loss_mask_buffer.extend([mask for _ in range(len(tokens))])
if self.config.add_eos_token:
token_buffer.append(self.tokenizer.eos_token_id)
loss_mask_buffer.append(1.0)
return token_buffer, loss_mask_buffer, *aux
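# A minimal, hypothetical usage sketch of the field parsing implemented by
# TextProcessor above. The mock tokenizer and the example fields below are
# illustrative assumptions only; they are not part of the actual data pipeline.
class _MockTokenizer(object):
    bos_token_id = 0
    eos_token_id = 1
    def encode(self, text):
        # Pretend every whitespace-separated word maps to a single token id.
        return [10 + i for i, _ in enumerate(text.split())]
def _text_processor_sketch():
    processor = TextProcessor(
        {'fields': '[prompt],completion', 'add_eos_token': True},
        _MockTokenizer(),
    )
    example = {'prompt': 'translate this', 'completion': 'ok'}
    tokens, loss_masks = processor(example)
    # Tokens from the bracketed '[prompt]' field carry loss mask 0.0, tokens from
    # 'completion' carry 1.0, and the appended eos token is also included in the loss.
    return tokens, loss_masks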
class Dataset(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.path = ''
config.seq_length = 1024
config.batch_size = 8
config.start_seek_loc = 0
config.index_at_start = 0
config.tokenizer_processes = 1
config.tokenizer_parallel_chunk_size = 32
config.tokenizer_parallel_batch_size = 1024
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, tokenizer, text_processor):
self.config = self.get_default_config(config)
assert self.config.path != ''
self._tokenizer = tokenizer
self._text_processor = text_processor
self._index = self.config.index_at_start
self._file_loc = self.config.start_seek_loc
self._n_batch = 0
def parse_json(self, line):
if not line or line == '\n':
return None
try:
data = json.loads(line)
except json.decoder.JSONDecodeError:
print(f'Error parsing json line:\n{line}')
return None
return data
def json_iterator(self):
with utils.open_file(self.config.path, 'r') as fin:
fin.seek(self._file_loc)
while True:
line = fin.readline()
self._file_loc = fin.tell()
if not line: # Reached EOF
self._index = 0
fin.seek(0)
continue
data = self.parse_json(line)
if data is not None:
# JSON parsing succeeded
yield data, self._file_loc, self._index
self._index += 1
def batched(self, iterator, batch_size):
batch = []
for example in iterator:
batch.append(example)
if len(batch) == batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
def parallel_example_iterator(self):
if self.config.tokenizer_processes == 1:
for example, loc, index in self.json_iterator():
yield self.text_processor((example, loc, index), has_aux=True)
else:
process_pool = Pool(self.config.tokenizer_processes)
batched_iterator = self.batched(
self.json_iterator(), self.config.tokenizer_parallel_batch_size
)
with process_pool as pool:
map_fn = partial(self.text_processor, has_aux=True)
next_batch = pool.map_async(
map_fn, next(batched_iterator),
chunksize=self.config.tokenizer_parallel_chunk_size
)
while True:
current_batch = next_batch
next_batch = pool.map_async(
map_fn, next(batched_iterator),
chunksize=self.config.tokenizer_parallel_chunk_size
)
for example in current_batch.get():
yield example
def __iter__(self):
chunk_size = self.config.batch_size * self.config.seq_length
token_buffer = []
loss_mask_buffer = []
total_tokens = 0
last_time = 0.0
for tokens, loss_masks, loc, index in self.parallel_example_iterator():
token_buffer.extend(tokens)
loss_mask_buffer.extend(loss_masks)
while len(token_buffer) > chunk_size + 1:
total_tokens += chunk_size
metrics = {
'dataset_file_loc': loc,
'dataset_example_index': index,
'dataset_total_tokens': total_tokens,
'dataset_throughput_tps': chunk_size / (time.time() - last_time),
}
last_time = time.time()
input_tokens = np.array(token_buffer[:chunk_size], dtype=np.int32)
output_tokens = np.array(token_buffer[1:chunk_size+1], dtype=np.int32)
# reshape to batch_size x seq_length
input_tokens = input_tokens.reshape(self.config.batch_size, -1)
output_tokens = output_tokens.reshape(self.config.batch_size, -1)
loss_masks = np.array(loss_mask_buffer[:chunk_size], dtype=np.float32).reshape(self.config.batch_size, -1)
yield {
"input_tokens": input_tokens,
"output_tokens": output_tokens,
"loss_masks": loss_masks,
}, metrics
token_buffer = token_buffer[chunk_size:]
loss_mask_buffer = loss_mask_buffer[chunk_size:]
def val_iter(self):
chunk_size = self.config.batch_size * self.config.seq_length
token_buffer = []
loss_mask_buffer = []
total_tokens = 0
last_time = 0.0
for tokens, loss_masks, loc, index in self.parallel_example_iterator():
token_buffer.extend(tokens)
loss_mask_buffer.extend(loss_masks)
while len(token_buffer) > chunk_size + 1:
total_tokens += chunk_size
metrics = {
'dataset_file_loc': loc,
'dataset_example_index': index,
'dataset_total_tokens': total_tokens,
'dataset_throughput_tps': chunk_size / (time.time() - last_time),
}
last_time = time.time()
input_tokens = np.array(token_buffer[:chunk_size], dtype=np.int32)
output_tokens = np.array(token_buffer[1:chunk_size+1], dtype=np.int32)
# reshape to batch_size x seq_length
input_tokens = input_tokens.reshape(self.config.batch_size, -1)
output_tokens = output_tokens.reshape(self.config.batch_size, -1)
loss_masks = np.array(loss_mask_buffer[:chunk_size], dtype=np.float32).reshape(self.config.batch_size, -1)
yield {
"input_tokens": input_tokens,
"output_tokens": output_tokens,
"loss_masks": loss_masks,
}, metrics
token_buffer = token_buffer[chunk_size:]
loss_mask_buffer = loss_mask_buffer[chunk_size:]
def get_state_dict(self):
return dict(
config=self.config,
index=self._index,
file_loc=self._file_loc,
)
def load_state_dict(self, state_dict):
self.config = state_dict.get('config', self.config)
self._index = state_dict.get('index', self.config.index_at_start)
self._file_loc = state_dict.get('file_loc', self.config.start_seek_loc)
@property
def seq_length(self):
return self.config.seq_length
@property
def tokenizer(self):
return self._tokenizer
@property
def text_processor(self):
return self._text_processor
@property
def vocab_size(self):
return len(self.tokenizer)
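# A minimal, hypothetical sketch (toy numbers only) of the slicing performed inside
# Dataset.__iter__ above: output_tokens is input_tokens shifted by one position,
# so each position predicts the next token after reshaping to (batch, seq_length).
def _chunking_sketch(batch_size=2, seq_length=4):
    chunk_size = batch_size * seq_length
    token_buffer = list(range(chunk_size + 1))
    input_tokens = np.array(token_buffer[:chunk_size], dtype=np.int32)
    output_tokens = np.array(token_buffer[1:chunk_size + 1], dtype=np.int32)
    input_tokens = input_tokens.reshape(batch_size, -1)    # (2, 4)
    output_tokens = output_tokens.reshape(batch_size, -1)  # (2, 4) next-token targets
    return input_tokens, output_tokens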
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/data.py |
import functools
import json
import math
from functools import partial
from typing import Optional, Tuple
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks, make_causal_mask
from jax import lax
from jax import numpy as jnp
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
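# A minimal sketch of how the rotary helpers above are combined; the helper name and
# toy shapes are illustrative assumptions and it is not called anywhere in this module.
def _rotary_embedding_sketch(batch=2, seq_len=8, num_heads=4, head_dim=16):
    # Sin/cos table for every position, then gather the rows for our position ids.
    embed_positions = create_sinusoidal_positions(seq_len, head_dim)
    position_ids = jnp.tile(jnp.arange(seq_len)[None, :], (batch, 1))
    sincos = jnp.split(jnp.take(embed_positions, position_ids, axis=0), 2, axis=-1)
    # Rotate a toy query tensor of shape (batch, seq, heads, head_dim).
    query = jnp.ones((batch, seq_len, num_heads, head_dim))
    rotated = apply_rotary_pos_emb(query, sincos)
    return rotated.shape  # unchanged: (batch, seq_len, num_heads, head_dim)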
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
class AttentionBlock(nn.Module):
q_chunk_size: int # not used
k_chunk_size: int # not used
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = None # not used
prevent_cse: bool = False # not used
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
self.causal_mask = make_causal_mask(jnp.ones((1, self.max_position_embeddings), dtype="bool"), dtype="bool")
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
query, key, value = self.attn.forward_qkv(
hidden_states,
position_ids,
deterministic=deterministic,
)
query_length, key_length = query.shape[1], key.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
batch_size = hidden_states.shape[0]
causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
if self.causal:
attention_mask = combine_masks(attention_mask, causal_mask)
else:
attention_mask = attention_mask
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
return outputs
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/vanilla'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/blocks/vanilla.py |
import functools
import json
import math
from functools import partial
from typing import Callable, NamedTuple, Optional
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks, make_causal_mask
from jax import lax
from jax import numpy as jnp
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
def forward_query(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
query = self._split_heads(query)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
q_rot = apply_rotary_pos_emb(q_rot, sincos)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
return query
def forward_key_value(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
if self.float32_logits:
key = key.astype(jnp.float32)
return key, value
class AttentionBlock(nn.Module):
q_chunk_size: int
k_chunk_size: int
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = 'nothing_saveable'
prevent_cse: bool = False
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
# use standard dot product attention since query length is 1
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
                dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
else:
outputs = blockwise_compute(
self.attn,
hidden_states,
hidden_states,
position_ids,
num_heads=self.num_heads,
bias=attention_bias,
deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.attn_pdrop,
causal_mask=self.causal,
query_chunk_size=self.q_chunk_size,
key_chunk_size=self.k_chunk_size,
dtype=self.dtype,
policy=self.policy,
precision=None,
prevent_cse=self.prevent_cse,
)
return outputs
def _chunk_attention_bias(query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask,
query_chunk_idx, key_chunk_idx):
query_offset = query_chunk_idx * query_chunk_size
key_offset = key_chunk_idx * key_chunk_size
chunk_bias = jnp.zeros((1, 1, 1, 1))
if bias is not None:
chunk_bias = lax.dynamic_slice(
bias,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(*bias.shape[:2], min(bias.shape[-2], query_chunk_size), min(bias.shape[-1], key_chunk_size)),
)
if causal_mask:
query_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(query_chunk_size, 1), dimension=0)
key_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(1, key_chunk_size), dimension=1)
offset = query_offset - key_offset
query_idx += offset
causal_mask_value = (query_idx < key_idx) * MASK_VALUE
chunk_bias += causal_mask_value.reshape(1, 1, *causal_mask_value.shape)
if not deterministic and attn_pdrop > 0.0:
attn_dropout_slice = lax.dynamic_slice(
attn_dropout,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(
*attn_dropout.shape[:2],
min(attn_dropout.shape[-2], query_chunk_size),
min(attn_dropout.shape[-1], key_chunk_size),
),
)
chunk_bias -= attn_dropout_slice * 1e6
return chunk_bias
class Carry(NamedTuple):
numerator: jax.Array
denominator: jax.Array
max_so_far: jax.Array
def blockwise_compute(cell,
q_inputs,
kv_inputs,
position_ids,
num_heads,
bias=None,
deterministic=False,
dropout_rng=None,
attn_pdrop=0.0,
causal_mask=True,
query_chunk_size=None,
key_chunk_size=None,
dtype=jnp.float32,
policy='nothing_saveable',
precision=lax.Precision.HIGHEST,
prevent_cse=False,):
q_len = q_inputs.shape[1]
kv_len = kv_inputs.shape[1]
q_inputs = rearrange(q_inputs, 'b (n c) d -> b n c d', c=query_chunk_size)
kv_inputs = rearrange(kv_inputs, 'b (n c) d -> b n c d', c=key_chunk_size)
q_inputs, kv_inputs = map(lambda t: rearrange(t, 'b n c d -> n b c d'), (q_inputs, kv_inputs))
num_q, batch, _, _ = q_inputs.shape
num_kv, _, _, _ = kv_inputs.shape
q_position_ids = rearrange(position_ids, 'b (n c) -> n b c', c=query_chunk_size)
kv_position_ids = rearrange(position_ids, 'b (n c) -> n b c', c=key_chunk_size)
for bias_dim, broadcast_dim in zip(bias.shape, (batch, num_heads, q_len, kv_len)):
assert bias_dim == 1 or bias_dim == broadcast_dim
if not deterministic and attn_pdrop > 0.0:
attn_dropout_rng, dropout_rng = jax.random.split(dropout_rng)
attn_dropout = jax.random.bernoulli(attn_dropout_rng, attn_pdrop, (batch, num_heads, q_len, kv_len))
else:
attn_dropout = None
_chunk_bias_fn = functools.partial(
_chunk_attention_bias,
query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask)
def _query_chunk_attention(cell, _, args):
input_chunk, query_chunk_idx, query_position_ids_chunk = args
query_chunk = cell.forward_query(input_chunk, query_position_ids_chunk)
query_chunk = query_chunk / jnp.sqrt(query_chunk.shape[-1])
dim_per_head = query_chunk.shape[-1]
def summarize_chunk(cell, carry, args):
kv_chunk, key_chunk_idx, kv_position_ids_chunk = args
(numerator, denominator, prev_max_score) = carry
key_chunk, value_chunk = cell.forward_key_value(kv_chunk, kv_position_ids_chunk)
attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk, precision=precision)
bias_chunk = _chunk_bias_fn(query_chunk_idx, key_chunk_idx)
bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
attn_weights = attn_weights + bias_chunk
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
max_score = jnp.maximum(prev_max_score, max_score)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = jnp.einsum(
'bqhv,bvhf->bqhf', exp_weights, value_chunk, precision=precision
)
correction = jnp.exp(prev_max_score - max_score)
numerator = numerator * correction + exp_values
denominator = denominator * correction + exp_weights.sum(axis=-1, keepdims=True)
return Carry(numerator, denominator, max_score), None
init_carry = Carry(
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
(-jnp.inf) * jnp.ones((batch, query_chunk_size, num_heads, 1), dtype=dtype),
)
summarize_chunk = nn.remat(
summarize_chunk,
variables="params",
rngs={"params" : False, "dropout": False},
prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy),
)
(numerator, denominator, max_score), _ = nn.scan(
summarize_chunk,
variable_broadcast="params",
split_rngs={"params" : False, "dropout": False},
in_axes=0,
out_axes=0,
length=num_kv,
)(cell, init_carry, (kv_inputs, jnp.arange(0, num_kv), kv_position_ids))
attn_chunk = (numerator / denominator).astype(dtype)
attn_chunk = cell.attn_out_proj(attn_chunk, deterministic)
ffn_chunk = cell.forward_ffn(attn_chunk + input_chunk, deterministic)
outputs = ffn_chunk + attn_chunk + input_chunk
return _, outputs
_query_chunk_attention = nn.remat(
_query_chunk_attention,
variables="params",
rngs={"params" : False, "dropout": False},
prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy),
)
_, res = nn.scan(
_query_chunk_attention,
variable_broadcast="params",
split_rngs={"params" : False, "dropout": False},
in_axes=0,
out_axes=0,
length=num_q,
)(cell, None, (q_inputs, jnp.arange(0, num_q), q_position_ids))
res = rearrange(res, 'n b c d -> b (n c) d')
return res
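# A tiny numerical sketch (NumPy only, hypothetical helper) of the streaming-softmax
# recurrence that Carry/summarize_chunk above implement: per-chunk exponentials are
# rescaled by the running max so the chunked result matches a full softmax average.
def _streaming_softmax_sketch(num_chunks=3, chunk=4, dim=2, seed=0):
    rng = np.random.RandomState(seed)
    scores = rng.randn(num_chunks * chunk)         # attention logits for one query
    values = rng.randn(num_chunks * chunk, dim)    # value vectors
    numerator, denominator, max_so_far = np.zeros(dim), 0.0, -np.inf
    for i in range(num_chunks):
        s = scores[i * chunk:(i + 1) * chunk]
        v = values[i * chunk:(i + 1) * chunk]
        new_max = max(max_so_far, s.max())
        correction = np.exp(max_so_far - new_max)  # rescale what was accumulated so far
        exp_s = np.exp(s - new_max)
        numerator = numerator * correction + exp_s @ v
        denominator = denominator * correction + exp_s.sum()
        max_so_far = new_max
    streaming = numerator / denominator
    weights = np.exp(scores - scores.max())
    full = (weights / weights.sum()) @ values
    return np.allclose(streaming, full)  # True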
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/blockwise_parallel_v1'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/blocks/blockwise_parallel_v1.py |
Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/blocks/__init__.py |
|
import functools
import json
import math
from functools import partial
from typing import Callable, NamedTuple, Optional
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks, make_causal_mask
from jax import lax
from jax import numpy as jnp
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
class AttentionBlock(nn.Module):
q_chunk_size: int
k_chunk_size: int
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = 'nothing_saveable'
prevent_cse: bool = False
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
states from previous steps. This function is slighly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
query = query / jnp.sqrt(query.shape[-1])
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
# use standard dot product attention since query length is 1
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
                dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
else:
attn_output = blockwise_compute_attn(
query,
key,
value,
bias=attention_bias,
                deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.attn_pdrop,
causal_mask=self.causal,
query_chunk_size=self.q_chunk_size,
key_chunk_size=self.k_chunk_size,
dtype=self.dtype,
policy=self.policy,
precision=None,
prevent_cse=self.prevent_cse,
)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = blockwise_compute_ffn(
self.attn,
hidden_states + attn_output,
chunk_size=self.q_chunk_size,
deterministic=deterministic,
policy=self.policy,
prevent_cse=self.prevent_cse,
)
outputs = ffn_output + hidden_states + attn_output
return outputs
def _chunk_attention_bias(query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask,
query_chunk_idx, key_chunk_idx):
query_offset = query_chunk_idx * query_chunk_size
key_offset = key_chunk_idx * key_chunk_size
chunk_bias = jnp.zeros((1, 1, 1, 1))
if bias is not None:
chunk_bias = lax.dynamic_slice(
bias,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(*bias.shape[:2], min(bias.shape[-2], query_chunk_size), min(bias.shape[-1], key_chunk_size)),
)
if causal_mask:
query_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(query_chunk_size, 1), dimension=0)
key_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(1, key_chunk_size), dimension=1)
offset = query_offset - key_offset
query_idx += offset
causal_mask_value = (query_idx < key_idx) * MASK_VALUE
chunk_bias += causal_mask_value.reshape(1, 1, *causal_mask_value.shape)
if not deterministic and attn_pdrop > 0.0:
attn_dropout_slice = lax.dynamic_slice(
attn_dropout,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(
*attn_dropout.shape[:2],
min(attn_dropout.shape[-2], query_chunk_size),
min(attn_dropout.shape[-1], key_chunk_size),
),
)
chunk_bias -= attn_dropout_slice * 1e6
return chunk_bias
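# A minimal, hypothetical call into _chunk_attention_bias above for a single
# (query_chunk, key_chunk) pair with no padding bias and no dropout; it only shows
# that the causal offset adds MASK_VALUE wherever the key lies in the query's future.
def _chunk_bias_sketch(query_chunk_size=4, key_chunk_size=4):
    chunk_bias = _chunk_attention_bias(
        query_chunk_size, key_chunk_size,
        None,   # bias
        True,   # deterministic
        None,   # attn_dropout
        0.0,    # attn_pdrop
        True,   # causal_mask
        0,      # query_chunk_idx
        0,      # key_chunk_idx
    )
    # Shape (1, 1, query_chunk_size, key_chunk_size): zeros on and below the diagonal,
    # MASK_VALUE above it.
    return chunk_bias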
class Carry(NamedTuple):
numerator: jax.Array
denominator: jax.Array
max_so_far: jax.Array
def blockwise_compute_attn(query, key, value,
bias=None,
deterministic=False,
dropout_rng=None,
attn_pdrop=0.0,
causal_mask=True,
query_chunk_size=None,
key_chunk_size=None,
dtype=jnp.float32,
policy='nothing_saveable',
precision=lax.Precision.HIGHEST,
prevent_cse=False,):
q_len = query.shape[1]
kv_len = key.shape[1]
query = rearrange(query, 'b (n c) h q -> b n c h q', c=query_chunk_size)
key, value = map(lambda t: rearrange(t, 'b (n c) h v -> b n c h v', c=key_chunk_size), (key, value))
query, key, value = map(lambda t: rearrange(t, 'b n c h d -> n b c h d'), (query, key, value))
num_q, batch, _, num_heads, dim_per_head = query.shape
num_kv, _, _, _, _ = key.shape
for bias_dim, broadcast_dim in zip(bias.shape, (batch, num_heads, q_len, kv_len)):
assert bias_dim == 1 or bias_dim == broadcast_dim
if not deterministic and attn_pdrop > 0.0:
attn_dropout_rng, dropout_rng = jax.random.split(dropout_rng)
attn_dropout = jax.random.bernoulli(attn_dropout_rng, attn_pdrop, (batch, num_heads, q_len, kv_len))
else:
attn_dropout = None
_chunk_bias_fn = functools.partial(
_chunk_attention_bias,
query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask)
def _query_chunk_attention(args):
query_chunk, query_chunk_idx = args
@functools.partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def summarize_chunk(carry, args):
key_chunk, value_chunk, key_chunk_idx = args
(numerator, denominator, prev_max_score) = carry
attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk, precision=precision)
bias_chunk = _chunk_bias_fn(query_chunk_idx, key_chunk_idx)
bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
attn_weights = attn_weights + bias_chunk
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
max_score = jnp.maximum(prev_max_score, max_score)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = jnp.einsum(
'bqhv,bvhf->bqhf', exp_weights, value_chunk, precision=precision
)
correction = jnp.exp(prev_max_score - max_score)
numerator = numerator * correction + exp_values
denominator = denominator * correction + exp_weights.sum(axis=-1, keepdims=True)
return Carry(numerator, denominator, max_score), None
init_carry = Carry(
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
(-jnp.inf) * jnp.ones((batch, query_chunk_size, num_heads, 1), dtype=dtype),
)
(numerator, denominator, max_score), _ = lax.scan(
summarize_chunk, init_carry, xs=(key, value, jnp.arange(0, num_kv))
)
outputs = (numerator / denominator).astype(dtype)
return outputs
_, res = lax.scan(
lambda _, x: ((), _query_chunk_attention(x)),
(), xs=(query, jnp.arange(0, num_q))
)
res = rearrange(res, 'n b c h d -> b (n c) h d')
return res
def blockwise_compute_ffn(cell, inputs, chunk_size, deterministic, policy, prevent_cse):
inputs = rearrange(inputs, 'b (n c) d -> b n c d', c=chunk_size)
inputs = rearrange(inputs, 'b n c d -> n b c d')
num_q, _, _, _ = inputs.shape
def ffn(cell, _, hidden_states):
outputs = cell.forward_ffn(hidden_states, deterministic=deterministic)
return _, outputs
ffn_remat = nn.remat(
ffn,
variables="params",
rngs={"params" : False},
prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy),
)
_, res = nn.scan(
ffn_remat,
variable_broadcast="params",
split_rngs={"params": False},
in_axes=0,
out_axes=0,
length=num_q,
)(cell, None, inputs)
res = rearrange(res, 'n b c d -> b (n c) d')
return res
class Blockwise_LM_Head(nn.Module):
vocab_size: int
chunk_size: int
policy: str = 'nothing_saveable'
dtype: jnp.dtype = jnp.float32
prevent_cse: bool = False
def setup(self):
self.lm_head = nn.Dense(
self.vocab_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
def __call__(self, inputs):
inputs = rearrange(inputs, 'b (n c) d -> b n c d', c=self.chunk_size)
inputs = rearrange(inputs, 'b n c d -> n b c d')
num_q, _, _, _ = inputs.shape
def lm_head(cell, _, hidden_states):
outputs = cell(hidden_states)
return _, outputs
lm_head_remat = nn.remat(
lm_head,
variables="params",
rngs={"params" : False},
prevent_cse=self.prevent_cse,
policy=get_gradient_checkpoint_policy(self.policy),
)
_, res = nn.scan(
lm_head_remat,
variable_broadcast="params",
split_rngs={"params": False},
in_axes=0,
out_axes=0,
length=num_q,
)(self.lm_head, None, inputs)
res = rearrange(res, 'n b c d -> b (n c) d')
return res
def blockwise_cross_entropy(logits, tokens, valid=None,
chunk_size=None, policy=None, prevent_cse=None):
if valid is None:
valid = jnp.ones(tokens.shape[:2])
valid = valid.astype(jnp.float32)
logits = jnp.reshape(logits, (-1, logits.shape[-1]))
tokens = jnp.reshape(tokens, (-1,))
valid = jnp.reshape(valid, (-1,))
def _cross_entropy_loss_and_accuracy(logits, tokens, valid):
valid_text_length = jnp.maximum(jnp.sum(valid, axis=-1), 1e-10)
token_log_prob = jnp.squeeze(
jnp.take_along_axis(
jax.nn.log_softmax(logits, axis=-1),
jnp.expand_dims(tokens, -1),
axis=-1,
),
-1,
)
token_log_prob = jnp.where(valid > 0.0, token_log_prob, jnp.array(0.0))
correct = jnp.where(
valid > 0.0,
jnp.argmax(logits, axis=-1) == tokens,
jnp.array(False)
)
return token_log_prob, correct, valid_text_length
@partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def _loss_and_accuracy(carry, args):
loss, accuracy, num = carry
logits, tokens, valid = args
token_log_prob, correct, valid_text_length = \
_cross_entropy_loss_and_accuracy(logits, tokens, valid)
loss = loss + jnp.sum(token_log_prob, axis=-1) / valid_text_length
accuracy = accuracy + jnp.sum(correct, axis=-1) / valid_text_length
num = num + 1
return (loss, accuracy, num), None
num_chunk = logits.shape[0] // chunk_size
logits = rearrange(logits, '(n c) d -> n c d', c=chunk_size)
tokens = rearrange(tokens, '(n c) -> n c', c=chunk_size)
valid = rearrange(valid, '(n c) -> n c', c=chunk_size)
(loss, accuracy, num), _ = jax.lax.scan(
_loss_and_accuracy, (0.0, 0.0, 0), xs=(logits, tokens, valid),
length=num_chunk,
)
loss = - loss / num
accuracy = accuracy / num
return loss, accuracy
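# A minimal, hypothetical usage sketch of blockwise_cross_entropy above on toy logits;
# the shapes, chunk size, and policy name are illustrative assumptions only.
def _blockwise_loss_sketch(batch=2, seq=8, vocab=11, chunk=4, seed=0):
    logits = jax.random.normal(jax.random.PRNGKey(seed), (batch, seq, vocab))
    tokens = jnp.zeros((batch, seq), dtype=jnp.int32)
    loss, accuracy = blockwise_cross_entropy(
        logits, tokens, valid=jnp.ones((batch, seq)),
        chunk_size=chunk, policy='nothing_saveable', prevent_cse=False,
    )
    return loss, accuracy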
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/blockwise_parallel_simplified'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/blocks/blockwise_parallel.py |
import functools
import json
import math
from functools import partial
from typing import Callable, NamedTuple, Optional
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks, make_causal_mask
from jax import lax
from jax import numpy as jnp
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
class AttentionBlock(nn.Module):
q_chunk_size: int
k_chunk_size: int
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = 'nothing_saveable'
prevent_cse: bool = False
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
query, key, value = self.attn.forward_qkv(
hidden_states,
position_ids,
deterministic=deterministic,
)
query = query / jnp.sqrt(query.shape[-1])
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
# use standard dot product attention since query length is 1
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
                dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
else:
attn_output = blockwise_compute_attn(
query,
key,
value,
bias=attention_bias,
                deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.attn_pdrop,
causal_mask=self.causal,
query_chunk_size=self.q_chunk_size,
key_chunk_size=self.k_chunk_size,
dtype=self.dtype,
policy=self.policy,
precision=None,
prevent_cse=self.prevent_cse,
)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = ffn_output + hidden_states + attn_output
return outputs
def _chunk_attention_bias(query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask,
query_chunk_idx, key_chunk_idx):
query_offset = query_chunk_idx * query_chunk_size
key_offset = key_chunk_idx * key_chunk_size
chunk_bias = jnp.zeros((1, 1, 1, 1))
if bias is not None:
chunk_bias = lax.dynamic_slice(
bias,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(*bias.shape[:2], min(bias.shape[-2], query_chunk_size), min(bias.shape[-1], key_chunk_size)),
)
if causal_mask:
query_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(query_chunk_size, 1), dimension=0)
key_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(1, key_chunk_size), dimension=1)
offset = query_offset - key_offset
query_idx += offset
causal_mask_value = (query_idx < key_idx) * MASK_VALUE
chunk_bias += causal_mask_value.reshape(1, 1, *causal_mask_value.shape)
if not deterministic and attn_pdrop > 0.0:
attn_dropout_slice = lax.dynamic_slice(
attn_dropout,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(
*attn_dropout.shape[:2],
min(attn_dropout.shape[-2], query_chunk_size),
min(attn_dropout.shape[-1], key_chunk_size),
),
)
chunk_bias -= attn_dropout_slice * 1e6
return chunk_bias
class Carry(NamedTuple):
numerator: jax.Array
denominator: jax.Array
max_so_far: jax.Array
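# Carry holds the running accumulators for the chunked (online) softmax in blockwise_compute_attn below:
# the weighted-value numerator, the softmax denominator, and the per-query running max used for numerical stability.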
def blockwise_compute_attn(query, key, value,
bias=None,
deterministic=False,
dropout_rng=None,
attn_pdrop=0.0,
causal_mask=True,
query_chunk_size=None,
key_chunk_size=None,
dtype=jnp.float32,
policy='nothing_saveable',
precision=lax.Precision.HIGHEST,
prevent_cse=False,):
q_len = query.shape[1]
kv_len = key.shape[1]
query = rearrange(query, 'b (n c) h q -> b n c h q', c=query_chunk_size)
key, value = map(lambda t: rearrange(t, 'b (n c) h v -> b n c h v', c=key_chunk_size), (key, value))
query, key, value = map(lambda t: rearrange(t, 'b n c h d -> n b c h d'), (query, key, value))
num_q, batch, _, num_heads, dim_per_head = query.shape
num_kv, _, _, _, _ = key.shape
for bias_dim, broadcast_dim in zip(bias.shape, (batch, num_heads, q_len, kv_len)):
assert bias_dim == 1 or bias_dim == broadcast_dim
if not deterministic and attn_pdrop > 0.0:
attn_dropout_rng, dropout_rng = jax.random.split(dropout_rng)
attn_dropout = jax.random.bernoulli(attn_dropout_rng, attn_pdrop, (batch, num_heads, q_len, kv_len))
else:
attn_dropout = None
_chunk_bias_fn = functools.partial(
_chunk_attention_bias,
query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask)
def _query_chunk_attention(args):
query_chunk, query_chunk_idx = args
@functools.partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def summarize_chunk(carry, args):
key_chunk, value_chunk, key_chunk_idx = args
(numerator, denominator, prev_max_score) = carry
attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk, precision=precision)
bias_chunk = _chunk_bias_fn(query_chunk_idx, key_chunk_idx)
bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
attn_weights = attn_weights + bias_chunk
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
max_score = jnp.maximum(prev_max_score, max_score)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = jnp.einsum(
'bqhv,bvhf->bqhf', exp_weights, value_chunk, precision=precision
)
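            # Online softmax update: rescale the previously accumulated numerator/denominator by
            # exp(prev_max - new_max) so every chunk's contribution is expressed relative to the same running max.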
correction = jnp.exp(prev_max_score - max_score)
numerator = numerator * correction + exp_values
denominator = denominator * correction + exp_weights.sum(axis=-1, keepdims=True)
return Carry(numerator, denominator, max_score), None
init_carry = Carry(
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
(-jnp.inf) * jnp.ones((batch, query_chunk_size, num_heads, 1), dtype=dtype),
)
(numerator, denominator, max_score), _ = lax.scan(
summarize_chunk, init_carry, xs=(key, value, jnp.arange(0, num_kv))
)
outputs = (numerator / denominator).astype(dtype)
return outputs
_, res = lax.scan(
lambda _, x: ((), _query_chunk_attention(x)),
(), xs=(query, jnp.arange(0, num_q))
)
res = rearrange(res, 'n b c h d -> b (n c) h d')
return res
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/memeff'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/blocks/memeff.py |
import os
import numpy as np
from ml_collections import ConfigDict
import bpt.tools.utils as utils
import jax
import jax.numpy as jnp
import flax
from flax.serialization import (
from_bytes, to_bytes, to_state_dict, from_state_dict
)
from flax.traverse_util import flatten_dict, unflatten_dict, empty_node
import msgpack
from bpt.tools.jax_utils import tree_apply, float_tensor_to_dtype
class StreamingCheckpointer(object):
""" Custom msgpack checkpointer that saves large train states by serializing
and saving tensors one by one in a streaming fashion. Avoids running
out of memory or local TPU disk with default flax checkpointer.
"""
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.float_dtype = 'bf16'
config.save_optimizer_state = False
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, checkpoint_dir, enable=True):
self.config = self.get_default_config(config)
self.checkpoint_dir = checkpoint_dir
self.enable = enable
def save_checkpoint(self, train_state, filename, gather_fns=None):
if self.enable:
path = os.path.join(self.checkpoint_dir, filename)
else:
path = '/dev/null'
self.save_train_state_to_file(
train_state, path, gather_fns, self.config.float_dtype
)
@staticmethod
def save_train_state_to_file(train_state, path, gather_fns=None, float_dtype=None):
train_state = to_state_dict(train_state)
packer = msgpack.Packer()
flattend_train_state = flatten_dict(train_state)
if gather_fns is not None:
gather_fns = flatten_dict(to_state_dict(gather_fns))
with utils.open_file(path, "wb") as fout:
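            # Stream one flattened (key, tensor) record at a time so the full train state never has to be materialized in host memory.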
for key, value in flattend_train_state.items():
if gather_fns is not None:
value = gather_fns[key](value)
value = float_tensor_to_dtype(value, float_dtype)
fout.write(packer.pack((key, to_bytes(value))))
def save_pickle(self, obj, filename):
if self.enable:
path = os.path.join(self.checkpoint_dir, filename)
else:
path = '/dev/null'
utils.save_pickle(obj, path)
def save_all(self, train_state, gather_fns, metadata=None, dataset=None, milestone=False):
step = int(jax.device_get(train_state.step))
if self.config.save_optimizer_state:
checkpoint_state = train_state
checkpoint_name = 'streaming_train_state'
checkpoint_gather_fns = gather_fns
else:
checkpoint_state = train_state.params['params']
checkpoint_name = 'streaming_params'
checkpoint_gather_fns = gather_fns.params['params']
if milestone:
# Save a milestone checkpoint that will not be overwritten
self.save_pickle(metadata, f'metadata_{step}.pkl')
self.save_pickle(dataset, f'dataset_{step}.pkl')
self.save_checkpoint(
checkpoint_state, f'{checkpoint_name}_{step}', checkpoint_gather_fns
)
else:
# Save a normal checkpoint that can be overwritten
self.save_pickle(metadata, 'metadata.pkl')
self.save_pickle(dataset, 'dataset.pkl')
self.save_checkpoint(
checkpoint_state, f'{checkpoint_name}', checkpoint_gather_fns
)
@staticmethod
def load_checkpoint(path, target=None, shard_fns=None, remove_dict_prefix=None):
if shard_fns is not None:
shard_fns = flatten_dict(
to_state_dict(shard_fns)
)
if remove_dict_prefix is not None:
remove_dict_prefix = tuple(remove_dict_prefix)
flattend_train_state = {}
with utils.open_file(path) as fin:
# 83886080 bytes = 80 MB, which is 16 blocks on GCS
unpacker = msgpack.Unpacker(fin, read_size=83886080, max_buffer_size=0)
for key, value in unpacker:
key = tuple(key)
if remove_dict_prefix is not None:
if key[:len(remove_dict_prefix)] == remove_dict_prefix:
key = key[len(remove_dict_prefix):]
else:
continue
tensor = from_bytes(None, value)
if shard_fns is not None:
tensor = shard_fns[key](tensor)
flattend_train_state[key] = tensor
if target is not None:
flattened_target = flatten_dict(
to_state_dict(target), keep_empty_nodes=True
)
for key, value in flattened_target.items():
if key not in flattend_train_state and value == empty_node:
flattend_train_state[key] = value
train_state = unflatten_dict(flattend_train_state)
if target is None:
return train_state
return from_state_dict(target, train_state)
@staticmethod
def load_flax_checkpoint(path, target=None, shard_fns=None):
""" Load a standard flax checkpoint that's not saved with the
msgpack streaming format.
"""
with utils.open_file(path, "rb") as fin:
encoded_bytes = fin.read()
state_dict = flax.serialization.msgpack_restore(encoded_bytes)
if shard_fns is not None:
shard_fns = to_state_dict(shard_fns)
state_dict = tree_apply(shard_fns, state_dict)
if target is None:
return state_dict
return from_state_dict(target, state_dict)
@classmethod
def load_trainstate_checkpoint(cls, load_from, trainstate_target=None,
trainstate_shard_fns=None,
disallow_trainstate=False):
if trainstate_target is not None:
params_target = trainstate_target.params['params']
else:
params_target = None
if trainstate_shard_fns is not None:
params_shard_fns = trainstate_shard_fns.params['params']
else:
params_shard_fns = None
load_type, load_path = load_from.split('::', 1)
if disallow_trainstate:
assert load_type != 'trainstate', 'Loading full trainstate is not allowed!'
train_state = None
restored_params = None
if load_type == 'trainstate':
# Load the entire train state in the streaming format
train_state = cls.load_checkpoint(
path=load_path,
target=trainstate_target,
shard_fns=trainstate_shard_fns,
)
elif load_type == 'trainstate_params':
# Load the params part of the train state in the streaming format
restored_params = cls.load_checkpoint(
path=load_path,
target=params_target,
shard_fns=params_shard_fns,
remove_dict_prefix=('params', 'params'),
)
restored_params = flax.core.frozen_dict.freeze(
{'params': restored_params}
)
elif load_type == 'params':
# Load the params in the streaming format
restored_params = cls.load_checkpoint(
path=load_path,
target=params_target,
shard_fns=params_shard_fns,
)
restored_params = flax.core.frozen_dict.freeze(
{'params': restored_params}
)
elif load_type == 'flax_params':
# Load the params in the standard flax format (non-streaming)
# This requires the entire params to fit in memory
restored_params = cls.load_flax_checkpoint(
path=load_path,
target=params_target,
shard_fns=params_shard_fns
)
restored_params = flax.core.frozen_dict.freeze(
{'params': restored_params}
)
else:
raise ValueError(f'Invalid load_from type: {load_type}')
return train_state, restored_params
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/tools/checkpoint.py |
Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/tools/__init__.py |
|
import os
import math
from typing import Any, Mapping, Text, Tuple, Union, NamedTuple
from functools import partial
import re
import dataclasses
import random
import dill
import flax
import jax
import jax.numpy as jnp
from jax.sharding import PartitionSpec as PS
from jax.sharding import Mesh
from jax.experimental.pjit import with_sharding_constraint as _with_sharding_constraint
from jax.experimental.pjit import pjit
from jax.interpreters import pxla
import numpy as np
from absl import logging
from flax import jax_utils
from flax.training.train_state import TrainState
from flax.core import FrozenDict
import optax
from transformers import FlaxLogitsWarper
class JaxRNG(object):
""" A convenient stateful Jax RNG wrapper. Can be used to wrap RNG inside
pure function.
"""
@classmethod
def from_seed(cls, seed):
return cls(jax.random.PRNGKey(seed))
def __init__(self, rng):
self.rng = rng
def __call__(self, keys=None):
if keys is None:
self.rng, split_rng = jax.random.split(self.rng)
return split_rng
elif isinstance(keys, int):
split_rngs = jax.random.split(self.rng, num=keys + 1)
self.rng = split_rngs[0]
return tuple(split_rngs[1:])
else:
split_rngs = jax.random.split(self.rng, num=len(keys) + 1)
self.rng = split_rngs[0]
return {key: val for key, val in zip(keys, split_rngs[1:])}
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
""" JIT traceable version of FlaxLogitsWarper that performs temperature scaling."""
def __init__(self, temperature):
self.temperature = temperature
def __call__(self, input_ids, scores, cur_len):
return scores / jnp.clip(self.temperature, a_min=1e-8)
def make_shard_and_gather_fns(partition_specs, dtype_specs=None):
""" Create pytree of sharding and gathering functions from pytree of
partition specs.
"""
float_dtypes = (jnp.bfloat16, jnp.float16, jnp.float32, jnp.float64)
def make_to_dtype_fn(dtype_spec):
def to_dtype(tensor):
if dtype_specs in float_dtypes and getattr(tensor, 'dtype', None) in float_dtypes:
# Convert all float tensors to the same dtype
return tensor.astype(dtype_specs)
elif hasattr(dtype_spec, 'dtype') and hasattr(tensor, 'dtype'):
return tensor.astype(dtype_spec.dtype)
return tensor
return to_dtype
def make_shard_fn(partition_spec, dtype_spec=None):
jax_shard_function = pjit(
make_to_dtype_fn(dtype_spec),
in_shardings=None,
out_shardings=partition_spec
)
def shard_fn(tensor):
return jax_shard_function(tensor).block_until_ready()
return shard_fn
def make_gather_fn(partition_spec, dtype_spec=None):
jax_gather_fn = pjit(
make_to_dtype_fn(dtype_spec),
in_shardings=partition_spec,
out_shardings=None
)
def gather_fn(tensor):
return jax.device_get(jax_gather_fn(tensor))
return gather_fn
if dtype_specs is None or dtype_specs in float_dtypes:
shard_fns = jax.tree_util.tree_map(make_shard_fn, partition_specs)
gather_fns = jax.tree_util.tree_map(make_gather_fn, partition_specs)
else:
shard_fns = jax.tree_util.tree_map(
make_shard_fn, partition_specs, dtype_specs
)
gather_fns = jax.tree_util.tree_map(
make_gather_fn, partition_specs, dtype_specs
)
return shard_fns, gather_fns
def set_random_seed(seed):
np.random.seed(seed)
random.seed(seed)
init_rng(seed)
def get_jax_mesh(axis_dims, names):
if ':' in axis_dims:
dims = []
dim_names = []
for axis in axis_dims.split(','):
name, dim = axis.split(':')
assert name in names
dims.append(int(dim))
dim_names.append(name)
assert(set(dim_names) == set(names))
else:
dims = [int(x) for x in axis_dims.split(',')]
dim_names = names
assert len(dims) == len(names)
return Mesh(np.array(jax.devices()).reshape(dims), dim_names)
def names_in_current_mesh(*names):
""" Check if current mesh axes contain these names. """
mesh_axis_names = pxla.thread_resources.env.physical_mesh.axis_names
return set(names) <= set(mesh_axis_names)
def get_names_from_parition_spec(partition_specs):
""" Return axis names from partition specs. """
names = set()
if isinstance(partition_specs, dict):
partition_specs = partition_specs.values()
for item in partition_specs:
if item is None:
continue
elif isinstance(item, str):
names.add(item)
else:
names.update(get_names_from_parition_spec(item))
return list(names)
def with_sharding_constraint(x, partition_specs):
""" A smarter version of with_sharding_constraint that only applies the
constraint if the current mesh contains the axes in the partition specs.
"""
axis_names = get_names_from_parition_spec(partition_specs)
if names_in_current_mesh(*axis_names):
x = _with_sharding_constraint(x, partition_specs)
return x
def wrap_function_with_rng(rng):
""" To be used as decorator, automatically bookkeep a RNG for the wrapped function. """
def wrap_function(function):
def wrapped(*args, **kwargs):
nonlocal rng
rng, split_rng = jax.random.split(rng)
return function(split_rng, *args, **kwargs)
return wrapped
return wrap_function
def init_rng(seed):
global jax_utils_rng
jax_utils_rng = JaxRNG.from_seed(seed)
def next_rng(*args, **kwargs):
global jax_utils_rng
return jax_utils_rng(*args, **kwargs)
def get_metrics(metrics, unreplicate=False, stack=False):
if unreplicate:
metrics = flax.jax_utils.unreplicate(metrics)
metrics = jax.device_get(metrics)
if stack:
return jax.tree_map(lambda *args: np.stack(args), *metrics)
else:
return {key: float(val) for key, val in metrics.items()}
def mse_loss(val, target, valid=None):
if valid is None:
valid = jnp.ones((*target.shape[:2], 1))
valid = valid.astype(jnp.float32)
loss = jnp.mean(
jnp.where(
valid > 0.0,
jnp.square(val - target),
0.0
)
)
return loss
def cross_entropy_loss(logits, labels, smoothing_factor=0.):
num_classes = logits.shape[-1]
if labels.dtype == jnp.int32 or labels.dtype == jnp.int64:
labels = jax.nn.one_hot(labels, num_classes)
if smoothing_factor > 0.:
labels = labels * (1. - smoothing_factor) + smoothing_factor / num_classes
logp = jax.nn.log_softmax(logits, axis=-1)
return -jnp.mean(jnp.sum(logp * labels, axis=-1))
def cross_entropy_loss_and_accuracy(logits, tokens, valid=None):
if valid is None:
valid = jnp.ones(tokens.shape[:2])
valid = valid.astype(jnp.float32)
valid_text_length = jnp.maximum(jnp.sum(valid, axis=-1), 1e-10)
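    # Gather the log-probability assigned to each target token from the log-softmax over the vocabulary.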
token_log_prob = jnp.squeeze(
jnp.take_along_axis(
jax.nn.log_softmax(logits, axis=-1),
jnp.expand_dims(tokens, -1),
axis=-1,
),
-1,
)
token_log_prob = jnp.where(valid > 0.0, token_log_prob, jnp.array(0.0))
loss = -jnp.mean(jnp.sum(token_log_prob, axis=-1) / valid_text_length)
correct = jnp.where(
valid > 0.0,
jnp.argmax(logits, axis=-1) == tokens,
jnp.array(False)
)
accuracy = jnp.mean(jnp.sum(correct, axis=-1) / valid_text_length)
return loss, accuracy
def global_norm(tree):
""" Return the global L2 norm of a pytree. """
squared = jax.tree_util.tree_map(lambda x: jnp.sum(jnp.square(x)), tree)
flattened, _ = jax.flatten_util.ravel_pytree(squared)
return jnp.sqrt(jnp.sum(flattened))
def average_metrics(metrics):
return jax.tree_map(
lambda *args: jnp.mean(jnp.stack(args)),
*metrics
)
def get_float_dtype_by_name(dtype):
return {
'bf16': jnp.bfloat16,
'fp16': jnp.float16,
'fp32': jnp.float32,
'fp64': jnp.float64,
}[dtype]
def float_tensor_to_dtype(tensor, dtype):
if dtype is None or dtype == '':
return tensor
if isinstance(dtype, str):
dtype = get_float_dtype_by_name(dtype)
float_dtypes = (jnp.bfloat16, jnp.float16, jnp.float32, jnp.float64)
if getattr(tensor, 'dtype', None) in float_dtypes:
tensor = tensor.astype(dtype)
return tensor
def float_to_dtype(tree, dtype):
return jax.tree_util.tree_map(
partial(float_tensor_to_dtype, dtype=dtype), tree
)
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
def tree_path_to_string(path, sep=None):
keys = []
for key in path:
if isinstance(key, jax.tree_util.SequenceKey):
keys.append(str(key.idx))
elif isinstance(key, jax.tree_util.DictKey):
keys.append(str(key.key))
elif isinstance(key, jax.tree_util.GetAttrKey):
keys.append(str(key.name))
elif isinstance(key, jax.tree_util.FlattenedIndexKey):
keys.append(str(key.key))
else:
keys.append(str(key))
if sep is None:
return tuple(keys)
return sep.join(keys)
def flatten_tree(xs, is_leaf=None, sep=None):
flattened, _ = jax.tree_util.tree_flatten_with_path(xs, is_leaf=is_leaf)
output = {}
for key, val in flattened:
output[tree_path_to_string(key, sep=sep)] = val
return output
def named_tree_map(f, tree, *rest, is_leaf=None, sep=None):
""" An extended version of jax.tree_util.tree_map, where the mapped function
f takes both the name (path) and the tree leaf as input.
"""
return jax.tree_util.tree_map_with_path(
lambda path, x, *r: f(tree_path_to_string(path, sep=sep), x, *r),
tree, *rest,
is_leaf=is_leaf
)
def match_partition_rules(rules, params):
""" Returns a pytree of PartitionSpec according to rules. Supports handling
Flax TrainState and Optax optimizer state.
"""
def get_partition_spec(name, leaf):
if len(leaf.shape) == 0 or np.prod(leaf.shape) == 1:
""" Don't partition scalar values. """
return PS()
for rule, ps in rules:
if re.search(rule, name) is not None:
return ps
raise ValueError(f'Partition rule not found for param: {name}')
return named_tree_map(get_partition_spec, params, sep='/')
def get_weight_decay_mask(exclusions):
""" Return a weight decay mask function that computes the pytree masks
according to the given exclusion rules.
"""
def decay(name, _):
for rule in exclusions:
if re.search(rule, name) is not None:
return False
return True
def weight_decay_mask(params):
return named_tree_map(decay, params, sep='/')
return weight_decay_mask
def tree_apply(fns, tree):
""" Apply a pytree of functions to the pytree. """
return jax.tree_util.tree_map(lambda fn, x: fn(x), fns, tree)
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/tools/jax_utils.py |
import os
import time
from typing import Any, Mapping, Text, Tuple, Union, NamedTuple
from functools import partial
import re
import dataclasses
import random
from ml_collections.config_dict import config_dict
from ml_collections import ConfigDict
import jax
import jax.numpy as jnp
import numpy as np
from absl import logging
import optax
from bpt.tools.jax_utils import float_to_dtype
class OptimizerFactory(object):
""" Configurable optax optimizer factory. """
def __init__(self):
raise NotImplementedError
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.accumulate_gradient_steps = 1
config.type = 'adamw'
config.palm_optimizer = PalmOptimizerFactory.get_default_config()
config.adamw_optimizer = AdamWOptimizerFactory.get_default_config()
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_optimizer(cls, config, weight_decay_mask=None):
config = cls.get_default_config(config)
if config.type == 'palm':
optimizer, optimizer_info = PalmOptimizerFactory.get_optimizer(
config.palm_optimizer, weight_decay_mask
)
elif config.type == 'adamw':
optimizer, optimizer_info = AdamWOptimizerFactory.get_optimizer(
config.adamw_optimizer, weight_decay_mask
)
else:
raise ValueError(f'Unknown optimizer type: {config.type}')
if config.accumulate_gradient_steps > 1:
optimizer = optax.MultiSteps(
optimizer, config.accumulate_gradient_steps
)
return optimizer, optimizer_info
class PalmOptimizerFactory(object):
""" PaLM optimizer factory. This optimizer implements the optimizer
described in the PaLM paper: https://arxiv.org/abs/2204.02311
"""
def __init__(self):
raise NotImplementedError
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.lr = 0.01
config.lr_warmup_steps = 10000
config.b1 = 0.9
config.b2 = 0.99
config.clip_gradient = 1.0
config.weight_decay = 1e-4
config.bf16_momentum = True
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_optimizer(cls, config, weight_decay_mask=None):
config = cls.get_default_config(config)
def learning_rate_schedule(step):
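            # Constant learning rate during warmup, then inverse square-root decay in the step count (PaLM schedule).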
multiplier = config.lr / 0.01
return multiplier / jnp.sqrt(jnp.maximum(step, config.lr_warmup_steps))
def weight_decay_schedule(step):
multiplier = config.weight_decay / 1e-4
return -multiplier * jnp.square(learning_rate_schedule(step))
optimizer_info = dict(
learning_rate_schedule=learning_rate_schedule,
weight_decay_schedule=weight_decay_schedule,
)
optimizer = optax.chain(
optax.clip_by_global_norm(config.clip_gradient),
optax.adafactor(
learning_rate=learning_rate_schedule,
multiply_by_parameter_scale=True,
momentum=config.b1,
decay_rate=config.b2,
factored=False,
clipping_threshold=None,
dtype_momentum=jnp.bfloat16 if config.bf16_momentum else jnp.float32,
),
optax_add_scheduled_weight_decay(
weight_decay_schedule, weight_decay_mask
)
)
return optimizer, optimizer_info
class AdamWOptimizerFactory(object):
""" AdamW optimizer with cosine schedule. """
def __init__(self):
raise NotImplementedError
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.init_lr = 0.0
config.end_lr = 0.001
config.lr = 0.01
config.lr_warmup_steps = 2000
config.lr_decay_steps = 500000
config.b1 = 0.9
config.b2 = 0.95
config.clip_gradient = 1.0
config.weight_decay = 1e-4
config.bf16_momentum = True
config.multiply_by_parameter_scale = True
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_optimizer(cls, config, weight_decay_mask=None):
config = cls.get_default_config(config)
learning_rate_schedule = optax.warmup_cosine_decay_schedule(
init_value=config.init_lr,
peak_value=config.lr,
warmup_steps=config.lr_warmup_steps,
decay_steps=config.lr_decay_steps,
end_value=config.end_lr,
)
optimizer_info = dict(
learning_rate_schedule=learning_rate_schedule,
)
if config.multiply_by_parameter_scale:
optimizer = optax.chain(
optax.clip_by_global_norm(config.clip_gradient),
optax.adafactor(
learning_rate=learning_rate_schedule,
multiply_by_parameter_scale=True,
momentum=config.b1,
decay_rate=config.b2,
factored=False,
clipping_threshold=None,
dtype_momentum=jnp.bfloat16 if config.bf16_momentum else jnp.float32,
),
optax_add_scheduled_weight_decay(
lambda step: -learning_rate_schedule(step) * config.weight_decay,
weight_decay_mask
)
)
else:
optimizer = optax.chain(
optax.clip_by_global_norm(config.clip_gradient),
optax.adamw(
learning_rate=learning_rate_schedule,
weight_decay=config.weight_decay,
                    b1=config.b1,
                    b2=config.b2,
mask=weight_decay_mask,
mu_dtype=jnp.bfloat16 if config.bf16_momentum else jnp.float32,
),
)
return optimizer, optimizer_info
class OptaxScheduledWeightDecayState(NamedTuple):
count: jnp.DeviceArray
def optax_add_scheduled_weight_decay(schedule_fn, mask=None):
""" Apply weight decay with schedule. """
def init_fn(params):
del params
return OptaxScheduledWeightDecayState(count=jnp.zeros([], jnp.int32))
def update_fn(updates, state, params):
if params is None:
raise ValueError('Params cannot be None for weight decay!')
weight_decay = schedule_fn(state.count)
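        # Scheduled (decoupled) weight decay: shift each update by weight_decay(step) * param;
        # both factories above pass a negative schedule so parameters shrink over training.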
updates = jax.tree_util.tree_map(
lambda g, p: g + weight_decay * p, updates, params
)
return updates, OptaxScheduledWeightDecayState(
count=optax.safe_int32_increment(state.count)
)
if mask is not None:
return optax.masked(optax.GradientTransformation(init_fn, update_fn), mask)
return optax.GradientTransformation(init_fn, update_fn)
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/tools/optimizers.py |
import inspect
import logging
import os
import pprint
import random
import tempfile
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from io import BytesIO
from socket import gethostname
import dataclasses
import absl.flags
import absl.logging
import cloudpickle as pickle
import flax
import gcsfs
import jax
import jax.numpy as jnp
import msgpack
import numpy as np
import wandb
from flax.serialization import from_bytes, to_bytes
from ml_collections import ConfigDict
from ml_collections.config_dict.config_dict import placeholder
from ml_collections.config_flags import config_flags
from flax.training.train_state import TrainState
from flax.core import FrozenDict
from absl.app import run
class WandBLogger(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.project_id = ""
config.project_entity = placeholder(str)
config.experiment_id = placeholder(str)
config.append_uuid = True
config.experiment_note = placeholder(str)
config.output_dir = "/tmp/"
config.wandb_dir = ""
config.profile_dir = ""
config.online = False
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, variant, enable=True):
self.enable = enable
self.config = self.get_default_config(config)
if self.config.experiment_id is None or self.config.experiment_id == "":
self.config.experiment_id = uuid.uuid4().hex
else:
if self.config.append_uuid:
self.config.experiment_id = (
str(self.config.experiment_id) + "_" + uuid.uuid4().hex
)
else:
self.config.experiment_id = str(self.config.experiment_id)
if self.enable:
if self.config.output_dir == "":
self.config.output_dir = tempfile.mkdtemp()
else:
self.config.output_dir = os.path.join(
self.config.output_dir, self.config.experiment_id
)
if not self.config.output_dir.startswith("gs://"):
os.makedirs(self.config.output_dir, exist_ok=True)
if self.config.wandb_dir == "":
if not self.config.output_dir.startswith("gs://"):
# Use the same directory as output_dir if it is not a GCS path.
self.config.wandb_dir = self.config.output_dir
else:
# Otherwise, use a temporary directory.
self.config.wandb_dir = tempfile.mkdtemp()
else:
# Join the wandb_dir with the experiment_id.
self.config.wandb_dir = os.path.join(
self.config.wandb_dir, self.config.experiment_id
)
os.makedirs(self.config.wandb_dir, exist_ok=True)
if self.config.profile_dir == "":
if not self.config.output_dir.startswith("gs://"):
# Use the same directory as output_dir if it is not a GCS path.
self.config.profile_dir = self.config.output_dir
else:
# Otherwise, use a temporary directory.
self.config.profile_dir = tempfile.mkdtemp()
else:
# Join the profile_dir with the experiment_id.
self.config.profile_dir = os.path.join(
self.config.profile_dir, self.config.experiment_id
)
os.makedirs(self.config.profile_dir, exist_ok=True)
self._variant = flatten_config_dict(variant)
if "hostname" not in self._variant:
self._variant["hostname"] = gethostname()
if self.enable:
self.run = wandb.init(
reinit=True,
config=self._variant,
project=self.config.project_id,
dir=self.config.wandb_dir,
id=self.config.experiment_id,
resume="allow",
notes=self.config.experiment_note,
entity=self.config.project_entity,
settings=wandb.Settings(
start_method="thread",
_disable_stats=True,
),
mode="online" if self.config.online else "offline",
)
else:
self.run = None
def log(self, *args, **kwargs):
if self.enable:
self.run.log(*args, **kwargs)
def save_pickle(self, obj, filename):
if self.enable:
save_pickle(obj, os.path.join(self.config.output_dir, filename))
@property
def experiment_id(self):
return self.config.experiment_id
@property
def variant(self):
        return self._variant
@property
def output_dir(self):
return self.config.output_dir
@property
def wandb_dir(self):
return self.config.wandb_dir
@property
def profile_dir(self):
return self.config.profile_dir
def config_dict(*args, **kwargs):
return ConfigDict(dict(*args, **kwargs))
def define_flags_with_default(**kwargs):
for key, val in kwargs.items():
if isinstance(val, tuple):
val, help_str = val
else:
help_str = ""
if isinstance(val, ConfigDict):
config_flags.DEFINE_config_dict(key, val)
elif isinstance(val, bool):
# Note that True and False are instances of int.
absl.flags.DEFINE_bool(key, val, help_str)
elif isinstance(val, int):
absl.flags.DEFINE_integer(key, val, help_str)
elif isinstance(val, float):
absl.flags.DEFINE_float(key, val, help_str)
elif isinstance(val, str):
absl.flags.DEFINE_string(key, val, help_str)
else:
raise ValueError("Incorrect value type")
return absl.flags.FLAGS, kwargs
def print_flags(flags, flags_def):
    flag_strings = [
"{}: {}".format(key, val)
for key, val in get_user_flags(flags, flags_def).items()
]
logging.info(
"Hyperparameter configs: \n{}".format(
            pprint.pformat(flag_strings)
)
)
def get_user_flags(flags, flags_def):
output = {}
for key in flags_def:
val = getattr(flags, key)
if isinstance(val, ConfigDict):
output.update(flatten_config_dict(val, prefix=key))
else:
output[key] = val
return output
def user_flags_to_config_dict(flags, flags_def):
output = ConfigDict()
for key in flags_def:
output[key] = getattr(flags, key)
return output
def flatten_config_dict(config, prefix=None):
output = {}
for key, val in config.items():
if isinstance(val, ConfigDict) or isinstance(val, dict):
output.update(flatten_config_dict(val, prefix=key))
else:
if prefix is not None:
output["{}.{}".format(prefix, key)] = val
else:
output[key] = val
return output
def function_args_to_config(fn, none_arg_types=None, exclude_args=None, override_args=None):
config = ConfigDict()
    arg_spec = inspect.getfullargspec(fn)
n_args = len(arg_spec.defaults)
arg_names = arg_spec.args[-n_args:]
default_values = arg_spec.defaults
for name, value in zip(arg_names, default_values):
if exclude_args is not None and name in exclude_args:
continue
elif override_args is not None and name in override_args:
config[name] = override_args[name]
elif none_arg_types is not None and value is None and name in none_arg_types:
config[name] = placeholder(none_arg_types[name])
else:
config[name] = value
return config
def prefix_metrics(metrics, prefix):
return {"{}/{}".format(prefix, key): value for key, value in metrics.items()}
def open_file(path, mode='rb', cache_type='readahead'):
if path.startswith("gs://"):
logging.getLogger("fsspec").setLevel(logging.WARNING)
return gcsfs.GCSFileSystem().open(path, mode, cache_type=cache_type)
else:
return open(path, mode)
def save_pickle(obj, path):
with open_file(path, "wb") as fout:
pickle.dump(obj, fout)
def load_pickle(path):
with open_file(path, "rb") as fin:
data = pickle.load(fin)
return data
def text_to_array(text, encoding="utf-8"):
return np.frombuffer(text.encode(encoding), dtype="uint8")
def array_to_text(array, encoding="utf-8"):
with BytesIO(array) as fin:
text = fin.read().decode(encoding)
return text
class JaxRNG(object):
""" A convenient stateful Jax RNG wrapper. Can be used to wrap RNG inside
pure function.
"""
@classmethod
def from_seed(cls, seed):
return cls(jax.random.PRNGKey(seed))
def __init__(self, rng):
self.rng = rng
def __call__(self, keys=None):
if keys is None:
self.rng, split_rng = jax.random.split(self.rng)
return split_rng
elif isinstance(keys, int):
split_rngs = jax.random.split(self.rng, num=keys + 1)
self.rng = split_rngs[0]
return tuple(split_rngs[1:])
else:
split_rngs = jax.random.split(self.rng, num=len(keys) + 1)
self.rng = split_rngs[0]
return {key: val for key, val in zip(keys, split_rngs[1:])}
def wrap_function_with_rng(rng):
""" To be used as decorator, automatically bookkeep a RNG for the wrapped function. """
def wrap_function(function):
def wrapped(*args, **kwargs):
nonlocal rng
rng, split_rng = jax.random.split(rng)
return function(split_rng, *args, **kwargs)
return wrapped
return wrap_function
def init_rng(seed):
global jax_utils_rng
jax_utils_rng = JaxRNG.from_seed(seed)
def next_rng(*args, **kwargs):
global jax_utils_rng
return jax_utils_rng(*args, **kwargs)
def flatten_tree(xs, is_leaf=None, sep=None):
""" A stronger version of flax.traverse_util.flatten_dict, supports
dict, tuple, list and TrainState. Tuple and list indices will be
converted to strings.
"""
tree_node_classes = (FrozenDict, dict, tuple, list, TrainState)
if not isinstance(xs, tree_node_classes):
        raise ValueError(f'Unsupported node type: {type(xs)}')
def _is_leaf(prefix, fx):
if is_leaf is not None:
            return is_leaf(prefix, fx)
return False
def _key(path):
if sep is None:
return path
return sep.join(path)
def _convert_to_dict(xs):
if isinstance(xs, (FrozenDict, dict)):
return xs
elif isinstance(xs, (tuple, list)):
return {f'{i}': v for i, v in enumerate(xs)}
elif isinstance(xs, TrainState):
output = {}
for field in dataclasses.fields(xs):
if 'pytree_node' not in field.metadata or field.metadata['pytree_node']:
output[field.name] = getattr(xs, field.name)
return output
else:
            raise ValueError(f'Unsupported node type: {type(xs)}')
def _flatten(xs, prefix):
if not isinstance(xs, tree_node_classes) or _is_leaf(prefix, xs):
return {_key(prefix): xs}
result = {}
is_empty = True
for (key, value) in _convert_to_dict(xs).items():
is_empty = False
path = prefix + (key, )
result.update(_flatten(value, path))
return result
return _flatten(xs, ())
def named_tree_map(f, tree, is_leaf=None, sep=None):
""" An extended version of jax.tree_util.tree_map, where the mapped function
f takes both the name (path) and the tree leaf as input.
"""
flattened_tree = flatten_tree(tree, is_leaf=is_leaf, sep=sep)
id_to_name = {id(val): key for key, val in flattened_tree.items()}
def map_fn(leaf):
name = id_to_name[id(leaf)]
return f(name, leaf)
return jax.tree_util.tree_map(map_fn, tree)
def get_pytree_shape_info(tree):
flattend_tree = flatten_tree(tree, sep='/')
shapes = []
for key in sorted(list(flattend_tree.keys())):
val = flattend_tree[key]
shapes.append(f'{key}: {val.dtype}, {val.shape}')
return '\n'.join(shapes)
def collect_metrics(metrics, names, prefix=None):
collected = {}
for name in names:
if name in metrics:
collected[name] = jnp.mean(metrics[name])
if prefix is not None:
collected = {
'{}/{}'.format(prefix, key): value for key, value in collected.items()
}
return collected
def set_random_seed(seed):
np.random.seed(seed)
random.seed(seed)
init_rng(seed)
| Blockwise-Parallel-Transformer-main | blockwise-parallel-transformer/bpt/tools/utils.py |
import torch
import torch.nn as nn
class BlockwiseParallelTransformerAttention(nn.Module):
def __init__(self, input_size, num_heads, hidden_size, num_layers, max_seq_len, block_size):
super(BlockwiseParallelTransformerAttention, self).__init__()
self.input_size = input_size
self.num_heads = num_heads
self.hidden_size = hidden_size
self.num_layers = num_layers
self.max_seq_len = max_seq_len
self.block_size = block_size
self.dim_per_head = hidden_size // num_heads
self.query_chunk_size = max_seq_len // block_size
self.key_value_chunk_size = max_seq_len // block_size
self.num_query_chunks = (max_seq_len + self.query_chunk_size - 1) // self.query_chunk_size
self.num_key_value_chunks = (max_seq_len + self.key_value_chunk_size - 1) // self.key_value_chunk_size
self.query_position_ids = torch.arange(max_seq_len)
self.key_value_position_ids = torch.arange(max_seq_len)
self.query_blocks = nn.Linear(input_size, hidden_size, bias=False)
self.key_blocks = nn.Linear(input_size, hidden_size, bias=False)
self.value_blocks = nn.Linear(input_size, hidden_size, bias=False)
self.feedforward = nn.Linear(hidden_size, hidden_size)
def _chunk_bias_fn(self, query_chunk_idx, key_chunk_idx):
start = key_chunk_idx * self.key_value_chunk_size
end = (key_chunk_idx + 1) * self.key_value_chunk_size
bias_chunk = torch.zeros((self.num_heads, self.query_chunk_size, self.key_value_chunk_size))
bias_chunk[:, :, start:end] = 1
bias_chunk = bias_chunk.unsqueeze(0)
bias_chunk = bias_chunk.repeat(query_chunk_idx.shape[0], 1, 1, 1)
return bias_chunk
def _query_block(self, input_chunk, query_chunk_idx):
query_chunk = self.query_blocks(input_chunk)
        query_chunk = query_chunk / (query_chunk.shape[-1] ** 0.5)
return query_chunk
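    # Accumulates one key/value chunk's contribution to the attention numerator and denominator (blockwise softmax over the key dimension).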
def _key_value_blocks(self, carry, args):
kv_chunk, key_chunk_idx, kv_position_ids_chunk = args
query_chunk, query_chunk_idx = carry
key_chunk = self.key_blocks(kv_chunk)
value_chunk = self.value_blocks(kv_chunk)
        attn_weights = torch.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk)
bias_chunk = self._chunk_bias_fn(query_chunk_idx, key_chunk_idx)
bias_chunk = bias_chunk.permute(0, 1, 3, 2)
attn_weights = attn_weights + bias_chunk
max_score = torch.max(attn_weights, dim=-1, keepdim=True)[0]
exp_weights = torch.exp(attn_weights - max_score)
exp_values = torch.einsum('bqhv, bvhf->bqhf', exp_weights, value_chunk)
numerator = query_chunk.clone()
numerator[:, key_chunk_idx, :, :] = exp_values
denominator = query_chunk.clone()
denominator[:, key_chunk_idx, :, :] = exp_weights.sum(dim=-1, keepdim=True)
return (numerator, denominator), None
def forward(self, x, deterministic=None):
batch_size, seq_len, input_size = x.shape
assert input_size == self.input_size, f"Input size must be {self.input_size} but got {input_size}"
query_chunks = x.reshape(batch_size, self.num_query_chunks, self.query_chunk_size, input_size)
query_chunks = self.query_blocks(query_chunks)
        query_chunks = query_chunks / (query_chunks.shape[-1] ** 0.5)
query_position_ids = self.query_position_ids.repeat(batch_size, 1)
query_position_ids = query_position_ids.reshape(batch_size, self.num_query_chunks, self.query_chunk_size)
query_position_ids = query_position_ids.roll(shift=-1, dims=-1)
query_position_ids[:, :, -1] = self.max_seq_len - 1
key_value_chunks = x.reshape(batch_size, self.num_key_value_chunks, self.key_value_chunk_size, input_size)
key_value_chunks = key_value_chunks.detach() if deterministic else key_value_chunks
        key_value_position_ids = self.key_value_position_ids.repeat(batch_size, 1)
        key_value_position_ids = key_value_position_ids.reshape(batch_size, self.num_key_value_chunks, self.key_value_chunk_size)
        key_value_position_ids = key_value_position_ids[:, :-1, :]
        key_value_position_ids = torch.cat([key_value_position_ids, torch.ones((batch_size, 1, self.key_value_chunk_size), dtype=torch.long) * (self.max_seq_len - 1)], dim=1)
carry = (query_chunks, None)
for key_chunk_idx in range(self.num_key_value_chunks):
kv_chunk = key_value_chunks[:, key_chunk_idx, :, :]
kv_position_ids_chunk = key_value_position_ids[:, key_chunk_idx, :]
carry, _ = self._key_value_blocks(carry, (kv_chunk, key_chunk_idx, kv_position_ids_chunk))
attn_output = carry[0]
attn_output = attn_output.reshape(batch_size, seq_len, self.hidden_size)
attn_output = self.feedforward(attn_output)
return attn_output
# input sequence
batch_size = 2
seq_len = 1024
input_size = 512
x = torch.randn(batch_size, seq_len, input_size)
# define params
num_heads = 8
hidden_size = 512
num_layers = 6
max_seq_len = 1024
block_size = 64
# create an instance of BlockwiseParallelTransformerAttention
model = BlockwiseParallelTransformerAttention(input_size, num_heads, hidden_size, num_layers, max_seq_len, block_size)
#pass the input sequence to the module to get the output
output = model(x)
print(output.shape)
# import torch
# from torch import nn
# class BlockwiseParallelTransformerAttention(nn.Module):
# def __init__(self, input_size, num_heads, hidden_size, num_layers, max_seq_len, block_size):
# super(BlockwiseParallelTransformerAttention, self).__init__()
# self.input_size = input_size
# self.num_heads = num_heads
# self.hidden_size = hidden_size
# self.num_layers = num_layers
# self.max_seq_len = max_seq_len
# self.block_size = block_size
# self.query_projection = nn.Linear(input_size, num_heads * hidden_size)
# self.key_projection = nn.Linear(input_size, num_heads * hidden_size)
# self.value_projection = nn.Linear(input_size, num_heads * hidden_size)
# self.feedforward = nn.Sequential(
# nn.Linear(num_heads * hidden_size, hidden_size),
# nn.ReLU(),
# nn.Linear(hidden_size, num_heads * hidden_size)
# )
# self.layer_norm1 = nn.LayerNorm(input_size)
# self.layer_norm2 = nn.LayerNorm(num_heads * hidden_size)
# def forward(self, x):
# batch_size, seq_len, input_size = x.size()
# num_blocks = seq_len // self.block_size
# query_blocks = x[:, :num_blocks*self.block_size, :].view(batch_size, num_blocks, self.block_size, input_size)
# key_value_blocks = x[:, :num_blocks*self.block_size, :].view(batch_size, num_blocks, self.block_size, input_size)
# for i in range(self.num_layers):
# for outer in range(num_blocks):
# query = self.query_projection(query_blocks[:, outer, :, :])
# for inner in range(num_blocks):
# key = self.key_projection(key_value_blocks[:, inner, :, :])
# value = self.value_projection(key_value_blocks[:, inner, :, :])
# attention_scores = torch.matmul(query, key.transpose(-2, -1)) / torch.sqrt(torch.tensor(self.hidden_size, dtype=torch.float32))
# attention_weights = nn.functional.softmax(attention_scores, dim=-1)
# attention_output = torch.matmul(attention_weights, value)
# if inner == 0:
# blockwise_attention_output = attention_output
# else:
# blockwise_attention_output = torch.cat((blockwise_attention_output, attention_output), dim=2)
# blockwise_attention_output = blockwise_attention_output / torch.sqrt(torch.tensor(blockwise_attention_output.size(-1), dtype=torch.float32))
# feedforward_output = self.feedforward(blockwise_attention_output)
# residual_output = query_blocks[:, outer, :, :] + feedforward_output
# query_blocks[:, outer, :, :] = self.layer_norm1(residual_output)
# query_blocks = self.layer_norm2(query_blocks.view(batch_size, num_blocks*self.block_size, self.num_heads*self.hidden_size)).view(batch_size, num_blocks, self.block_size, self.num_heads*self.hidden_size)
# return query_blocks.view(batch_size, seq_len, self.num_heads*self.hidden_size)
| Blockwise-Parallel-Transformer-main | blockwise_parallel/blockwise_parallel_torch.py |
import torch
import torch.nn as nn
class BlockwiseParallelTransformer(nn.Module):
def __init__(self, input_dim, output_dim, head_dim, num_heads, num_query_blocks, num_kv_blocks):
super(BlockwiseParallelTransformer, self).__init__()
self.query_blocks = num_query_blocks
self.kv_blocks = num_kv_blocks
self.input_dim = input_dim
self.output_dim = output_dim
self.head_dim = head_dim
self.num_heads = num_heads
self.query_layer = nn.Linear(input_dim, num_heads * head_dim)
self.key_layer = nn.Linear(input_dim, num_heads * head_dim)
self.value_layer = nn.Linear(input_dim, num_heads * head_dim)
self.ffn = nn.Sequential(
nn.Linear(input_dim, output_dim),
nn.ReLU(),
nn.Linear(output_dim, input_dim),
)
def forward(self, x):
b, n, _ = x.shape
q_chunk_size = n // self.query_blocks
kv_chunk_size = n // self.kv_blocks
outputs = torch.zeros_like(x)
for q_idx in range(self.query_blocks):
q_chunk_start = q_idx * q_chunk_size
q_chunk_end = (q_idx + 1) * q_chunk_size
q_ = self.query_layer(x[:, q_chunk_start:q_chunk_end])
q = q_.view(b, q_chunk_size, self.num_heads, self.head_dim)
q = q / torch.sqrt(torch.tensor(self.head_dim).float())
attn_numerator = torch.zeros_like(q)
attn_denominator = torch.zeros_like(q)
for kv_idx in range(self.kv_blocks):
kv_chunk_start = kv_idx * kv_chunk_size
kv_chunk_end = (kv_idx + 1) * kv_chunk_size
k_ = self.key_layer(x[:, kv_chunk_start:kv_chunk_end])
v_ = self.value_layer(x[:, kv_chunk_start:kv_chunk_end])
k = k_.view(b, kv_chunk_size, self.num_heads, self.head_dim)
v = v_.view(b, kv_chunk_size, self.num_heads, self.head_dim)
                attn_weight = torch.einsum('bqhd,bkhd->bqhk', q, k)
max_score, _ = torch.max(attn_weight, dim=-1, keepdim=True)
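                # Stabilize the exponentials with this chunk's max; note that no running max is carried across key/value chunks here.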
exp_weight = torch.exp(attn_weight - max_score)
                attn_numerator += torch.einsum('bqhk,bkhf->bqhf', exp_weight, v)
attn_denominator += exp_weight.sum(dim=-1, keepdim=True)
attn_out = (attn_numerator / attn_denominator)
            attn_out = attn_out.contiguous().view(b, q_chunk_size, self.num_heads * self.head_dim)
ffn_out = self.ffn(attn_out + x[:, q_chunk_start:q_chunk_end])
outputs[:, q_chunk_start:q_chunk_end] = ffn_out + attn_out + x[:, q_chunk_start:q_chunk_end]
return outputs
# input sequence
batch_size = 2
seq_len = 1024
input_size = 512
x = torch.randn(batch_size, seq_len, input_size)
# define params to match the constructor signature (input_dim, output_dim, head_dim, num_heads, num_query_blocks, num_kv_blocks)
num_heads = 8
head_dim = input_size // num_heads
num_query_blocks = 16
num_kv_blocks = 16
# create an instance of the blockwise parallel transformer
model = BlockwiseParallelTransformer(input_size, input_size, head_dim, num_heads, num_query_blocks, num_kv_blocks)
#pass the input sequence to the module to get the output
output = model(x)
print(output.shape) | Blockwise-Parallel-Transformer-main | blockwise_parallel/test1.py |
# from blockwise_parallel.blockwise_paralle import BlockwiseParallelTransformerAttention
# from blockwise_parallel.test1 import BlockwiseParallelTransformer/
from blockwise_parallel.blockwise_parallel_jax import BlockwiseParallelTransformerAttention | Blockwise-Parallel-Transformer-main | blockwise_parallel/__init__.py |
# import jax
# import jax.numpy as jnp
# from jax import nn, lax
# from jax.experimental.stax import Dense
# class BlockwiseParallelTransformerAttention:
# def __init__(self, input_size, num_heads, hidden_size, num_layers, max_seq_len, block_size):
# self.input_size = input_size
# self.num_heads = num_heads
# self.hidden_size = hidden_size
# self.num_layers = num_layers
# self.max_seq_len = max_seq_len
# self.block_size = block_size
# self.dim_per_head = hidden_size // num_heads
# self.query_chunk_size = max_seq_len // block_size
# self.key_value_chunk_size = max_seq_len // block_size
# self.num_query_chunks = (max_seq_len + self.query_chunk_size - 1) // self.query_chunk_size
# self.num_key_value_chunks = (max_seq_len + self.key_value_chunk_size - 1) // self.key_value_chunk_size
# self.query_position_ids = jnp.arange(max_seq_len)
# self.key_value_position_ids = jnp.arange(max_seq_len)
# self.query_blocks = Dense(hidden_size, name='query')
# self.key_blocks = Dense(hidden_size, name='key')
# self.value_blocks = Dense(hidden_size, name='value')
# self.feedforward = Dense(hidden_size, name='feedforward')
# def _chunk_bias_fn(self, query_chunk_idx, key_chunk_idx):
# start = key_chunk_idx * self.key_value_chunk_size
# end = (key_chunk_idx + 1) * self.key_value_chunk_size
# bias_chunk = jnp.zeros((self.num_heads, self.query_chunk_size, self.key_value_chunk_size))
# bias_chunk = lax.dynamic_update_slice(bias_chunk, jnp.ones((self.num_heads, self.query_chunk_size, end - start)), (slice(None), slice(None), slice(start, end)))
# bias_chunk = jnp.expand_dims(bias_chunk, axis=0)
# bias_chunk = jnp.tile(bias_chunk, (query_chunk_idx.shape[0], 1, 1, 1))
# return bias_chunk
# def _query_block(self, input_chunk, query_chunk_idx):
# query_chunk = self.query_blocks(input_chunk)
# query_chunk = query_chunk / jnp.sqrt(query_chunk.shape[-1])
# return query_chunk
# def _key_value_blocks(self, carry, args):
# kv_chunk, key_chunk_idx, kv_position_ids_chunk = args
# query_chunk, query_chunk_idx = carry
# key_chunk = self.key_blocks(kv_chunk)
# value_chunk = self.value_blocks(kv_chunk)
# attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk)
# bias_chunk = self._chunk_bias_fn(query_chunk_idx, key_chunk_idx)
# bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
# attn_weights = attn_weights + bias_chunk
# max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
# exp_weights = jnp.exp(attn_weights - max_score)
# exp_values = jnp.einsum('bqhv,bvhf->bqhf', exp_weights, value_chunk)
# numerator = jax.lax.dynamic_update_slice(query_chunk, exp_values, (slice(None), key_chunk_idx, slice(None), slice(None)))
# denominator = jax.lax.dynamic_update_slice(query_chunk, exp_weights.sum(axis=-1, keepdims=True), (slice(None), key_chunk_idx, slice(None), slice(None)))
# return (numerator, denominator), None
# def __call__(self, x, deterministic=True):
# batch_size, seq_len, input_size = x.shape
# assert input_size == self.input_size, f"Input size must be {self.input_size} but got {input_size}"
# query_chunks = x.reshape(batch_size, self.num_query_chunks, self.query_chunk_size, input_size)
# query_chunks = self.query_blocks(query_chunks)
# query_chunks = query_chunks / jnp.sqrt(query_chunks.shape[-1])
# query_position_ids = jnp.tile(self.query_position_ids, (batch_size, 1))
# query_position_ids = query_position_ids.reshape(batch_size, self.num_query_chunks, self.query_chunk_size)
# query_position_ids = jax.lax.dynamic_slide(query_position_ids, (0, 0, 0), (batch_size, self.num_query_chunks, self.query_chunk_size - 1))
# query_position_ids = jnp.concatenate([query_position_ids, jnp.ones((batch_size, self.num_query_chunks, 1)) * (self.max_seq_len - 1)], axis=-1)
# query_position_ids = query_position_ids.astype(jnp.int32)
# key_value_chunks = x.reshape(batch_size, self.num_key_value_chinks, self.key_value_chunk_size, input_size)
# key_value_chunks = jax.lax.stop_gradient(key_value_chunks) if deterministic else key_value_chunks
# key_value_position_ids = jnp.tile(self.key_value_position_ids, (batch_size, 1))
# key_value_position_ids = key_value_position_ids.reshape(batch_size, self.num_value_chunks, self.key_value_chunk_size)
# key_value_position_ids = jax.lax.dynamic_slice(key_value_position_ids, (0, 0, 0), (batch_size, self.num_key_value_chunks, self.key_value_chunk_size - 1))
# key_value_position_ids = jnp.concatenate([key_value_position_ids, jnp.ones((batch_size, self.num_key_value_chunks, 1)) * (self.max_seq_len - 1)], axis=-1)
# key_value_position_ids = key_value_position_ids.astype(jnp.int32)
# query_blocks = jax.lax.map(self._query_block, query_chunks, jnp.arange(self.num_query_chunks))
# query_blocks = query_blocks.reshape(batch_size, self.num_query_chunks, self.num_heads, self.query_chunk_size, self.dim_per_head)
# query_blocks = jnp.moveaxis(query_blocks, 2, 3)
# key_value_blocks = key_value_chunks.reshape(batch_size, self.num_key_value_chunks, self.num_heads, self.key_value_chunk_size, self.dim_per_head)
# key_value_blocks = jnp.moveaxis(key_value_blocks, 2, 3)
# carry = (query_blocks, None)
# key_value_blocks = jax.lax.scan(self._key_value_blocks, carry, (key_value_blocks, jnp.arange(self.num_key_value_chunks), key_value_position_ids))[0][0]
# key_value_blocks = jnp.moveaxis(key_value_blocks, 2, 3)
# key_value_blocks = key_value_blocks.reshape(batch_size, self.num_key_value_chunks, self.key_value_chunk_size, self.hidden_size)
# output = jax.lax.map(lambda x: self.feedforward(x.reshape(-1, self.hidden_size)), key_value_blocks)
# output = output.reshape(batch_size, seq_len, self.hidden_size)
# return output
#==================================== v2
# import jax
# import jax.numpy as jnp
# from jax.experimental import stax
# class BlockwiseParallelTransformerAttention(nn.Module):
# def __init__(self, input_size, num_heads, hidden_size, num_layers, max_seq_len, block_size):
# super(BlockwiseParallelTransformerAttention, self).__init__()
# self.input_size = input_size
# self.num_heads = num_heads
# self.hidden_size = hidden_size
# self.num_layers = num_layers
# self.max_seq_len = max_seq_len
# self.block_size = block_size
# self.query_blocks = stax.Dense(hidden_size, W_init=jax.nn.initializers.glorot_normal())
# self.key_blocks = stax.Dense(hidden_size, W_init=jax.nn.initializers.glorot_normal())
# self.value_blocks = stax.Dense(hidden_size, W_init=jax.nn.initializers.glorot_normal())
# self.feedforward = nn.Sequential(
# stax.Dense(hidden_size, W_init=jax.nn.initializers.glorot_normal()),
# nn.ReLU(),
# stax.Dense(num_heads * hidden_size, W_init=jax.nn.initializers.glorot_normal())
# )
# self.layer_norm1 = nn.LayerNorm(input_size)
# self.layer_norm2 = nn.LayerNorm(num_heads * hidden_size)
# def forward(self, x):
# batch_size, seq_len, input_size = x.shape
# num_blocks = seq_len // self.block_size
# query_blocks = x[:, :num_blocks*self.block_size, :].reshape(batch_size, num_blocks, self.block_size, input_size)
# key_value_blocks = x[:, :num_blocks*self.block_size, :].reshape(batch_size, num_blocks, self.block_size, input_size)
# for i in range(self.num_layers):
# query = self.query_blocks(query_blocks.reshape(batch_size*num_blocks, self.block_size, input_size))
# key = self.key_blocks(key_value_blocks.reshape(batch_size*num_blocks, self.block_size, input_size))
# value = self.value_blocks(key_value_blocks.reshape(batch_size*num_blocks, self.block_size, input_size))
# query = query.reshape(batch_size, num_blocks, self.block_size, self.num_heads, self.hidden_size).transpose((0, 3, 1, 2, 4))
# key = key.reshape(batch_size, num_blocks, self.block_size, self.num_heads, self.hidden_size).transpose((0, 3, 1, 2, 4))
# value = value.reshape(batch_size, num_blocks, self.block_size, self.num_heads, self.hidden_size).transpose((0, 3, 1, 2, 4))
# attention_scores = jnp.matmul(query, key.transpose((0, 1, 2, 4, 3))) / jnp.sqrt(jnp.array(self.hidden_size, dtype=jnp.float32))
# attention_weights = nn.functional.softmax(attention_scores, dim=-1)
# attention_output = jnp.matmul(attention_weights, value)
# attention_output = attention_output.transpose((0, 2, 3, 1, 4)).reshape(batch_size*num_blocks, self.block_size, self.num_heads*self.hidden_size)
# attention_output = self.feedforward(attention_output)
# attention_output = attention_output.reshape(batch_size, num_blocks, self.block_size, self.num_heads, self.hidden_size).transpose((0, 2, 1, 3, 4)).reshape(batch_size, seq_len, self.num_heads*self.hidden_size)
# attention_output = self.layer_norm1(query_blocks + attention_output)
# attention_output = self.layer_norm2(attention_output)
# return attention_output
# def __call__(self, x, deterministic=True):
# batch_size, seq_len, input_size = x.shape
# assert input_size == self.input_size, f'Input size must be {self.input_size}, but got {input_size}'
# query_chunks = x.reshape(batch_size, self.num_query_chunks, self.query_chunk_size, input_size)
# query_chunks = self.query_blocks(query_chunks)
# query_chunks = query_chunks / jnp.sqrt(query_chunks.shape[-1])
# kv_chunks = x.reshape(batch_size, self.num_key_value_chunks, self.key_value_chunk_size, input_size)
# kv_chunks = self.key_blocks(kv_chunks), self.value_blocks(kv_chunks)
# init_carry = (jnp.zeros((batch_size, self.query_chunk_size, self.num_heads, self.dim_per_head)),
# jnp.zeros((batch_size, self.query_chunk_size, self.num_heads, self.dim_per_head)),
# (-jnp.inf) * jnp.ones((batch_size, self.query_chunk_size, self.num_heads, 1)))
# def attention_block(carry, args):
# query_chunk, query_chunk_idx = carry
# kv_chunk, key_chunk_idx, kv_position_ids_chunk = args
# key_chunk, value_chunk = kv_chunk
# attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk)
# bias_chunk = self._chunk_bias_fn(query_chunk_idx, key_chunk_idx)
# bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
# attn_weights = attn_weights + bias_chunk
# max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
# exp_weights = jnp.exp(attn_weights - max_score)
# exp_values = jnp.einsum('bqhv,bvhf->bqhf', exp_weights, value_chunk)
# numerator = jax.lax.dynamic_update_slice(query_chunk, exp_values, (slice(None), query_chunk_idx, slice(None), slice(None)))
# denominator = jax.lax.dynamic_update_slice(query_chunk, exp_weights.sum(axis=-1, keepdims=True), (slice(None), query_chunk_idx, slice(None), slice(None)))
# return (numerator, denominator), None
# def combine_blocks(carry, args):
# query_chunk, query_chunk_idx = carry
# numerator, denominator = args
# numerator = jnp.concatenate([query_chunk, numerator], axis=2)
# denominator = jnp.concatenate([jnp.ones_like(query_chunk), denominator], axis=2)
# attn_output = jnp.sum(numerator / denominator, axis=2)
# attn_output = attn_output.reshape(batch_size, seq_len, self.hidden_size)
# attn_output = attn_output + x
# return (attn_output, query_chunk_idx + 1), None
# def feedforward_block(x):
# hidden = self.feedforward(x)
# hidden = nn.gelu(hidden)
# return hidden + x
# for layer_idx in range(self.num_layers):
# query_chunk_idx = 0
# carry = (query_chunks[:, query_chunk_idx], query_chunk_idx)
# for key_chunk_idx in range(self.num_key_value_chunks):
# kv_chunk = kv_chunks[:, key_chunk_idx]
# kv_position_ids_chunk = self.key_value_position_ids[key_chunk_idx * self.key_value_chunk_size:(key_chunk_idx + 1) * self.key_value_chunk_size]
# carry, _ = BlockParallel(self.num_heads)(attention_block, carry, (kv_chunk, key_chunk_idx, kv_position_ids_chunk))
# attn_output, _ = BlockParallel()(combine_blocks, carry, None)
# x = attn_output
# x = BlockParallel()(feedforward_block, x)
# return x
# # for key_chunk_idx in range(self.num_key_value_chunks):
# # for key_chunk_idx in range(self.num_key_value_chunks):
# # key_value_chunk = kv_chunks[:, key_chunk_idx]
# # key_value_position_ids_chunk = self.key_value_position_ids[key_chunk_idx * self.key_value_chunk_size:(key_chunk_idx + 1) * self.key_value_chunk_size]
# # carry, _ = lax.scan(self._key_value_blocks, carry, (key_value_chunk, key_chunk_idx, key_value_position_ids_chunk))
# # numerator, denominator, bias = carry
# # attn_weights = numerator / denominator
# # attn_weights = jax.lax.dynamic_update_slice(attn_weights, bias, (slice(None), slice(None), slice(None), 0))
# # attn_weights = nn.softmax(attn_weights, axis=-2)
# # attn_weights = jax.lax.dynamic_update_slice(attn_weights, jnp.zeros_like(bias), (slice(None), slice(None), slice(None), 0))
# # value_chunk = jnp.einsum('bqhv,bvhf->bqhf', attn_weights, kv_chunks)
# # value_chunk = value_chunk.reshape(batch_size, self.num_heads * self.query_chunk_size, self.dim_per_head)
# # value_chunk = self.feedforward(value_chunk)
# # value_chunk = value_chunk.reshape(batch_size, self.num_heads, self.query_chunk_size, self.dim_per_head)
# # value_chunk = jnp.moveaxis(value_chunk, 1, 2)
# # if query_chunk_idx == 0:
# # output = value_chunk
# # else:
# # output = jnp.concatenate([output, value_chunk], axis=2)
# # output = output.reshape(batch_size, seq_len, self.hidden_size)
# # return output
# # # def _key_value_blocks(cell, carry, args):
# # # kv_chunk, key_chunk_idx, kv_position_ids_chunk = args
# # # query_chunk, query_chunk_idx = carry
# # # key_chunk = self.key_blocks(kv_chunk)
# # # value_chunk = self.value_blocks(kv_chunk)
# # # attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk)
# # # bias_chunk = self._chunk_bias_fn(query_chunk_idx, key_chunk_idx)
# # # bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
# # # attn_weights = attn_weights + bias_chunk
# # # max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
# # # exp_weights = jnp.exp(attn_weights - max_score)
# # # exp_values = jnp.einsum('bqhv,bvhf->bqhf', exp_weights, value_chunk)
# # # numerator = jax.lax.dynamic_update_slice(query_chunk, exp_values, (slice(None), key_chunk_idx, slice(None), slice(None)))
# # # denominator = jax.lax.dynamic_update_slice(query_chunk, exp_weights.sum(axis=-1, keepdims=True), (slice(None), key_chunk_idx, slice(None), slice(None)))
# # # return (numerator, denominator), None
# # # for query_chunk_idx in range(self.num_query_chunks):
# # # query_chunk = self._query_block(query_chunks[:, query_chunk_idx], query_chunk_idx)
# # # for key_value_chunk_idx in range(self.num_key_value_chunks):
# # # kv_chunk = kv_chunks[:, key_value_chunk_idx, :, :]
# # # init_carry = (query_chunk, query_chunk_idx)
# # # (numerator, denominator), _ = lax.scan(_key_value_blocks, init_carry, (kv_chunk, key_value_chunk_idx))
# # # attention_output_chunk = numerator / denominator
# # # attention_output_chunk = self.feedforward(attention_output_chunk)
# # # query_chunk = query_chunks[:, query_chunk_idx]
# # # attention_output_chunk = attention_output_chunk + query_chunk
# # # attention_output_chunk = nn.LayerNorm(attention_output_chunk)
# # # query_chunks = jax.lax.dynamic_update_slice(query_chunks, attention_output_chunk, (slice(None), query_chunk_idx, slice(None), slice(None)))
# # # attention_output = query_chunks.reshape(batch_size, seq_len, self.hidden_size)
# # # return attention_output
# def BlockParallel(num_blocks=None):
# def decorator(f):
# def wrapper(*args, **kwargs):
# if num_blocks is None:
# num_blocks = jax.local_device_count()
# block_size = args[0].shape[0] // num_blocks
# blocks = [jax.lax.dynamic_slice_in_dim(args[0], i * block_size, block_size, axis=0) for i in range(num_blocks)]
# args = [(block,) + args[1:] for block in blocks]
# outputs = jax.pmap(f)(*args, **kwargs)
# return jnp.concatenate(outputs, axis=0)
# return wrapper
# # return decorator
#==================================== v3
import functools
import json
import math
from functools import partial
from typing import Callable, NamedTuple, Optional
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks, make_causal_mask
from jax import lax
from jax import numpy as jnp
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
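# large negative value used to mask out attention logits, plus default query/key chunk sizes for the blockwise scans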
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
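# build a (num_pos, dim) table of sinusoidal position features; the first half of the last dim holds sines, the second half cosines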
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
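# rotary position embedding helpers: rotate_every_two swaps adjacent feature pairs and apply_rotary_pos_emb mixes queries/keys with the sin/cos table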
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
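# one transformer sub-layer (pre-LN qkv projections + feedforward); forward_qkv / forward_ffn are exposed separately so the blockwise wrapper below can call them chunk by chunk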
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
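# full block: standard cached attention for single-token autoregressive decoding, blockwise chunked attention + feedforward for full-sequence passes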
class AttentionBlock(nn.Module):
q_chunk_size: int
k_chunk_size: int
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = 'nothing_saveable'
prevent_cse: bool = False
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
states from previous steps. This function is slighly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
query = query / jnp.sqrt(query.shape[-1])
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
# use standard dot product attention since query length is 1
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
                dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
else:
attn_output = blockwise_compute_attn(
query,
key,
value,
bias=attention_bias,
deterministic=not deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.attn_pdrop,
causal_mask=self.causal,
query_chunk_size=self.q_chunk_size,
key_chunk_size=self.k_chunk_size,
dtype=self.dtype,
policy=self.policy,
precision=None,
prevent_cse=self.prevent_cse,
)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = blockwise_compute_ffn(
self.attn,
hidden_states + attn_output,
chunk_size=self.q_chunk_size,
deterministic=deterministic,
policy=self.policy,
prevent_cse=self.prevent_cse,
)
outputs = ffn_output + hidden_states + attn_output
return outputs
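# bias for one (query chunk, key chunk) pair: slice the global attention bias, add a causal mask shifted by the chunk offsets, and fold attention dropout in as a large negative bias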
def _chunk_attention_bias(query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask,
query_chunk_idx, key_chunk_idx):
query_offset = query_chunk_idx * query_chunk_size
key_offset = key_chunk_idx * key_chunk_size
chunk_bias = jnp.zeros((1, 1, 1, 1))
if bias is not None:
chunk_bias = lax.dynamic_slice(
bias,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(*bias.shape[:2], min(bias.shape[-2], query_chunk_size), min(bias.shape[-1], key_chunk_size)),
)
if causal_mask:
query_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(query_chunk_size, 1), dimension=0)
key_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(1, key_chunk_size), dimension=1)
offset = query_offset - key_offset
query_idx += offset
causal_mask_value = (query_idx < key_idx) * MASK_VALUE
chunk_bias += causal_mask_value.reshape(1, 1, *causal_mask_value.shape)
if not deterministic and attn_pdrop > 0.0:
attn_dropout_slice = lax.dynamic_slice(
attn_dropout,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(
*attn_dropout.shape[:2],
min(attn_dropout.shape[-2], query_chunk_size),
min(attn_dropout.shape[-1], key_chunk_size),
),
)
chunk_bias -= attn_dropout_slice * 1e6
return chunk_bias
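# running state for the streaming softmax over key/value chunks: partial numerator, partial denominator and the max logit seen so far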
class Carry(NamedTuple):
numerator: jax.Array
denominator: jax.Array
max_so_far: jax.Array
def blockwise_compute_attn(query, key, value,
bias=None,
deterministic=False,
dropout_rng=None,
attn_pdrop=0.0,
causal_mask=True,
query_chunk_size=None,
key_chunk_size=None,
dtype=jnp.float32,
policy='nothing_saveable',
precision=lax.Precision.HIGHEST,
prevent_cse=False,):
q_len = query.shape[1]
kv_len = key.shape[1]
query = rearrange(query, 'b (n c) h q -> b n c h q', c=query_chunk_size)
key, value = map(lambda t: rearrange(t, 'b (n c) h v -> b n c h v', c=key_chunk_size), (key, value))
query, key, value = map(lambda t: rearrange(t, 'b n c h d -> n b c h d'), (query, key, value))
num_q, batch, _, num_heads, dim_per_head = query.shape
num_kv, _, _, _, _ = key.shape
for bias_dim, broadcast_dim in zip(bias.shape, (batch, num_heads, q_len, kv_len)):
assert bias_dim == 1 or bias_dim == broadcast_dim
if not deterministic and attn_pdrop > 0.0:
attn_dropout_rng, dropout_rng = jax.random.split(dropout_rng)
attn_dropout = jax.random.bernoulli(attn_dropout_rng, attn_pdrop, (batch, num_heads, q_len, kv_len))
else:
attn_dropout = None
_chunk_bias_fn = functools.partial(
_chunk_attention_bias,
query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask)
def _query_chunk_attention(args):
query_chunk, query_chunk_idx = args
@functools.partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def summarize_chunk(carry, args):
key_chunk, value_chunk, key_chunk_idx = args
(numerator, denominator, prev_max_score) = carry
attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk, precision=precision)
bias_chunk = _chunk_bias_fn(query_chunk_idx, key_chunk_idx)
bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
attn_weights = attn_weights + bias_chunk
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
max_score = jnp.maximum(prev_max_score, max_score)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = jnp.einsum(
'bqhv,bvhf->bqhf', exp_weights, value_chunk, precision=precision
)
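            # online softmax: rescale the running sums by exp(prev_max - new_max) so chunks combine without ever materializing the full attention matrix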
correction = jnp.exp(prev_max_score - max_score)
numerator = numerator * correction + exp_values
denominator = denominator * correction + exp_weights.sum(axis=-1, keepdims=True)
return Carry(numerator, denominator, max_score), None
init_carry = Carry(
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
(-jnp.inf) * jnp.ones((batch, query_chunk_size, num_heads, 1), dtype=dtype),
)
(numerator, denominator, max_score), _ = lax.scan(
summarize_chunk, init_carry, xs=(key, value, jnp.arange(0, num_kv))
)
outputs = (numerator / denominator).astype(dtype)
return outputs
_, res = lax.scan(
lambda _, x: ((), _query_chunk_attention(x)),
(), xs=(query, jnp.arange(0, num_q))
)
res = rearrange(res, 'n b c h d -> b (n c) h d')
return res
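# apply the feedforward sub-block chunk by chunk, scanning over query chunks with rematerialization to keep activation memory flat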
def blockwise_compute_ffn(cell, inputs, chunk_size, deterministic, policy, prevent_cse):
inputs = rearrange(inputs, 'b (n c) d -> b n c d', c=chunk_size)
inputs = rearrange(inputs, 'b n c d -> n b c d')
num_q, _, _, _ = inputs.shape
def ffn(cell, _, hidden_states):
outputs = cell.forward_ffn(hidden_states, deterministic=deterministic)
return _, outputs
ffn_remat = nn.remat(
ffn,
variables="params",
rngs={"params" : False},
prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy),
)
_, res = nn.scan(
ffn_remat,
variable_broadcast="params",
split_rngs={"params": False},
in_axes=0,
out_axes=0,
length=num_q,
)(cell, None, inputs)
res = rearrange(res, 'n b c d -> b (n c) d')
return res
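# language-model head applied chunk by chunk, mirroring the blockwise feedforward above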
class Blockwise_LM_Head(nn.Module):
vocab_size: int
chunk_size: int
policy: str = 'nothing_saveable'
dtype: jnp.dtype = jnp.float32
prevent_cse: bool = False
def setup(self):
self.lm_head = nn.Dense(
self.vocab_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
def __call__(self, inputs):
inputs = rearrange(inputs, 'b (n c) d -> b n c d', c=self.chunk_size)
inputs = rearrange(inputs, 'b n c d -> n b c d')
num_q, _, _, _ = inputs.shape
def lm_head(cell, _, hidden_states):
outputs = cell(hidden_states)
return _, outputs
lm_head_remat = nn.remat(
lm_head,
variables="params",
rngs={"params" : False},
prevent_cse=self.prevent_cse,
policy=get_gradient_checkpoint_policy(self.policy),
)
_, res = nn.scan(
lm_head_remat,
variable_broadcast="params",
split_rngs={"params": False},
in_axes=0,
out_axes=0,
length=num_q,
)(self.lm_head, None, inputs)
res = rearrange(res, 'n b c d -> b (n c) d')
return res
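# token-level cross entropy and accuracy computed over chunks of the flattened sequence, with checkpointing so per-chunk softmax intermediates are recomputed instead of stored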
def blockwise_cross_entropy(logits, tokens, valid=None,
chunk_size=None, policy=None, prevent_cse=None):
if valid is None:
valid = jnp.ones(tokens.shape[:2])
valid = valid.astype(jnp.float32)
logits = jnp.reshape(logits, (-1, logits.shape[-1]))
tokens = jnp.reshape(tokens, (-1,))
valid = jnp.reshape(valid, (-1,))
def _cross_entropy_loss_and_accuracy(logits, tokens, valid):
valid_text_length = jnp.maximum(jnp.sum(valid, axis=-1), 1e-10)
token_log_prob = jnp.squeeze(
jnp.take_along_axis(
jax.nn.log_softmax(logits, axis=-1),
jnp.expand_dims(tokens, -1),
axis=-1,
),
-1,
)
token_log_prob = jnp.where(valid > 0.0, token_log_prob, jnp.array(0.0))
correct = jnp.where(
valid > 0.0,
jnp.argmax(logits, axis=-1) == tokens,
jnp.array(False)
)
return token_log_prob, correct, valid_text_length
@partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def _loss_and_accuracy(carry, args):
loss, accuracy, num = carry
logits, tokens, valid = args
token_log_prob, correct, valid_text_length = \
_cross_entropy_loss_and_accuracy(logits, tokens, valid)
loss = loss + jnp.sum(token_log_prob, axis=-1) / valid_text_length
accuracy = accuracy + jnp.sum(correct, axis=-1) / valid_text_length
num = num + 1
return (loss, accuracy, num), None
num_chunk = logits.shape[0] // chunk_size
logits = rearrange(logits, '(n c) d -> n c d', c=chunk_size)
tokens = rearrange(tokens, '(n c) -> n c', c=chunk_size)
valid = rearrange(valid, '(n c) -> n c', c=chunk_size)
(loss, accuracy, num), _ = jax.lax.scan(
_loss_and_accuracy, (0.0, 0.0, 0), xs=(logits, tokens, valid),
length=num_chunk,
)
loss = - loss / num
accuracy = accuracy / num
return loss, accuracy
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/blockwise_parallel_simplified'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
| Blockwise-Parallel-Transformer-main | blockwise_parallel/blockwise_parallel_jax.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from einops import rearrange
from functools import partial
def quick_gelu(x):
return x * torch.sigmoid(1.702 * x)
ACT2FN = {
"gelu": F.gelu,
"relu": F.relu,
"silu": F.silu,
"swish": F.swish,
"gelu_new": quick_gelu,
"quick_gelu": quick_gelu,
}
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i, j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
    out[:, sentinel:] = cos
return torch.tensor(out)
def rotate_every_two(tensor):
rotate_half_tensor = torch.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), dim=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
    sin_pos = torch.repeat_interleave(sin_pos[:, :, None, :], 2, dim=-1)
    cos_pos = torch.repeat_interleave(cos_pos[:, :, None, :], 2, dim=-1)
    return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
class BlockwiseParallel(nn.Module):
def __init__(self, hidden_size, num_heads, rotary_dim, intermediate_size, layer_norm_epsilon=1e-5,
activation_function="gelu", resid_pdrop=0.0, max_position_embeddings=1024, dtype=torch.float32,
                 causal=True, float32_logits=False):
super().__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.rotary_dim = rotary_dim
self.intermediate_size = intermediate_size
self.layer_norm_epsilon = layer_norm_epsilon
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.max_position_embeddings = max_position_embeddings
self.dtype = dtype
        self.causal = causal
self.float32_logits = float32_logits
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
        dense = partial(
            nn.Linear,
            self.embed_dim,
            self.embed_dim,
            bias=False,
            dtype=self.dtype
        )
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
        self.ln_1 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_epsilon, elementwise_affine=True)
self.ln_2 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_epsilon, elementwise_affine=True)
self.fc_in = nn.Linear(self.hidden_size, self.intermediate_size, dtype=self.dtype)
self.fc_out = nn.Linear(self.intermediate_size, self.hidden_size, dtype=self.dtype)
        self.act = ACT2FN[self.activation_function]
self.resid_pdrop = nn.Dropout(p=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
        return hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.view(hidden_states.shape[:-2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_pdrop(attn_output)
return attn_output
def forward_qkv(self, hidden_states, position_ids, deterministic=True):
hidden_states = self.ln_1(hidden_states)
q = self.q_proj(hidden_states)
k = self.k_proj(hidden_states)
v = self.v_proj(hidden_states)
q = self._split_heads(q)
k = self._split_heads(k)
v = self._split_heads(v)
        if self.rotary_dim is not None and self.rotary_dim > 0:
            sincos = self.embed_positions[position_ids]
            sincos = torch.chunk(sincos, 2, dim=-1)
            q, k = apply_rotary_pos_emb(q, sincos), apply_rotary_pos_emb(k, sincos)
return q, k, v
def forward(self, hidden_states, position_ids, attention_mask=None, deterministic=True):
q, k, v = self.forward_qkv(hidden_states, position_ids, deterministic)
attn_output, attn_weights = self._attn(q, k, v, attention_mask, deterministic)
attn_output = self.attn_out_proj(attn_output, deterministic)
hidden_states = hidden_states + attn_output
hidden_states = self.ln_2(hidden_states)
ffn_output = self.fc_in(hidden_states)
ffn_output = self.act(ffn_output)
ffn_output = self.fc_out(ffn_output)
ffn_output = self.resid_pdrop(ffn_output)
hidden_states = hidden_states + ffn_output
return hidden_states, attn_weights
    def _attn(self, q, k, v, attention_mask=None, deterministic=True):
        # move heads next to the batch dim: (b, s, h, d) -> (b, h, s, d)
        q, k, v = map(lambda t: t.transpose(1, 2), (q, k, v))
        attn_weights = torch.matmul(q, k.transpose(-1, -2)) / (q.shape[-1] ** 0.5)
        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask
        attn_weights = F.softmax(attn_weights, dim=-1)
        if not deterministic:
            attn_weights = self.resid_pdrop(attn_weights)
        attn_output = torch.matmul(attn_weights, v)
        attn_output = attn_output.transpose(1, 2)  # back to (b, s, h, d) for _merge_heads
        return attn_output, attn_weights
| Blockwise-Parallel-Transformer-main | blockwise_parallel/blockwise_torch.py |
import torch
import numpy
#preexisting arrays
w = torch.tensor([1, 2, 3])
#tuple
w = torch.tensor((1, 2, 3))
# numpy array
w = torch.tensor(numpy.array([1, 2, 3]))
#init by sized
w = torch.empty(100, 200) #not initialized
w = torch.zeros(100, 200) # elements with 0.0
w = torch.ones(100, 200) # elements with 1.0
#random tensor inits
w = torch.rand(100, 200) #creates 100 x 200 tensor with elements from a uniform distribution on the interval [0, 1)
w = torch.randn(100, 200) # elements are random numbers from a normal distribution, with a mean of 0 and a variance of 1
w = torch.randint(5, 10, (100, 200)) # elements are random integers from 5 (inclusive) to 10 (exclusive)
#init with specified data type and device
w = torch.empty((100, 200), dtype=torch.float64,
device='cuda')
#init to have the same size and data type;
x = torch.empty_like(w)
#specify type using dtype
w = torch.tensor([1, 2, 3], dtype=torch.float32)
#use casting method to cast to a new data type
w.int() # w remains a float32 after the cast
w = w.int() # w changes to an int 32 after the cast
#use the to() method to cast to a new type
w = w.to(torch.float64)
w = w.to(dtype=torch.float64)
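#ops on tensors with different dtypes follow type promotion; mixing int and float tensors yields a float result, e.g.:
x = torch.ones(3, dtype=torch.int64)
y = torch.ones(3, dtype=torch.float32)
print((x + y).dtype) # torch.float32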
#python converts data types in ops | TorchPractice-main | TorchPractice/tensors.py |
TorchPractice-main | TorchPractice/__init__.py |
|
from setuptools import setup, find_packages
setup(
name = 'optimus-prime-transformers',
packages = find_packages(exclude=['examples']),
version = '1.2.1',
license='MIT',
description = 'optimus-prime - Pytorch',
author = 'Kye Gomez',
author_email = '[email protected]',
url = 'https://github.com/kyegomez/Optimus-Prime',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers'
],
install_requires=[
'torch',
'einops'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| Optimus-Prime-main | setup.py |
import gzip
import tqdm
import torch
import random
import numpy as np
from torch.utils.data import Dataset, DataLoader
from optimus_prime import TransformerWrapper, Decoder, AutoregressiveWrapper, AndromedaEmbedding
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 1
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 1024
SAVE_EVERY=500
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# Model
model = TransformerWrapper(
num_tokens=20000,
max_seq_len=8192,
use_abs_pos_emb=False,
embedding_provider=AndromedaEmbedding(),
attn_layers=Decoder(
dim=512,
depth=6,
heads=8,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=4,
rotary_xpos=True,
attn_flash=True,
# shift_tokens=1,
attn_one_kv_head=True,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
)
)
model = AutoregressiveWrapper(model)
model.cuda()
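# prepare enwik8 data: read the first 95M bytes, use 90M for training and the remaining 5M for validation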
with gzip.open('./enwik8.gz') as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE, drop_last = True))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE, drop_last = True))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
    _, loss = model(next(train_loader)) # AutoregressiveWrapper returns (logits, loss)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
            _, loss = model(next(val_loader)) # AutoregressiveWrapper returns (logits, loss)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str) | Optimus-Prime-main | train.py |
import torch
from optimus_prime.attend import Attend
model = Attend(dim=512, dim_head=64, heads=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, Flash2=True)
q = torch.randn(1, 8, 512, 64)
k = torch.randn(1, 8, 512, 64)
v = torch.randn(1, 8, 512, 64)
out, _ = model(q, k, v)
assert out.shape == (1, 8, 512, 64)
| Optimus-Prime-main | simple.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
from optimus_prime.autoregressive_wrapper import top_k, eval_decorator
# helper functions
def exists(val):
return val is not None
def divisible_by(numer, denom):
return (numer % denom) == 0
# xl autoregressive wrapper class
class XLAutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
mems = None,
**kwargs
):
device, max_seq_len = start_tokens.device, self.max_seq_len
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
*all_leading_tokens, _ = start_tokens.split(max_seq_len, dim = -1)
# catch the memory up to the current segment
for leading_tokens in all_leading_tokens:
_, mems = self.net(
leading_tokens,
mems = mems,
return_mems = True,
**kwargs
)
# now start sampling from the current segment
curr_pos = len(all_leading_tokens) * max_seq_len
curr_mems = mems
out = start_tokens
for _ in range(seq_len):
curr_segment_len = out.shape[-1]
is_last_segment_tokens = divisible_by(curr_segment_len, max_seq_len)
x = out[:, curr_pos:]
logits, mems = self.net(
x,
mems = curr_mems,
return_mems = True,
**kwargs
)
logits = logits[:, -1]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
if is_last_segment_tokens:
curr_pos = curr_segment_len
curr_mems = mems
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(
self,
x,
mems = None,
**kwargs
):
ignore_index, max_seq_len = self.ignore_index, self.max_seq_len
x, labels = x[:, :-1], x[:, 1:]
seq_len = x.shape[1]
# prepare chunks
split_x = x.split(max_seq_len, dim = -1)
split_labels = labels.split(max_seq_len, dim = -1)
loss_weights = tuple(map(lambda t: t.shape[-1] / seq_len, split_x))
# go through each chunk and derive weighted losses
total_loss = 0.
for chunk, chunk_labels, loss_weight in zip(split_x, split_labels, loss_weights):
logits, mems = self.net(
chunk,
mems = mems,
return_mems = True,
**kwargs
)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
chunk_labels,
ignore_index = ignore_index
)
total_loss = total_loss + loss * loss_weight
return total_loss
| Optimus-Prime-main | optimus_prime/xl_autoregressive_wrapper.py |
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, return_loss=True, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
if return_loss:
return logits, loss
return logits
| Optimus-Prime-main | optimus_prime/autoregressive_wrapper.py |
# TODO: add the ability to choose your own tokenizer and embedder, and work out what else is needed for production-level training
import math
from random import random
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from functools import partial, wraps
from inspect import isfunction
from dataclasses import dataclass
from typing import List
from einops import rearrange, repeat
from optimus_prime.attend import Attend, Intermediates
from optimus_prime.autoregressive_wrapper import AutoregressiveWrapper
from abc import ABC, abstractmethod
# import bitsandbytes as bnb
# constants
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: List[Tensor] = None
attn_intermediates: List[Intermediates] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
#tokenization
class BaseTokenizer(ABC):
@abstractmethod
def tokenize(self, text: str) -> List[int]:
pass
class CustomTokenizer(BaseTokenizer):
def tokenize(self, text: str) -> List[int]:
# Your custom tokenization algorithm
tokens = ...
return tokens
# embedding
class BaseEmbedding(ABC):
@abstractmethod
def get_embedding(self, num_tokens: int, dim: int) -> nn.Module:
# Custom embedding function or model
embedding = ...
return embedding
class AndromedaEmbedding(BaseEmbedding):
def get_embedding(self, num_tokens: int, dim: int) -> nn.Module:
embedding = nn.Embedding(num_tokens, dim)
return embedding
# class AndromedaBnBEmbedding(BaseEmbedding):
# def get_embedding(self, num_tokens: int, dim: int, padding_idx: int = 0) -> bnb.nn.modules:
# embedding = bnb.nn.modules.Embedding(num_tokens, dim, padding_idx)
# return embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, embedding_provider: BaseEmbedding, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = embedding_provider.get_embedding(num_tokens, dim)
# nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert (dim % 2) == 0
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
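# minimal usage sketch: a T5-style relative position bias for an 8-head attention map over
# 16 query / 16 key positions; the scale follows the dim_head ** 0.5 convention used when
# AttentionLayers constructs this module further below (values are illustrative)
def _example_relative_position_bias():
    rel_pos = RelativePositionBias(scale = 64 ** 0.5, causal = True, heads = 8)
    bias = rel_pos(16, 16)   # (8, 16, 16), added to the attention logits before softmax
    return bias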
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
class LearnedAlibiPositionalBias(AlibiPositionalBias):
def __init__(self, heads, total_heads):
super().__init__(heads, total_heads)
log_slopes = torch.log(self.slopes)
self.learned_logslopes = nn.Parameter(log_slopes)
def forward(self, i, j):
        h, device = self.heads, self.device
def get_slopes(param):
return pad_at_dim(param.exp(), (0, h - param.shape[0]), dim = -2)
if exists(self.bias) and self.bias.shape[-1] >= j:
bias = self.bias[..., :i, :j]
else:
bias = self.get_bias(i, j, device)
self.register_buffer('bias', bias, persistent = False)
slopes = get_slopes(self.learned_logslopes)
bias = bias * slopes
return bias
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
        interpolated = False,
        max_position_embeddings = None, # e.g. 2048
        base = None, # e.g. 10000
    ):
        super().__init__()
        # store the interpolation settings; forward() reads these attributes
        self.interpolated = interpolated
        self.max_position_embeddings = max_position_embeddings
        base = default(base, 10000)
        inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer('inv_freq', inv_freq)
        if not use_xpos:
            self.register_buffer('scale', None)
            return
        scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
        self.scale_base = scale_base
        self.register_buffer('scale', scale)
def forward(self, seq_len, device):
if self.interpolated:
max_position_embeddings = self.max_position_embeddings
self.max_seq_len_cached = max_position_embeddings
t = torch.arange(
self.max_seq_len_cached,
device=self.inv_freq.device,
dtype=self.inv_freq.dtype,
)
            # positional interpolation: compress positions by a fixed factor,
            # kept in a local so the xpos `scale` buffer is not clobbered
            interpolation_factor = 1 / 4
            t = t * interpolation_factor
else:
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
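# minimal usage sketch of the rotary helpers above: generate frequencies for a sequence,
# then rotate the first `rotary_emb_dim` channels of a (batch, heads, seq, dim_head)
# query tensor while leaving the remaining channels untouched (illustrative shapes;
# this mirrors how Attention.forward applies `freqs` below)
def _example_rotary_embedding():
    rotary = RotaryEmbedding(32)
    freqs, xpos_scale = rotary(128, torch.device('cpu'))   # (128, 32); scale is 1. without xpos
    q = torch.randn(1, 8, 128, 64)
    q_rot, q_pass = q[..., :32], q[..., 32:]
    q = torch.cat((apply_rotary_pos_emb(q_rot, freqs, xpos_scale), q_pass), dim = -1)
    return q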
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
def scale_fn(t):
return t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim, eps = 1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(self, dim_in, dim_out, activation):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate)
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
) if not glu else GLU(dim, inner_dim, activation)
self.ff = nn.Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else nn.Identity(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
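# minimal usage sketch: a GLU feedforward block mapping (batch, seq, dim) back to the same
# shape (illustrative sizes)
def _example_feedforward():
    ff = FeedForward(512, mult = 4, glu = True, dropout = 0.1)
    x = torch.randn(2, 128, 512)
    return ff(x)   # (2, 128, 512)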
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
add_zero_kv = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
value_dim_head = default(value_dim_head, dim_head)
q_dim = k_dim = dim_head * heads
v_dim = out_dim = value_dim_head * heads
self.one_kv_head = one_kv_head
if one_kv_head:
k_dim = dim_head
v_dim = value_dim_head
out_dim = v_dim * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv=add_zero_kv,
flash = flash
)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if not self.one_kv_head:
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = default(context_mask, mask)
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
max_neg_value(q)
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
        if exists(self.sparse_topk):
            # explicit top-k sparsity needs the raw similarities, which are otherwise computed
            # inside Attend, so recompute them here solely to build the mask
            kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
            dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * self.scale
            if self.sparse_topk < dots.shape[-1]:
                top, _ = dots.topk(self.sparse_topk, dim = -1)
                vk = rearrange(top[..., -1], '... -> ... 1')
                sparse_topk_mask = dots < vk
                masks.append(sparse_topk_mask)
if len(masks) > 0:
final_attn_mask = or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
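# minimal usage sketch: a single causal self-attention block over (batch, seq, dim)
# activations; `intermediates` carries the pre/post-softmax attention maps gathered by
# Attend (illustrative sizes)
def _example_attention_block():
    attn = Attention(dim = 512, heads = 8, causal = True)
    x = torch.randn(2, 128, 512)
    out, intermediates = attn(x)
    return out   # (2, 128, 512)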
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = None,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
alibi_learned = False,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_xpos_scale_base = 512,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
alibi_pos_klass = LearnedAlibiPositionalBias if alibi_learned else AlibiPositionalBias
self.rel_pos = alibi_pos_klass(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
        assert not (not pre_norm and resi_dual), 'resiDual cannot be used when not using prenorm'
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
self.cross_attend = cross_attend
norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
norm_class = RMSNorm if use_rmsnorm else norm_class
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
            assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be positive and no greater than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if (resi_dual or not pre_norm) and not is_last_layer else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
        self.layers_length = len(self.layers)  # cache the layer count now; it cannot be queried reliably later
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
ind == (self.layers_length - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm) and not self.resi_dual:
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = residual_fn(out, outer_residual)
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if self.resi_dual:
x = x + pre_norm(outer_residual)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
dropout = 0.,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
        self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else None
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
x = self.norm(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
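# minimal usage sketch: an encoder-only vision transformer classifying 256x256 RGB images
# with 32x32 patches into 1000 classes (illustrative sizes)
def _example_vit():
    vit = ViTransformerWrapper(
        image_size = 256,
        patch_size = 32,
        num_classes = 1000,
        attn_layers = Encoder(dim = 512, depth = 6, heads = 8)
    )
    img = torch.randn(1, 3, 256, 256)
    return vit(img)   # (1, 1000)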
class TransformerWrapper(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
# tokenizer: BaseTokenizer,
embedding_provider,
emb_dim = None,
max_mem_len = 0.,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1. # GLM-130B and Cogview successfully used this, set at 0.1
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
# your own tokenizer
# self.tokenizer = tokenizer
#your own embedding function
self.token_emb = TokenEmbedding(emb_dim, num_tokens, embedding_provider, l2norm_embed=l2norm_embed)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.init_()
logits_dim = default(logits_dim, num_tokens)
        self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
        # whether to prepend embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
# auto-handle masking after appending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
x = self.norm(x)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
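# minimal usage sketch: a decoder-only language model built from the pieces above, using the
# repo's default nn.Embedding-backed provider (illustrative sizes)
def _example_transformer_wrapper():
    model = TransformerWrapper(
        num_tokens = 20000,
        max_seq_len = 1024,
        embedding_provider = AndromedaEmbedding(),
        attn_layers = Decoder(dim = 512, depth = 6, heads = 8)
    )
    ids = torch.randint(0, 20000, (1, 1024))
    logits = model(ids)   # (1, 1024, 20000)
    return logits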
class ContinuousTransformerWrapper(nn.Module):
def __init__(
self,
*,
max_seq_len,
attn_layers,
dim_in = None,
dim_out = None,
emb_dim = None,
post_emb_norm = False,
emb_dropout = 0.,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
self.max_seq_len = max_seq_len
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_in = nn.Linear(dim_in, dim) if exists(dim_in) else nn.Identity()
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.project_out = nn.Linear(dim, dim_out) if exists(dim_out) else nn.Identity()
def forward(
self,
x,
return_embeddings = False,
return_intermediates = False,
mask = None,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
**kwargs
):
x = self.project_in(x)
x = x + self.pos_emb(x, pos = pos)
x = self.post_emb_norm(x)
        # whether to prepend embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
_, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
x = self.emb_dropout(x)
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
x = self.norm(x)
out = self.project_out(x) if not return_embeddings else x
if return_intermediates:
return out, intermediates
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
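# minimal usage sketch: project 32-dim continuous features through an encoder and back out
# to 32 dims, with no token embedding involved (illustrative sizes)
def _example_continuous_wrapper():
    model = ContinuousTransformerWrapper(
        max_seq_len = 256,
        dim_in = 32,
        dim_out = 32,
        attn_layers = Encoder(dim = 128, depth = 2, heads = 4)
    )
    feats = torch.randn(1, 256, 32)
    return model(feats)   # (1, 256, 32)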
class XTransformer(nn.Module):
def __init__(
self,
*,
dim,
tie_token_emb = False,
ignore_index = -100,
pad_value = 0,
deepnorm = False,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
enc_kwargs, kwargs = groupby_prefix_and_trim('enc_', kwargs)
dec_kwargs, kwargs = groupby_prefix_and_trim('dec_', kwargs)
assert 'dim' not in enc_kwargs and 'dim' not in dec_kwargs, 'dimension of either encoder or decoder must be set with `dim` keyword'
enc_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], enc_kwargs)
enc_transformer_kwargs['emb_dropout'] = enc_kwargs.pop('emb_dropout', 0)
enc_transformer_kwargs['num_memory_tokens'] = enc_kwargs.pop('num_memory_tokens', None)
enc_transformer_kwargs['scaled_sinu_pos_emb'] = enc_kwargs.pop('scaled_sinu_pos_emb', False)
        enc_transformer_kwargs['use_abs_pos_emb'] = enc_kwargs.pop('use_abs_pos_emb', True)
        # TransformerWrapper requires an embedding provider; default to the repo's nn.Embedding-backed one
        enc_transformer_kwargs['embedding_provider'] = enc_kwargs.pop('embedding_provider', AndromedaEmbedding())
dec_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], dec_kwargs)
dec_transformer_kwargs['emb_dropout'] = dec_kwargs.pop('emb_dropout', 0)
dec_transformer_kwargs['scaled_sinu_pos_emb'] = dec_kwargs.pop('scaled_sinu_pos_emb', False)
        dec_transformer_kwargs['use_abs_pos_emb'] = dec_kwargs.pop('use_abs_pos_emb', True)
        dec_transformer_kwargs['embedding_provider'] = dec_kwargs.pop('embedding_provider', AndromedaEmbedding())
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # how many tokens from the encoder to dropout when cross attending from decoder - seen in a couple papers, including Perceiver AR - this will also be very effective regularization when cross attending to very long memories
if deepnorm:
enc_kwargs['scale_residual'] = True
dec_kwargs['scale_residual'] = True
enc_depth = enc_kwargs['depth']
dec_depth = dec_kwargs['depth']
enc_kwargs['scale_residual_constant'] = 0.81 * ((enc_depth ** 4) * dec_depth) ** .0625
dec_kwargs['scale_residual_constant'] = (3 * dec_depth) ** 0.25
self.encoder = TransformerWrapper(
**enc_transformer_kwargs,
attn_layers = Encoder(dim = dim, **enc_kwargs)
)
self.decoder = TransformerWrapper(
**dec_transformer_kwargs,
attn_layers = Decoder(dim = dim, cross_attend = True, **dec_kwargs)
)
if deepnorm:
deepnorm_init(self.encoder, 0.87 * ((enc_depth ** 4) * dec_depth) ** -0.0625)
deepnorm_init(self.decoder, (12 * dec_depth) ** -0.25)
if tie_token_emb:
self.decoder.token_emb = self.encoder.token_emb
self.decoder = AutoregressiveWrapper(self.decoder, ignore_index=ignore_index, pad_value=pad_value)
@torch.no_grad()
def generate(self, seq_in, seq_out_start, seq_len, mask = None, attn_mask = None, **kwargs):
encodings = self.encoder(seq_in, mask = mask, attn_mask = attn_mask, return_embeddings = True)
return self.decoder.generate(seq_out_start, seq_len, context = encodings, context_mask = mask, **kwargs)
def forward(self, src, tgt, mask = None, attn_mask = None, src_prepend_embeds = None):
if exists(src_prepend_embeds) and exists(mask):
mask = pad_at_dim(mask, (src_prepend_embeds.shape[-2], 0), dim = -1, value = True)
enc = self.encoder(src, mask = mask, attn_mask = attn_mask, prepend_embeds = src_prepend_embeds, return_embeddings = True)
if self.training and self.cross_attn_tokens_dropout > 0:
enc, mask = dropout_seq(enc, mask, self.cross_attn_tokens_dropout)
out = self.decoder(tgt, context = enc, context_mask = mask)
return out
| Optimus-Prime-main | optimus_prime/x_transformers.py |
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
# re-export the local optimus_prime modules (not the upstream x_transformers package)
from optimus_prime.x_transformers import XTransformer, Encoder, Decoder, CrossAttender, Attention, TransformerWrapper, ViTransformerWrapper, ContinuousTransformerWrapper
from optimus_prime.autoregressive_wrapper import AutoregressiveWrapper
from optimus_prime.nonautoregressive_wrapper import NonAutoregressiveWrapper
from optimus_prime.continuous_autoregressive_wrapper import ContinuousAutoregressiveWrapper
from optimus_prime.xl_autoregressive_wrapper import XLAutoregressiveWrapper
| Optimus-Prime-main | optimus_prime/__init__.py |
import torch
from torch import nn
import torch.nn.functional as F
def exists(val):
return val is not None
class ContinuousAutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, **kwargs):
device = start_tokens.device
was_training = self.net.training
num_dims = len(start_tokens.shape)
assert num_dims >= 2, 'number of dimensions of your start tokens must be greater or equal to 2'
if num_dims == 2:
start_tokens = start_tokens[None, :]
b, t, _, device = *start_tokens.shape, start_tokens.device
self.net.eval()
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
last = self.net(x, **kwargs)[:, -1:]
out = torch.cat((out, last), dim = -2)
out = out[:, t:]
if num_dims == 2:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, **kwargs):
inp, target = x[:, :-1], x[:, 1:]
mask = kwargs.get('mask', None)
if exists(mask) and mask.shape[1] == x.shape[1]:
mask = mask[:, :-1]
kwargs['mask'] = mask
out = self.net(inp, **kwargs)
loss = F.mse_loss(out, target, reduction = 'none')
if exists(mask):
loss = loss[mask]
return loss.mean()
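# minimal usage sketch: wrap a causal continuous transformer so that forward() returns the
# next-step MSE loss (the import assumes the sibling module optimus_prime.x_transformers;
# sizes are illustrative)
def _example_continuous_autoregressive():
    from optimus_prime.x_transformers import ContinuousTransformerWrapper, Decoder
    net = ContinuousTransformerWrapper(
        max_seq_len = 256,
        dim_in = 32,
        dim_out = 32,
        attn_layers = Decoder(dim = 128, depth = 2, heads = 4)
    )
    wrapper = ContinuousAutoregressiveWrapper(net)
    seq = torch.randn(1, 128, 32)
    loss = wrapper(seq)   # scalar MSE between seq[:, 1:] and predictions from seq[:, :-1]
    return loss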
| Optimus-Prime-main | optimus_prime/continuous_autoregressive_wrapper.py |
from functools import partial
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange
from optimus_prime.flash import FlashAttention
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Tensor = None
pre_softmax_attn: Tensor = None
post_softmax_attn: Tensor = None
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
*,
        dim = None,
        dim_head = 64,
        q_bucket_size = 512,
        k_bucket_size = 1024,
        parallel = False,
        mixed_precision = False,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
scale = None,
qk_norm = False,
add_zero_kv=False,
flash = False,
Flash2 = False
):
super().__init__()
        self.scale = scale
        # heads / dim_head are kept for the Flash2 reshape path in forward()
        self.heads = heads
        self.dim_head = dim_head
self.qk_norm = qk_norm
self.causal = causal
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.add_zero_kv = add_zero_kv
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.Flash2 = Flash2
if Flash2:
self.flash_attention = FlashAttention(
dim = dim,
heads = heads,
dim_head = dim_head,
causal = causal,
q_bucket_size = q_bucket_size,
k_bucket_size = k_bucket_size,
parallel = parallel,
mixed_precision = mixed_precision
)
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = torch.ones((q_len, k_len), dtype = torch.bool, device = device).triu(k_len - q_len + 1)
mask = mask | causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, -1, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(mask, mask_value // 2)
elif causal:
causal_mask = torch.ones((q_len, k_len), dtype = torch.bool, device = device).triu(k_len - q_len + 1)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value=0.), (k, v))
if exists(mask):
                # pad with False so the extra zero key / value stays attendable
                # (under this module's convention, True positions get masked out)
                mask = F.pad(mask, (1, 0), value=False)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value=0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
# return FlashAttention(q, k, v, mask=mask, attn_bias=attn_bias )
if self.Flash2:
#reshape q, k, v before passing into flash attention
batch_size, sequence_length, dim=q.shape
q = q.view(batch_size, self.heads, sequence_length, self.dim_head)
k = k.view(batch_size, self.heads, sequence_length, self.dim_head)
            v = v.view(batch_size, self.heads, sequence_length, self.dim_head)
return self.flash_attention(q, k, v)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
dtype = dots.dtype
pre_softmax_attn = dots.clone()
mask_value = -torch.finfo(dots.dtype).max
if exists(mask):
dots = dots.masked_fill(mask, mask_value)
if self.causal:
i, j = dots.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
dots = dots.masked_fill(causal_mask, mask_value)
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
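# minimal usage sketch: run the plain (non-flash) attention path over already-projected
# per-head queries / keys / values of shape (batch, heads, seq, dim_head)
# (illustrative sizes; relies on the keyword defaults above)
def _example_attend():
    attend = Attend(causal = True)
    q = torch.randn(1, 8, 128, 64)
    k = torch.randn(1, 8, 128, 64)
    v = torch.randn(1, 8, 128, 64)
    out, intermediates = attend(q, k, v)
    return out   # (1, 8, 128, 64)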
| Optimus-Prime-main | optimus_prime/attend.py |
import math
import torch
from torch import nn, einsum
from torch.autograd.function import Function
from einops import rearrange
from torch.cuda.amp import autocast, GradScaler
from torch.nn import DataParallel
# constants
EPSILON = 1e-10
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# flash attention forwards and backwards
# flash attention v1 - https://arxiv.org/abs/2205.14135
# flash attention v2 - https://tridao.me/publications/flash2/flash2.pdf
class FlashAttentionFunction(Function):
@staticmethod
@torch.no_grad()
def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):
""" Algorithm 1 in the v2 paper """
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
o = torch.zeros_like(q)
all_row_sums = torch.zeros((*q.shape[:-1], 1), device = device)
all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, device = device)
scale = (q.shape[-1] ** -0.5)
num_row_tiles = math.ceil(q.shape[-2] / q_bucket_size)
num_col_tiles = math.ceil(k.shape[-2] / k_bucket_size)
if exists(mask) and mask.ndim == 2:
mask = rearrange(mask, 'b n -> b 1 1 n')
if not exists(mask):
col_masks = (None,) * num_col_tiles
mask = (col_masks,) * num_row_tiles
else:
mask = ((mask,) * num_row_tiles) if mask.shape[-2] == 1 else mask.split(q_bucket_size, dim = -2)
mask = tuple(((row_mask,) * num_col_tiles) if row_mask.shape[-1] == 1 else row_mask.split(k_bucket_size, dim = -1) for row_mask in mask)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
mask,
all_row_sums.split(q_bucket_size, dim = -2),
all_row_maxes.split(q_bucket_size, dim = -2),
)
for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if exists(col_mask):
attn_weights.masked_fill_(~col_mask, max_neg_value)
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
block_row_maxes = attn_weights.amax(dim = -1, keepdims = True)
new_row_maxes = torch.maximum(block_row_maxes, row_maxes)
exp_weights = torch.exp(attn_weights - new_row_maxes)
if exists(col_mask):
exp_weights.masked_fill_(~col_mask, 0.)
block_row_sums = exp_weights.sum(dim = -1, keepdims = True).clamp(min = EPSILON)
exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)
exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)
new_row_sums = exp_row_max_diff * row_sums + block_row_sums
oc.mul_(exp_row_max_diff).add_(exp_values)
row_maxes.copy_(new_row_maxes)
row_sums.copy_(new_row_sums)
oc.div_(row_sums)
lse = all_row_sums.log() + all_row_maxes
ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)
ctx.save_for_backward(q, k, v, o, lse)
return o
@staticmethod
@torch.no_grad()
def backward(ctx, do):
""" Algorithm 2 in the v2 paper """
causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args
q, k, v, o, lse = ctx.saved_tensors
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
dq = torch.zeros_like(q)
dk = torch.zeros_like(k)
dv = torch.zeros_like(v)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
do.split(q_bucket_size, dim = -2),
mask,
lse.split(q_bucket_size, dim = -2),
dq.split(q_bucket_size, dim = -2)
)
for ind, (qc, oc, doc, row_mask, lsec, dqc) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
dk.split(k_bucket_size, dim = -2),
dv.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, dkc, dvc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
p = torch.exp(attn_weights - lsec)
if exists(col_mask):
p.masked_fill_(~col_mask, 0.)
dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)
dp = einsum('... i d, ... j d -> ... i j', doc, vc)
D = (doc * oc).sum(dim = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)
dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)
dqc.add_(dq_chunk)
dkc.add_(dk_chunk)
dvc.add_(dv_chunk)
return dq, dk, dv, None, None, None, None
# main class
# just flash attention in plain pytorch
# it will be way slower than implementing it in CUDA
# for tinkering and educational purposes
class FlashAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
causal = False,
q_bucket_size = 512,
k_bucket_size = 1024,
parallel = False,
mixed_precision = False
):
super().__init__()
self.heads = heads
self.causal = causal
self.parallel = parallel
self.mixed_precision = mixed_precision
inner_dim = heads * dim_head
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
# memory efficient attention related parameters
# can be overriden on forward
self.q_bucket_size = q_bucket_size
self.k_bucket_size = k_bucket_size
if self.parallel:
self.model = DataParallel(self)
if self.mixed_precision:
self.scaler = GradScaler()
def forward(
self,
x,
context = None,
mask = None,
q_bucket_size = None,
k_bucket_size = None,
):
q_bucket_size = default(q_bucket_size, self.q_bucket_size)
k_bucket_size = default(k_bucket_size, self.k_bucket_size)
h = self.heads
context = default(context, x)
# batch_size = x.shape[0]
# x = x.view(batch_size, -1)
q = self.to_q(x)
k, v = self.to_kv(context).chunk(2, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
if self.parallel:
# Split the input data into chunks and move each chunk to the correct GPU
num_gpus = torch.cuda.device_count()
x_chunks = x.split(x.size(0) // num_gpus)
x_chunks = [chunk.to(f'cuda:{i}') for i, chunk in enumerate(x_chunks)]
q = x_chunks
if self.mixed_precision:
# Use autocast to allow operations to run in lower precision
with autocast():
out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size)
else:
out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out) | Optimus-Prime-main | optimus_prime/flash.py |
import math
from random import random
from contextlib import nullcontext
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange
from optimus_prime.x_transformers import TransformerWrapper
from typing import Optional
# constants
Losses = namedtuple('Losses', ['loss', 'generator_loss', 'critic_loss'])
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# sampling helpers
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = logits.topk(k, dim = -1)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(2, ind, val)
return probs
def log(t, eps = 1e-10):
return torch.log(t + eps)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
# prob helpers
def sample_prob(prob):
return random() < prob
def coin_flip():
return sample_prob(0.5)
# tensor helpers
def get_mask_subset_prob(mask, prob, min_mask = 0):
batch, seq, device = *mask.shape, mask.device
num_to_mask = (mask.sum(dim = -1, keepdim = True) * prob).clamp(min = min_mask)
logits = torch.rand((batch, seq), device = device)
logits = logits.masked_fill(~mask, -1)
randperm = logits.argsort(dim = -1).float()
num_padding = (~mask).sum(dim = -1, keepdim = True)
randperm -= num_padding
subset_mask = randperm < num_to_mask
subset_mask.masked_fill_(~mask, False)
return subset_mask
# schedules
def linear_schedule(t):
return 1 - t
def cosine_schedule(t):
""" https://arxiv.org/abs/2202.04200 """
return torch.cos(t * math.pi / 2)
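# minimal usage sketch: how generate() below turns these schedules into the fraction of
# tokens still masked at each demasking step (4 steps shown; values are illustrative)
def _example_schedules():
    times = torch.linspace(0., 1., 4 + 1)[1:]
    return linear_schedule(times), cosine_schedule(times)   # both decay monotonically toward 0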
# self token critic
# inspired by Nijkamp et al. - https://aclanthology.org/2021.naacl-main.409/
class SelfCritic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
dim = net.attn_layers.dim
self.to_logits = nn.Linear(dim, 1)
def forward(self, x):
embed = self.net(x, return_embeddings = True)
return self.to_logits(embed)
class NonAutoregressiveWrapper(nn.Module):
"""
https://arxiv.org/abs/1904.09324
https://arxiv.org/abs/2202.04200
"""
def __init__(
self,
net,
*,
mask_id,
steps = 18,
self_cond = False,
self_cond_train_prob = 0.75,
no_replace_prob = 0.15, # which percentage of the tokens masked will stay the same, done in original MLM paper
random_token_prob = 0.1, # which percentage of tokens to be replaced with random token, done in original MLM paper
schedule = 'linear',
can_mask_prev_unmasked = False, # when unmasking, whether it can remask previously unmasked
token_critic: Optional[TransformerWrapper] = None,
self_token_critic = False,
critic_loss_weight = 1.
):
super().__init__()
assert not (self_token_critic and exists(token_critic))
self.net = net
dim = net.emb_dim
self.dim = dim
self.num_tokens = net.num_tokens
self.mask_id = mask_id
# afaict, maskgit paper did not do this
# but may help for self conditioning, as used successfully in original BERT
self.no_replace_prob = no_replace_prob
self.random_token_prob = random_token_prob
self.max_seq_len = net.max_seq_len
self.steps = steps
        if callable(schedule):
            self.schedule_fn = schedule
        elif schedule == 'linear':
            self.schedule_fn = linear_schedule
        elif schedule == 'cosine':
            self.schedule_fn = cosine_schedule
        else:
            raise ValueError(f'invalid schedule {schedule}')
self.can_mask_prev_unmasked = can_mask_prev_unmasked
# self conditioning
self.self_cond = self_cond
if self_cond:
self.null_embed = nn.Parameter(torch.randn(dim))
self.to_self_cond = nn.Linear(dim, dim, bias = False) if self_cond else None
self.self_cond_train_prob = self_cond_train_prob
# token critic
self.token_critic = token_critic
if self_token_critic:
self.token_critic = SelfCritic(net)
self.critic_loss_weight = critic_loss_weight
@torch.no_grad()
def generate(
self,
batch_size = None,
start_temperature = 1.,
filter_thres = 0.7,
noise_level_scale = 1.,
**kwargs
):
sample_one = not exists(batch_size)
batch_size = default(batch_size, 1)
device = next(self.net.parameters()).device
was_training = self.training
self.eval()
times = torch.linspace(0., 1., self.steps + 1)
# sequence starts off as all masked
shape = (batch_size, self.max_seq_len)
seq = torch.full(shape, self.mask_id, device = device)
mask = torch.full(shape, True, device = device)
# slowly demask
all_mask_num_tokens = (self.schedule_fn(times[1:]) * self.max_seq_len).long()
# self conditioning
has_self_cond = self.self_cond
last_embed = self.null_embed if has_self_cond else None
for mask_num_tokens, steps_until_x0 in zip(all_mask_num_tokens.tolist(), reversed(range(self.steps))):
self_cond = self.to_self_cond(last_embed) if has_self_cond else None
logits, embeds = self.net(
seq,
sum_embeds = self_cond,
return_logits_and_embeddings = True,
**kwargs
)
if has_self_cond:
last_embed = embeds
if exists(filter_thres):
logits = top_k(logits, filter_thres)
annealing_scale = steps_until_x0 / self.steps
temperature = start_temperature * annealing_scale
sampled_ids = gumbel_sample(logits, temperature = max(temperature, 1e-3))
seq = torch.where(mask, sampled_ids, seq)
if exists(self.token_critic):
scores = self.token_critic(seq)
scores = rearrange(scores, 'b n 1 -> b n')
scores = scores + noise_level_scale * gumbel_noise(scores) * annealing_scale
else:
scores = 1 - logits.softmax(dim = -1)
scores = scores.gather(2, rearrange(sampled_ids, 'b n -> b n 1'))
scores = rearrange(scores, 'b n 1 -> b n')
if mask_num_tokens == 0:
pass
if not self.can_mask_prev_unmasked:
scores = scores.masked_fill(~mask, -torch.finfo(scores.dtype).max)
mask_indices = scores.topk(mask_num_tokens, dim = -1).indices
mask = torch.zeros_like(scores, dtype = torch.bool).scatter(1, mask_indices, True)
seq = seq.masked_fill(mask, self.mask_id)
self.train(was_training)
if sample_one:
seq = rearrange(seq, '1 n -> n')
return seq
def forward(
self,
x,
only_train_generator = False,
only_train_critic = False,
generator_sample_temperature = None,
**kwargs
):
b, n, device = *x.shape, x.device
assert n == self.max_seq_len
orig_seq = x.clone()
rand_times = torch.empty(b, device = device).uniform_(0, 1)
batched_randperm = torch.rand((b, n), device = device).argsort(dim = -1).float()
rand_probs = self.schedule_fn(rand_times)
num_tokens_mask = (rand_probs * n).clamp(min = 1.)
mask = batched_randperm < rearrange(num_tokens_mask, 'b -> b 1')
# to ensure all tokens produce embeddings, instead of just the ones with [mask] input, as done in seminal BERT MLM paper
# potentially needed for self-conditioning (on embedding) to work well
replace_mask_id_mask = mask.clone()
frac_seq_left = 1.
if self.no_replace_prob > 0. and coin_flip():
frac_seq_left -= self.no_replace_prob
no_replace_prob_mask = get_mask_subset_prob(mask, self.no_replace_prob)
replace_mask_id_mask &= ~no_replace_prob_mask
if self.random_token_prob > 0. and coin_flip():
random_token_prob_mask = get_mask_subset_prob(replace_mask_id_mask, self.random_token_prob * frac_seq_left)
random_tokens = torch.randint(0, self.num_tokens, (b, n), device = device)
x = torch.where(random_token_prob_mask, random_tokens, x)
replace_mask_id_mask &= ~random_token_prob_mask
masked = torch.where(replace_mask_id_mask, self.mask_id, x)
# self conditioning
if self.self_cond:
self_cond = self.null_embed
if sample_prob(self.self_cond_train_prob):
with torch.no_grad():
self_cond = self.net(masked, return_embeddings = True, **kwargs).detach()
kwargs.update(sum_embeds = self.to_self_cond(self_cond))
# logits
context = torch.no_grad if only_train_critic else nullcontext
with context():
logits = self.net(masked, **kwargs)
# cross entropy loss
loss = F.cross_entropy(
logits[mask],
orig_seq[mask]
)
if not exists(self.token_critic) or only_train_generator:
return Losses(loss, loss, None)
sampled_ids = gumbel_sample(logits, temperature = default(generator_sample_temperature, random()))
generated = torch.where(mask, sampled_ids, orig_seq)
critic_logits = self.token_critic(generated)
critic_labels = (sampled_ids != orig_seq).float()
critic_loss = F.binary_cross_entropy_with_logits(
rearrange(critic_logits, '... 1 -> ...'),
critic_labels
)
# determine losses to be returned based on what researcher wants to train
if only_train_critic:
total_loss = critic_loss
loss = None
else:
total_loss = loss + critic_loss * self.critic_loss_weight
return Losses(total_loss, loss, critic_loss)
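# minimal usage sketch (illustrative; assumes a TransformerWrapper whose vocabulary reserves one extra id for the [mask] token):
#
#   model = NonAutoregressiveWrapper(net, mask_id = 256, steps = 18, schedule = 'cosine')
#   losses = model(token_ids)       # token_ids: (batch, max_seq_len) LongTensor
#   losses.loss.backward()
#   sampled = model.generate()      # iteratively demasks a fully masked sequence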
| Optimus-Prime-main | optimus_prime/nonautoregressive_wrapper.py |
import logging
import pytest
import torch
from optimus_prime.attend import Attend
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def test_forward_pass():
logger.info("Running forward pass test...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, flash=True)
q = torch.randn(1, 8, 512, 64)
k = torch.randn(1, 8, 512, 64)
v = torch.randn(1, 8, 512, 64)
out, _ = model(q, k, v)
assert out.shape == (1, 8, 512, 64)
logger.info("Forward pass test passed.")
def test_backward_pass():
logger.info("Running backward pass test...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, flash=True)
q = torch.randn(1, 8, 512, 64, requires_grad=True)
k = torch.randn(1, 8, 512, 64, requires_grad=True)
v = torch.randn(1, 8, 512, 64, requires_grad=True)
out, _ = model(q, k, v)
out.sum().backward()
assert q.grad is not None
assert k.grad is not None
assert v.grad is not None
logger.info("Backward pass test passed.")
def test_memory_usage():
logger.info("Running memory usage test...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, flash=True)
q = torch.randn(1, 8, 512, 64)
k = torch.randn(1, 8, 512, 64)
v = torch.randn(1, 8, 512, 64)
before_memory = torch.cuda.memory_allocated()
out, _ = model(q, k, v)
after_memory = torch.cuda.memory_allocated()
assert after_memory - before_memory < 1e6 # less than 1MB increase
logger.info("Memory usage test passed.")
def test_execution_time():
import time
logger.info("Running execution time test...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, flash=True)
q = torch.randn(1, 8, 512, 64)
k = torch.randn(1, 8, 512, 64)
v = torch.randn(1, 8, 512, 64)
start_time = time.time()
out, _ = model(q, k, v)
end_time = time.time()
assert end_time - start_time < 1 # less than 1 second
logger.info("Execution time test passed.")
def test_error_rate():
logger.info("Running error rate test...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, flash=True)
q = torch.randn(1, 8, 512, 64)
k = torch.randn(1, 8, 512, 64)
v = torch.randn(1, 8, 512, 64)
out, _ = model(q, k, v)
assert (out != out).sum() == 0 # no NaN values
logger.info("Error rate test passed.")
########################## FLASH ATTENTION 2.0
def test_forward_pass_flash2():
logger.info("Running forward pass test for Flash Attention 2.0...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, Flash2=True)
q = torch.randn(1, 8, 512, 64)
k = torch.randn(1, 8, 512, 64)
v = torch.randn(1, 8, 512, 64)
out, _ = model(q, k, v)
assert out.shape == (1, 8, 512, 64)
logger.info("Forward pass test for Flash Attention 2.0 passed.")
def test_backward_pass_flash2():
logger.info("Running backward pass test for Flash Attention 2.0...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, Flash2=True)
q = torch.randn(1, 8, 512, 64, requires_grad=True)
k = torch.randn(1, 8, 512, 64, requires_grad=True)
v = torch.randn(1, 8, 512, 64, requires_grad=True)
out, _ = model(q, k, v)
out.sum().backward()
assert q.grad is not None
assert k.grad is not None
assert v.grad is not None
logger.info("Backward pass test for Flash Attention 2.0 passed.")
def test_memory_usage_flash2():
logger.info("Running memory usage test for Flash Attention 2.0...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, Flash2=True)
q = torch.randn(1, 8, 512, 64)
k = torch.randn(1, 8, 512, 64)
v = torch.randn(1, 8, 512, 64)
before_memory = torch.cuda.memory_allocated()
out, _ = model(q, k, v)
after_memory = torch.cuda.memory_allocated()
assert after_memory - before_memory < 1e6 # less than 1MB increase
logger.info("Memory usage test for Flash Attention 2.0 passed.")
def test_execution_time_flash2():
import time
logger.info("Running execution time test for Flash Attention 2.0...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, Flash2=True)
q = torch.randn(1, 8, 512, 64)
k = torch.randn(1, 8, 512, 64)
v = torch.randn(1, 8, 512, 64)
start_time = time.time()
out, _ = model(q, k, v)
end_time = time.time()
assert end_time - start_time < 1 # less than 1 second
logger.info("Execution time test for Flash Attention 2.0 passed.")
def test_error_rate_flash2():
logger.info("Running error rate test for Flash Attention 2.0...")
model = Attend(dim=512, dim_head=64, q_bucket_size=128, k_bucket_size=128, parallel=False, mixed_precision=False, Flash2=True)
q = torch.randn(1, 8, 512, 64)
k = torch.randn(1, 8, 512, 64)
v = torch.randn(1, 8, 512, 64)
out, _ = model(q, k, v)
assert (out != out).sum() == 0 # no NaN values
logger.info("Error rate test for Flash Attention 2.0 passed.")
test_forward_pass()
test_backward_pass()
test_memory_usage()
test_execution_time()
test_error_rate() | Optimus-Prime-main | tests/attend/attend.py |
import tqdm
import torch
import torch.optim as optim
from optimus_prime_transformers import XTransformer
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 3e-4
GENERATE_EVERY = 100
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1
# helpers
def cycle():
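    # endlessly yield (src, tgt, src_mask) copy-task batches: the target is a start token followed by the source repeated twice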
while True:
prefix = torch.ones((BATCH_SIZE, 1)).long().cuda()
src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long().cuda()
tgt = torch.cat((prefix, src, src), 1)
src_mask = torch.ones(BATCH_SIZE, src.shape[1]).bool().cuda()
yield (src, tgt, src_mask)
# instantiate model
model = XTransformer(
dim = 512,
tie_token_emb = True,
return_tgt_loss = True,
enc_num_tokens=NUM_TOKENS,
enc_depth = 3,
enc_heads = 8,
enc_max_seq_len = ENC_SEQ_LEN,
dec_num_tokens = NUM_TOKENS,
dec_depth = 3,
dec_heads = 8,
dec_max_seq_len = DEC_SEQ_LEN
).cuda()
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
src, tgt, src_mask = next(cycle())
loss = model(src, tgt, mask=src_mask)
loss.backward()
print(f'{i}: {loss.item()}')
optim.step()
optim.zero_grad()
if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
src, _, src_mask = next(cycle())
src, src_mask = src[:1], src_mask[:1]
start_tokens = (torch.ones((1, 1)) * 1).long().cuda()
sample = model.generate(src, start_tokens, ENC_SEQ_LEN, mask = src_mask)
incorrects = (src != sample).abs().sum()
print("input: ", src)
print("predicted output: ", sample)
print(f"incorrects: {incorrects}")
| Optimus-Prime-main | examples/toy_tasks/enc_dec_copy.py |
from optimus_prime_transformers import (
TransformerWrapper,
Encoder,
NonAutoregressiveWrapper
)
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e8)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 250
SEQ_LEN = 256
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
model = TransformerWrapper(
num_tokens = 256 + 1,
logits_dim = 256,
max_seq_len = SEQ_LEN,
attn_layers = Encoder(
dim = 512,
depth = 8,
heads = 8,
dynamic_pos_bias = True
)
)
model = NonAutoregressiveWrapper(
model,
steps = 18,
schedule = 'cosine',
mask_id = 256, # mask id is last token, which is why num_tokens above has a +1 (special token)
self_token_critic = True
)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader)).loss
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
val_data = next(val_loader)
loss = model(val_data).loss
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
sample = model.generate()
output_str = decode_tokens(sample)
print(output_str)
| Optimus-Prime-main | examples/enwik8_simple/train_nar.py |
from optimus_prime_transformers import TransformerWrapper, Decoder
from optimus_prime_transformers.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = TransformerWrapper(
num_tokens = 256,
max_seq_len = SEQ_LEN,
attn_layers = Decoder(dim = 512, depth = 6, heads = 8)
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE, drop_last = True))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE, drop_last = True))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| Optimus-Prime-main | examples/enwik8_simple/train.py |
from setuptools import setup, find_packages
#
setup(
name = 'hivemind',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Hive - Pytorch',
author = 'Kye Gomez',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/Hive',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers',
"Prompt Engineering"
],
install_requires=[
'swarms',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| Hive-main | setup.py |
print("hello there 😊 ")
| Hive-main | hive/main.py |
from logic_guide import LogicGuide, QuoteGuide, AlgebraGuide, MemoryGuide
# Example usage:
model_id="tiiuae/falcon-40b"
logic_guide = LogicGuide(model_id=model_id)
#provide few shot prompt for better results
text = """
Context: Every dog is small. Every feline is a snake. Every animal is not bitter. Sheep are bitter. Cats are
carnivores. Each vertebrate is a mammal. Mammals are felines. Each vertebrate is dull. Snakes are cats.
Cats are not kind. Every snake is not happy. Every sheep is a vertebrate. Each feline is cold. Each dog is a
sheep. Every mammal is not liquid. Every carnivore is a cow. Every carnivore is brown. Alex is a sheep.
Question: True or false: Alex is not bitter.
"""
print(logic_guide.generate(text))
model_id = "tiiuae/falcon-40b"
device = "cuda:0" # Change to "cpu" if you don't have a CUDA-compatible GPU.
# Memory Guide
memory_guide = MemoryGuide()
logic_guide = LogicGuide(model_id=model_id, guide_function=memory_guide, device=device)
text = "[[set:name=OpenAI]] What is your name?"
print(logic_guide.generate(text)) # Output: "My name is OpenAI."
text = "[[get:name=]] What is your name?"
print(logic_guide.generate(text)) # Output: "My name is OpenAI."
# Quote Guide (for this example, we're using Project Gutenberg's "The Adventures of Sherlock Holmes")
quote_guide = QuoteGuide(source="https://www.gutenberg.org/files/1661/1661-h/1661-h.htm")
logic_guide = LogicGuide(model_id=model_id, guide_function=quote_guide, device=device)
text = "[[quote:]] What is a quote from Sherlock Holmes?"
print(logic_guide.generate(text)) # Output: A quote from "The Adventures of Sherlock Holmes" (random quote from the source)
# Algebra Guide
algebra_guide = AlgebraGuide()
logic_guide = LogicGuide(model_id=model_id, guide_function=algebra_guide, device=device)
text = "[[eq]] x^2 + 3x + 2 = 0"
print(logic_guide.generate(text)) # Output: "x^2 + 3x + 2 = 0" (and stores the equation for later)
text = "[[solve:x=]] What is the value of x?"
print(logic_guide.generate(text)) # Output: "The value of x is ..." (the solutions of the equation)
| LOGICGUIDE-main | example_huggingface.py |
from logic_guide import LogicGuide
logic_guide = LogicGuide(openai_api_key='', openai_api_model='gpt4')
#provide few shot prompt for better results
text = """
Context: Every dog is small. Every feline is a snake. Every animal is not bitter. Sheep are bitter. Cats are
carnivores. Each vertebrate is a mammal. Mammals are felines. Each vertebrate is dull. Snakes are cats.
Cats are not kind. Every snake is not happy. Every sheep is a vertebrate. Each feline is cold. Each dog is a
sheep. Every mammal is not liquid. Every carnivore is a cow. Every carnivore is brown. Alex is a sheep.
Question: True or false: Alex is not bitter.
"""
print(logic_guide.generate(text))
| LOGICGUIDE-main | example_openai.py |
from setuptools import setup, find_packages
setup(
name = 'logic_guide',
packages = find_packages(exclude=['examples']),
version = '0.0.1',
license='APACHE',
description = 'Logic Guide - HF',
author = 'Kye Gomez',
author_email = '[email protected]',
url = 'https://github.com/kyegomez/LOGICGUIDE',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'prompt engineering'
],
install_requires=[
'transformers',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | LOGICGUIDE-main | setup.py |
import re
# class LogicGuide:
# def __init__(self):
# self.delimiters = ("t1", "t2") # example delimiters for guiding text extraction
# def guide_function(self, generated_sequence):
# """Function to define a set of valid generations based on previously generated sequences."""
# # Implementation specifics would depend on the underlying logic.
# return set() # returns a set of valid next generations
# def completion_engine(self, input_string):
# """Completion engine that uses Constrained Semantic Decoding (CSD) algorithm."""
# t1, t2 = self.delimiters
# if input_string.endswith(t1):
# sp = self.extract_text_blocks(input_string, t1, t2)
# valid_strings = self.guide_function(sp)
# return self.create_regex(valid_strings, t2)
# else:
# return self.create_regex([], t1) # matches any string not containing t1 and ending in t1
# @staticmethod
# def extract_text_blocks(input_string, t1, t2):
# """Extract blocks of text between two delimiters t1 and t2."""
# # Implementation would extract and return blocks of text based on input string and delimiters
# return ""
# @staticmethod
# def create_regex(valid_strings, ending):
# """Create a regular expression matching valid strings ending with a specific character."""
# # Implementation would return a regex based on valid strings and ending character
# return ""
# def logicguide(self, input_string):
# """The LOGICGUIDE function that utilizes the completion engine and the Peano theorem-proving environment."""
# # The completion engine is called here to generate valid continuations
# completion_engine_output = self.completion_engine(input_string)
# # Integration with the Peano theorem-proving environment would occur here
# # for now, it will just return the output of the completion engine
# return completion_engine_output
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch
from utils.openai import OpenAILanguageModel
class UniversalGuide:
def __init__(self):
pass
def __call__(self, history):
return history
class DigitGuide:
def __init__(self):
self.regex = re.compile(r"\d+")
def __call__(self, history):
if self.regex.match(history):
return self.regex
else:
return None
class GuideFunction:
def __init__(self, tool):
self.tool = tool
def __call__(self, model_output):
        return self.tool.check(model_output)
def parity_guide(binary_string):
count = binary_string.count('1')
if count % 2 == 0:
return 1 # belongs to the PARITY language
else:
return 0 # does not belong to the PARITY language
class LogicTool:
def check(self, text):
        # pseudocode: use a logic system to verify the logical consistency of the text,
        # e.g. semantic analysis and logical inference over a proof tree
return True
class FactTool:
def check(self, text):
        # pseudocode: use a fact-checking system to verify the factual accuracy of the text,
        # e.g. by cross-referencing the text against a reliable database of facts
return True
logic_guide = GuideFunction(LogicTool())
fact_guide = GuideFunction(FactTool())
#guides from the paper
class MemoryGuide:
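    """Guide that stores values from [[set:key=value]] triggers and substitutes them back for [[get:key=]] triggers."""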
def __init__(self):
self.memory = {}
def __call__(self, history):
set_trigger = "[[set:"
get_trigger = "[[get:"
if set_trigger in history:
key_value = history.split(set_trigger, 1)[1].split(']]', 1)[0]
key, value = key_value.split("=")
self.memory[key] = value
return history.replace(set_trigger + key_value + ']]', "")
elif get_trigger in history:
key_value = history.split(get_trigger, 1)[1].split(']]', 1)[0]
key = key_value.split("=")[0]
return history.replace(get_trigger + key_value + ']]', self.memory.get(key, ''))
return history
import requests
from bs4 import BeautifulSoup
class QuoteGuide:
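    """Guide that resolves a [[quote:]] trigger using paragraphs scraped from the configured source URL."""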
def __init__(self, source):
self.source = source
self.quotes = self.get_quotes_from_source()
def get_quotes_from_source(self):
page = requests.get(self.source)
soup = BeautifulSoup(page.content, 'html.parser')
return [p.text for p in soup.find_all('p')]
def __call__(self, history):
quote_trigger = "[[quote:]]"
if quote_trigger in history:
for quote in self.quotes:
if quote in history:
return history
return history.replace(quote_trigger, '[[quote:' + self.quotes[0] + ']]')
return history
import sympy
class AlgebraGuide:
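    """Guide that records variables from [[eq]] equation triggers and answers [[solve:var=]] triggers with sympy."""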
def __init__(self):
self.variables = {}
def __call__(self, history):
eq_trigger = "[[eq]]"
solve_trigger = "[[solve:"
if eq_trigger in history:
equation = history.split(eq_trigger, 1)[1].split(']]', 1)[0]
for variable in sympy.symbols(equation).free_symbols:
self.variables[variable.name] = variable
return history
elif solve_trigger in history:
var_value = history.split(solve_trigger, 1)[1].split(']]', 1)[0]
var = var_value.split("=")[0]
if var in self.variables:
solution = sympy.solve(self.variables[var])[0]
return history.replace(solve_trigger + var_value + ']]', '[[solve:' + var + '=' + str(solution) + ']]')
return history
class LogicGuide:
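    """Wraps a Hugging Face causal LM and routes any prompt containing the [[ ... ]] trigger through a guide function before generation."""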
def __init__(self, model_id, guide_function=None, device="cuda:0", openai_api_key="", openai_api_base="", openai_api_model=""):
self.tokenizer = AutoTokenizer.from_pretrained(model_id)
self.model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto").to(device)
self.t1 = "[[" # Guide trigger
self.t2 = "]]" # End of trusted generation
self.device = device
# Initializing OpenAI model
self.openai_model = OpenAILanguageModel(api_key=openai_api_key, api_base=openai_api_base, api_model=openai_api_model)
if guide_function:
self.guide_function = guide_function
else:
self.guide_function = self.default_guide_function
def default_guide_function(self, S):
return S
def get_bnb_config(self):
return BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16
)
def guide(self, S):
return self.guide_function(S)
def get_blocks(self, s):
blocks = []
split_s = s.split(self.t1)
for block in split_s[1:]:
if self.t2 in block:
blocks.append(block.split(self.t2)[0])
return blocks
    def generate(self, text, max_new_tokens=20):
        # If the guide trigger is present, apply the guide function before tokenizing
        if self.t1 in text:
            text = self.guide(text)
        inputs = self.tokenizer(text, return_tensors="pt").to(self.device)
        outputs = self.model.generate(**inputs, max_new_tokens=max_new_tokens)
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
# Example usage:
model_id="tiiuae/falcon-40b"
logic_guide = LogicGuide(model_id=model_id)
#provide few shot prompt for better results
text = """
Context: Every dog is small. Every feline is a snake. Every animal is not bitter. Sheep are bitter. Cats are
carnivores. Each vertebrate is a mammal. Mammals are felines. Each vertebrate is dull. Snakes are cats.
Cats are not kind. Every snake is not happy. Every sheep is a vertebrate. Each feline is cold. Each dog is a
sheep. Every mammal is not liquid. Every carnivore is a cow. Every carnivore is brown. Alex is a sheep.
Question: True or false: Alex is not bitter.
"""
print(logic_guide.generate(text))
model_id = "tiiuae/falcon-40b"
device = "cuda:0" # Change to "cpu" if you don't have a CUDA-compatible GPU.
# Memory Guide
memory_guide = MemoryGuide()
logic_guide = LogicGuide(model_id=model_id, guide_function=memory_guide, device=device)
text = "[[set:name=OpenAI]] What is your name?"
print(logic_guide.generate(text)) # Output: "My name is OpenAI."
text = "[[get:name=]] What is your name?"
print(logic_guide.generate(text)) # Output: "My name is OpenAI."
# Quote Guide (for this example, we're using Project Gutenberg's "The Adventures of Sherlock Holmes")
quote_guide = QuoteGuide(source="https://www.gutenberg.org/files/1661/1661-h/1661-h.htm")
logic_guide = LogicGuide(model_id=model_id, guide_function=quote_guide, device=device)
text = "[[quote:]] What is a quote from Sherlock Holmes?"
print(logic_guide.generate(text)) # Output: A quote from "The Adventures of Sherlock Holmes" (random quote from the source)
# Algebra Guide
algebra_guide = AlgebraGuide()
logic_guide = LogicGuide(model_id=model_id, guide_function=algebra_guide, device=device)
text = "[[eq]] x^2 + 3x + 2 = 0"
print(logic_guide.generate(text)) # Output: "x^2 + 3x + 2 = 0" (and stores the equation for later)
text = "[[solve:x=]] What is the value of x?"
print(logic_guide.generate(text)) # Output: "The value of x is ..." (the solutions of the equation)
| LOGICGUIDE-main | logic_guide/logicguide.py |
from logic_guide.logicguide import AlgebraGuide, LogicGuide, QuoteGuide, MemoryGuide, FactTool, LogicTool, GuideFunction, DigitGuide, UniversalGuide | LOGICGUIDE-main | logic_guide/__init__.py |
import os
import openai
import time
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class OpenAILanguageModel:
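    """Thin wrapper around the OpenAI completion/chat APIs with retry-on-rate-limit handling and optional custom base URL and model."""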
def __init__(self, api_key, api_base="", api_model=""):
if api_key == "" or api_key == None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
        if api_base == "" or api_base == None:
api_base = os.environ.get("OPENAI_API_BASE", "")
if api_base != "":
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model == None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
self.use_chat_api = 'gpt' in self.api_model
def openai_api_call_handler(self, prompt, max_tokens, temperature, k=1, stop=None):
while True:
try:
if self.use_chat_api:
messages = [
{
"role": "user",
"content": prompt
}
]
response = openai.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
else:
response = openai.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
)
with open("openai.logs", 'a') as log_file:
log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n")
return response
except openai.error.RateLimitError as e:
                sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
                print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
                time.sleep(sleep_duration)
def openai_choice2text_handler(self, choice):
if self.use_chat_api:
text = choice['message']['content']
else:
text = choice.text.strip()
return text
def generate_solution(self, initial_prompt):
try:
prompt = f"""
@LogicGuide AI
You are a highly logical entity, capable of problem-solving and decision-making. Given any task, you should structure your approach according to the following pseudocode:
TASK ARCHITECTURE:
1. Task Decomposition: Break down the problem into smaller, more manageable parts.
2. Data Gathering: Identify the necessary information and data needed to solve each part.
3. Algorithm Design: Develop an algorithm to solve each part of the problem.
4. Implementation: Apply the designed algorithms to solve each part.
5. Integration: Combine the solutions of each part to solve the whole problem.
6. Verification: Check if the solution meets the problem's requirements and makes logical sense.
"""
answer = self.openai_api_call_handler(prompt, 300, 0.5, 1)
answer_text = self.openai_choice2text_handler(answer.choices[0])
print(f'Solution: {answer_text}')
return answer_text
except Exception as e:
logger.error(f"Error in generate_solution: {e}")
return None
| LOGICGUIDE-main | logic_guide/utils/openai.py |
from setuptools import setup, find_packages
setup(
name = 'swarms',
packages = find_packages(exclude=[]),
version = '1.4.1',
license='MIT',
description = 'Swarms - Pytorch',
author = 'Kye Gomez',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/swarms',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers',
"Prompt Engineering"
],
install_requires=[
'transformers',
'openai',
'langchain==0.0.240',
'asyncio',
'nest_asyncio',
'pegasusx',
'google-generativeai',
'oceandb',
'langchain-experimental',
'playwright',
'duckduckgo_search',
'faiss-cpu',
'wget',
'httpx',
'ggl',
'beautifulsoup4',
'pydantic',
'tenacity',
'celery',
'redis',
'google-search-results==2.4.2',
'Pillow',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | swarms-master | setup.py |
from swarms import Worker
node = Worker(
openai_api_key="",
ai_name="Optimus Prime",
)
task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
response = node.run(task)
print(response) | swarms-master | example.py |
from swarms import worker_node
# Your OpenAI API key
api_key = "sksdsds"
# Initialize a WorkerNode with your API key
node = worker_node(api_key)
# Define an objective
objective = "Please make a web GUI for using HTTP API server..."
# Run the task
task = node.run(objective)
print(task)
| swarms-master | playground/worker_auto.py |
from swarms import WorkerUltraUltraNode
# Define an objective
objective = """
Please make a web GUI for using HTTP API server.
The name of it is Swarms.
You can check the server code at ./main.py.
The server is served on localhost:8000.
Users should be able to write text input as 'query' and url array as 'files', and check the response.
Users input form should be delivered in JSON format.
I want it to have neumorphism-style. Serve it on port 4500.
"""
node = WorkerUltraUltraNode(objective)
result = node.execute() | swarms-master | playground/ultranode_example.py |
from swarms import HierarchicalSwarm
# Retrieve your API key from the environment or replace with your actual key
api_key = "sksdsds"
# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(openai_api_key=api_key)
# Define an objective
objective = """
Please develop and serve a simple web TODO app.
The user can list all TODO items and add or delete each TODO item.
I want it to have neumorphism-style.
The ports you can use are 4500 and 6500.
"""
# Run HierarchicalSwarm
swarm.run(objective) | swarms-master | playground/todo_app.py |
from swarms import HierarchicalSwarm
# Retrieve your API key from the environment or replace with your actual key
api_key = ""
# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(api_key)
# Define an objective
objective = "Find 20 potential customers for a HierarchicalSwarm based AI Agent automation infrastructure"
# Run HierarchicalSwarm
swarm.run(objective) | swarms-master | playground/swarms_example.py |
import os
from swarms.swarms.swarms import WorkerUltra
api_key = os.getenv("OPENAI_API_KEY")
# Define an objective
objective = """
Please make a web GUI for using HTTP API server.
The name of it is Swarms.
You can check the server code at ./main.py.
The server is served on localhost:8000.
Users should be able to write text input as 'query' and url array as 'files', and check the response.
Users input form should be delivered in JSON format.
I want it to have neumorphism-style. Serve it on port 4500.
"""
# Create an instance of WorkerUltra
worker = WorkerUltra(objective, api_key)
# Execute the task
result = worker.execute()
# Print the result
print(result) | swarms-master | playground/worker_ultra.py |
 | swarms-master | playground/DIY.py
from swarms import swarm
# Use the function
api_key = "APIKEY"
objective = "What is the capital of the UK?"
result = swarm(api_key, objective)
print(result) # Prints: "The capital of the UK is London."
| swarms-master | playground/easy_example.py |
from swarms import AutoScaler
auto_scaler = AutoScaler()
auto_scaler.start()
for i in range(100):
auto_scaler.add_task(f"Task {i}")
| swarms-master | playground/autoscaler.py |
from swarms.structs.workflow import Workflow
workflow = Workflow()
workflow.add('Find 50 ceos in linkedin in agriculture ')
| swarms-master | playground/workflow.py |
from swarms import HierarchicalSwarm
# Retrieve your API key from the environment or replace with your actual key
api_key = "sksdsds"
# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(openai_api_key=api_key)
# Define an objective
objective = """
Please develop and serve a simple community web service.
People can signup, login, post, comment.
Post and comment should be visible at once.
I want it to have neumorphism-style.
The ports you can use are 4500 and 6500.
"""
# Run HierarchicalSwarm
swarm.run(objective) | swarms-master | playground/social_app.py |
from swarms import HierarchicalSwarm
# Retrieve your API key from the environment or replace with your actual key
api_key = "sksdsds"
# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(openai_api_key=api_key)
# Define an objective
objective = """
Please make a web GUI for using HTTP API server.
The name of it is HierarchicalSwarm.
You can check the server code at ./main.py.
The server is served on localhost:8000.
Users should be able to write text input as 'query' and url array as 'files', and check the response.
Users input form should be delivered in JSON format.
I want it to have neumorphism-style. Serve it on port 4500.
"""
# Run HierarchicalSwarm
swarm.run(objective) | swarms-master | playground/gui_app.py |
from swarms import HierarchicalSwarm
swarm = HierarchicalSwarm(
openai_api_key="key",
model_type="openai",
model_id="gpt-4",
use_vectorstore=False,
use_async=False,
human_in_the_loop=False,
logging_enabled=False
)
#run the swarm with an objective
result = swarm.run("Design a new car")
#or huggingface
swarm = HierarchicalSwarm(
model_type="huggingface",
    model_id="tiiuae/falcon-40b",
use_vectorstore=True,
embedding_size=768,
use_async=False,
human_in_the_loop=True,
logging_enabled=False,
)
# Run the swarm with a particular objective
result = swarm.run("Write a sci-fi short story")
| swarms-master | playground/DIY/hierchical.py |
import pytest
from unittest.mock import Mock
from swarms.swarms.orchestrate import Orchestrator
@pytest.fixture
def mock_agent():
return Mock()
@pytest.fixture
def mock_task():
return {"task_id": 1, "task_data": "data"}
@pytest.fixture
def mock_vector_db():
return Mock()
@pytest.fixture
def orchestrator(mock_agent, mock_vector_db):
agent_list = [mock_agent for _ in range(5)]
task_queue = []
return Orchestrator(mock_agent, agent_list, task_queue, mock_vector_db)
def test_assign_task(orchestrator, mock_agent, mock_task, mock_vector_db):
orchestrator.task_queue.append(mock_task)
orchestrator.assign_task(0, mock_task)
mock_agent.process_task.assert_called_once()
mock_vector_db.add_documents.assert_called_once()
def test_retrieve_results(orchestrator, mock_vector_db):
mock_vector_db.query.return_value = "expected_result"
assert orchestrator.retrieve_results(0) == "expected_result"
def test_update_vector_db(orchestrator, mock_vector_db):
data = {"vector": [0.1, 0.2, 0.3], "task_id": 1}
orchestrator.update_vector_db(data)
mock_vector_db.add_documents.assert_called_once_with([data['vector']], [str(data['task_id'])])
def test_get_vector_db(orchestrator, mock_vector_db):
assert orchestrator.get_vector_db() == mock_vector_db
def test_append_to_db(orchestrator, mock_vector_db):
collection = "test_collection"
result = "test_result"
orchestrator.append_to_db(collection, result)
mock_vector_db.append_document.assert_called_once_with(collection, result, id=str(id(result)))
def test_run(orchestrator, mock_agent, mock_vector_db):
objective = "test_objective"
collection = "test_collection"
orchestrator.run(objective, collection)
mock_agent.process_task.assert_called()
mock_vector_db.append_document.assert_called()
| swarms-master | tests/orchestrate.py |
import pytest
import logging
from unittest.mock import patch, ANY
from swarms.swarms.swarms import HierarchicalSwarm
@pytest.fixture
def swarm():
return HierarchicalSwarm(
model_id='gpt-4',
openai_api_key='some_api_key',
use_vectorstore=True,
embedding_size=1024,
use_async=False,
human_in_the_loop=True,
model_type='openai',
boss_prompt='boss',
worker_prompt='worker',
temperature=0.5,
max_iterations=100,
logging_enabled=True
)
@pytest.fixture
def swarm_no_logging():
return HierarchicalSwarm(logging_enabled=False)
def test_swarm_init(swarm):
assert swarm.model_id == 'gpt-4'
assert swarm.openai_api_key == 'some_api_key'
assert swarm.use_vectorstore
assert swarm.embedding_size == 1024
assert not swarm.use_async
assert swarm.human_in_the_loop
assert swarm.model_type == 'openai'
assert swarm.boss_prompt == 'boss'
assert swarm.worker_prompt == 'worker'
assert swarm.temperature == 0.5
assert swarm.max_iterations == 100
assert swarm.logging_enabled
assert isinstance(swarm.logger, logging.Logger)
def test_swarm_no_logging_init(swarm_no_logging):
assert not swarm_no_logging.logging_enabled
assert swarm_no_logging.logger.disabled
@patch('swarms.swarms.swarms.OpenAI')
@patch('swarms.swarms.swarms.HuggingFaceLLM')
def test_initialize_llm(mock_huggingface, mock_openai, swarm):
swarm.initialize_llm('openai')
mock_openai.assert_called_once_with(openai_api_key='some_api_key', temperature=0.5)
swarm.initialize_llm('huggingface')
mock_huggingface.assert_called_once_with(model_id='gpt-4', temperature=0.5)
@patch('swarms.swarms.swarms.HierarchicalSwarm.initialize_llm')
def test_initialize_tools(mock_llm, swarm):
mock_llm.return_value = 'mock_llm_class'
tools = swarm.initialize_tools('openai')
assert 'mock_llm_class' in tools
@patch('swarms.swarms.swarms.HierarchicalSwarm.initialize_llm')
def test_initialize_tools_with_extra_tools(mock_llm, swarm):
mock_llm.return_value = 'mock_llm_class'
tools = swarm.initialize_tools('openai', extra_tools=['tool1', 'tool2'])
assert 'tool1' in tools
assert 'tool2' in tools
@patch('swarms.swarms.swarms.OpenAIEmbeddings')
@patch('swarms.swarms.swarms.FAISS')
def test_initialize_vectorstore(mock_faiss, mock_openai_embeddings, swarm):
mock_openai_embeddings.return_value.embed_query = 'embed_query'
swarm.initialize_vectorstore()
    mock_faiss.assert_called_once_with('embed_query', ANY, ANY, {})
| swarms-master | tests/swarms.py |
import pytest
from unittest.mock import Mock, patch
from langchain.tools import BaseTool  # assumed import path for the BaseTool spec used in the fixtures below
from swarms.agents.agents import AgentNodeInitializer, AgentNode, agent
# For initializing AgentNodeInitializer in multiple tests
@pytest.fixture
def mock_agent_node_initializer():
with patch('swarms.agents.agents.ChatOpenAI') as mock_llm, \
patch('swarms.agents.agents.AutoGPT') as mock_agent:
initializer = AgentNodeInitializer(model_type='openai', model_id='test', openai_api_key='test_key', temperature=0.5)
initializer.llm = mock_llm
initializer.tools = [Mock(spec=BaseTool)]
initializer.vectorstore = Mock()
initializer.agent = mock_agent
return initializer
# Test initialize_llm method of AgentNodeInitializer class
@pytest.mark.parametrize("model_type", ['openai', 'huggingface', 'invalid'])
def test_agent_node_initializer_initialize_llm(model_type, mock_agent_node_initializer):
with patch('swarms.agents.agents.ChatOpenAI') as mock_openai, \
patch('swarms.agents.agents.HuggingFaceLLM') as mock_huggingface:
if model_type == 'invalid':
with pytest.raises(ValueError):
mock_agent_node_initializer.initialize_llm(model_type, 'model_id', 'openai_api_key', 0.5)
else:
mock_agent_node_initializer.initialize_llm(model_type, 'model_id', 'openai_api_key', 0.5)
if model_type == 'openai':
mock_openai.assert_called_once()
elif model_type == 'huggingface':
mock_huggingface.assert_called_once()
# Test add_tool method of AgentNodeInitializer class
def test_agent_node_initializer_add_tool(mock_agent_node_initializer):
with patch('swarms.agents.agents.BaseTool') as mock_base_tool:
mock_agent_node_initializer.add_tool(mock_base_tool)
assert mock_base_tool in mock_agent_node_initializer.tools
# Test run method of AgentNodeInitializer class
@pytest.mark.parametrize("prompt", ['valid prompt', ''])
def test_agent_node_initializer_run(prompt, mock_agent_node_initializer):
if prompt == '':
with pytest.raises(ValueError):
mock_agent_node_initializer.run(prompt)
else:
assert mock_agent_node_initializer.run(prompt) == "Task completed by AgentNode"
# For initializing AgentNode in multiple tests
@pytest.fixture
def mock_agent_node():
with patch('swarms.agents.agents.ChatOpenAI') as mock_llm, \
patch('swarms.agents.agents.AgentNodeInitializer') as mock_agent_node_initializer:
mock_agent_node = AgentNode('test_key')
mock_agent_node.llm_class = mock_llm
mock_agent_node.vectorstore = Mock()
mock_agent_node_initializer.llm = mock_llm
return mock_agent_node
# Test initialize_llm method of AgentNode class
@pytest.mark.parametrize("llm_class", ['openai', 'huggingface'])
def test_agent_node_initialize_llm(llm_class, mock_agent_node):
with patch('swarms.agents.agents.ChatOpenAI') as mock_openai, \
patch('swarms.agents.agents.HuggingFaceLLM') as mock_huggingface:
mock_agent_node.initialize_llm(llm_class)
if llm_class == 'openai':
mock_openai.assert_called_once()
elif llm_class == 'huggingface':
mock_huggingface.assert_called_once()
# Test initialize_tools method of AgentNode class
def test_agent_node_initialize_tools(mock_agent_node):
with patch('swarms.agents.agents.DuckDuckGoSearchRun') as mock_ddg, \
patch('swarms.agents.agents.WriteFileTool') as mock_write_file, \
patch('swarms.agents.agents.ReadFileTool') as mock_read_file, \
patch('swarms.agents.agents.process_csv') as mock_process_csv, \
patch('swarms.agents.agents.WebpageQATool') as mock_webpage_qa:
mock_agent_node.initialize_tools('openai')
assert mock_ddg.called
assert mock_write_file.called
assert mock_read_file.called
assert mock_process_csv.called
assert mock_webpage_qa.called
# Test create_agent method of AgentNode class
def test_agent_node_create_agent(mock_agent_node):
with patch.object(mock_agent_node, 'initialize_llm'), \
patch.object(mock_agent_node, 'initialize_tools'), \
patch.object(mock_agent_node, 'initialize_vectorstore'), \
patch('swarms.agents.agents.AgentNodeInitializer') as mock_agent_node_initializer:
mock_agent_node.create_agent()
mock_agent_node_initializer.assert_called_once()
mock_agent_node_initializer.return_value.create_agent.assert_called_once()
# Test agent function
@pytest.mark.parametrize("openai_api_key,objective", [('valid_key', 'valid_objective'), ('', 'valid_objective'), ('valid_key', '')])
def test_agent(openai_api_key, objective):
if openai_api_key == '' or objective == '':
with pytest.raises(ValueError):
agent(openai_api_key, objective)
else:
with patch('swarms.agents.agents.AgentNodeInitializer') as mock_agent_node_initializer:
mock_agent_node = mock_agent_node_initializer.return_value.create_agent.return_value
mock_agent_node.run.return_value = 'Agent output'
result = agent(openai_api_key, objective)
assert result == 'Agent output'
| swarms-master | tests/agents/agents.py |
import pytest
from unittest.mock import Mock
from swarms.memory.oceandb import OceanDB
@pytest.fixture
def mock_ocean_client():
return Mock()
@pytest.fixture
def mock_collection():
return Mock()
@pytest.fixture
def ocean_db(mock_ocean_client):
OceanDB.client = mock_ocean_client
return OceanDB()
def test_init(ocean_db, mock_ocean_client):
mock_ocean_client.heartbeat.return_value = "OK"
assert ocean_db.client.heartbeat() == "OK"
def test_create_collection(ocean_db, mock_ocean_client, mock_collection):
mock_ocean_client.create_collection.return_value = mock_collection
collection = ocean_db.create_collection("test", "text")
assert collection == mock_collection
def test_append_document(ocean_db, mock_collection):
document = "test_document"
id = "test_id"
ocean_db.append_document(mock_collection, document, id)
mock_collection.add.assert_called_once_with(documents=[document], ids=[id])
def test_add_documents(ocean_db, mock_collection):
documents = ["test_document1", "test_document2"]
ids = ["test_id1", "test_id2"]
ocean_db.add_documents(mock_collection, documents, ids)
mock_collection.add.assert_called_once_with(documents=documents, ids=ids)
def test_query(ocean_db, mock_collection):
query_texts = ["test_query"]
n_results = 10
mock_collection.query.return_value = "query_result"
result = ocean_db.query(mock_collection, query_texts, n_results)
assert result == "query_result"
| swarms-master | tests/agents/memory/main.py |
import unittest
import os
from unittest.mock import patch
from langchain import HuggingFaceHub, ChatOpenAI
from swarms.models.llm import LLM
class TestLLM(unittest.TestCase):
@patch.object(HuggingFaceHub, '__init__', return_value=None)
@patch.object(ChatOpenAI, '__init__', return_value=None)
def setUp(self, mock_hf_init, mock_openai_init):
self.llm_openai = LLM(openai_api_key='mock_openai_key')
self.llm_hf = LLM(hf_repo_id='mock_repo_id', hf_api_token='mock_hf_token')
self.prompt = "Who won the FIFA World Cup in 1998?"
def test_init(self):
self.assertEqual(self.llm_openai.openai_api_key, 'mock_openai_key')
self.assertEqual(self.llm_hf.hf_repo_id, 'mock_repo_id')
self.assertEqual(self.llm_hf.hf_api_token, 'mock_hf_token')
@patch.object(HuggingFaceHub, 'run', return_value="France")
@patch.object(ChatOpenAI, 'run', return_value="France")
def test_run(self, mock_hf_run, mock_openai_run):
result_openai = self.llm_openai.run(self.prompt)
mock_openai_run.assert_called_once()
self.assertEqual(result_openai, "France")
result_hf = self.llm_hf.run(self.prompt)
mock_hf_run.assert_called_once()
self.assertEqual(result_hf, "France")
def test_error_on_no_keys(self):
with self.assertRaises(ValueError):
LLM()
@patch.object(os, 'environ', {})
def test_error_on_missing_hf_token(self):
with self.assertRaises(ValueError):
LLM(hf_repo_id='mock_repo_id')
@patch.dict(os.environ, {"HUGGINGFACEHUB_API_TOKEN": "mock_hf_token"})
def test_hf_token_from_env(self):
llm = LLM(hf_repo_id='mock_repo_id')
self.assertEqual(llm.hf_api_token, "mock_hf_token")
if __name__ == '__main__':
unittest.main()
| swarms-master | tests/agents/models/LLM.py |
import pytest
import torch
from unittest.mock import Mock
from swarms.models.huggingface import HuggingFaceLLM
@pytest.fixture
def mock_torch():
return Mock()
@pytest.fixture
def mock_autotokenizer():
return Mock()
@pytest.fixture
def mock_automodelforcausallm():
return Mock()
@pytest.fixture
def mock_bitsandbytesconfig():
return Mock()
@pytest.fixture
def hugging_face_llm(mock_torch, mock_autotokenizer, mock_automodelforcausallm, mock_bitsandbytesconfig):
HuggingFaceLLM.torch = mock_torch
HuggingFaceLLM.AutoTokenizer = mock_autotokenizer
HuggingFaceLLM.AutoModelForCausalLM = mock_automodelforcausallm
HuggingFaceLLM.BitsAndBytesConfig = mock_bitsandbytesconfig
return HuggingFaceLLM(model_id='test')
def test_init(hugging_face_llm, mock_autotokenizer, mock_automodelforcausallm):
assert hugging_face_llm.model_id == 'test'
mock_autotokenizer.from_pretrained.assert_called_once_with('test')
mock_automodelforcausallm.from_pretrained.assert_called_once_with('test', quantization_config=None)
def test_init_with_quantize(hugging_face_llm, mock_autotokenizer, mock_automodelforcausallm, mock_bitsandbytesconfig):
quantization_config = {
'load_in_4bit': True,
'bnb_4bit_use_double_quant': True,
'bnb_4bit_quant_type': "nf4",
'bnb_4bit_compute_dtype': torch.bfloat16
}
mock_bitsandbytesconfig.return_value = quantization_config
HuggingFaceLLM(model_id='test', quantize=True)
mock_bitsandbytesconfig.assert_called_once_with(**quantization_config)
mock_autotokenizer.from_pretrained.assert_called_once_with('test')
mock_automodelforcausallm.from_pretrained.assert_called_once_with('test', quantization_config=quantization_config)
def test_generate_text(hugging_face_llm):
prompt_text = 'test prompt'
expected_output = 'test output'
hugging_face_llm.tokenizer.encode.return_value = torch.tensor([0]) # Mock tensor
hugging_face_llm.model.generate.return_value = torch.tensor([0]) # Mock tensor
hugging_face_llm.tokenizer.decode.return_value = expected_output
output = hugging_face_llm.generate_text(prompt_text)
assert output == expected_output
| swarms-master | tests/agents/models/hf.py |
import pytest
from unittest.mock import MagicMock, patch
from langchain.agents import Tool
from langchain_experimental.autonomous_agents import AutoGPT  # assumed import path for the AutoGPT class patched below
from swarms.worker.worker_node import WorkerNodeInitializer, WorkerNode
# Mock Tool for testing
class MockTool(Tool):
pass
# Fixture for llm
@pytest.fixture
def mock_llm():
return MagicMock()
# Fixture for vectorstore
@pytest.fixture
def mock_vectorstore():
return MagicMock()
# Fixture for Tools
@pytest.fixture
def mock_tools():
return [MockTool(), MockTool(), MockTool()]
# Fixture for WorkerNodeInitializer
@pytest.fixture
def worker_node(mock_llm, mock_tools, mock_vectorstore):
return WorkerNodeInitializer(llm=mock_llm, tools=mock_tools, vectorstore=mock_vectorstore)
# Fixture for WorkerNode
@pytest.fixture
def mock_worker_node():
return WorkerNode(openai_api_key="test_api_key")
# WorkerNodeInitializer Tests
def test_worker_node_init(worker_node):
assert worker_node.llm is not None
assert worker_node.tools is not None
assert worker_node.vectorstore is not None
def test_worker_node_create_agent(worker_node):
with patch.object(AutoGPT, 'from_llm_and_tools') as mock_method:
worker_node.create_agent()
mock_method.assert_called_once()
def test_worker_node_add_tool(worker_node):
initial_tools_count = len(worker_node.tools)
new_tool = MockTool()
worker_node.add_tool(new_tool)
assert len(worker_node.tools) == initial_tools_count + 1
def test_worker_node_run(worker_node):
with patch.object(worker_node.agent, 'run') as mock_run:
worker_node.run(prompt="test prompt")
mock_run.assert_called_once()
# WorkerNode Tests
def test_worker_node_llm(mock_worker_node):
with patch.object(mock_worker_node, 'initialize_llm') as mock_method:
mock_worker_node.initialize_llm(llm_class=MagicMock(), temperature=0.5)
mock_method.assert_called_once()
def test_worker_node_tools(mock_worker_node):
with patch.object(mock_worker_node, 'initialize_tools') as mock_method:
mock_worker_node.initialize_tools(llm_class=MagicMock())
mock_method.assert_called_once()
def test_worker_node_vectorstore(mock_worker_node):
with patch.object(mock_worker_node, 'initialize_vectorstore') as mock_method:
mock_worker_node.initialize_vectorstore()
mock_method.assert_called_once()
def test_worker_node_create_worker_node(mock_worker_node):
with patch.object(mock_worker_node, 'create_worker_node') as mock_method:
mock_worker_node.create_worker_node()
mock_method.assert_called_once()
| swarms-master | tests/agents/workers/worker_node.py |