prompt: large_string, lengths 72 to 9.34k
completion: large_string, lengths 0 to 7.61k
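Each row below pairs a fill-in-the-middle (FIM) prompt with its completion: the prompt embeds a source file between <|fim▁begin|> and <|fim▁end|> (preceded by a <|file_name|>...<|end_file_name|> header), the hole is marked by <|fim_middle|> (or <|fim▁hole|> in some rows), and the completion holds the text that fills the hole. The following is a minimal sketch, assuming only the sentinel strings visible in these rows, of how one might splice a row back into a full source file; the helper name reassemble is illustrative, not part of the dataset.

import re

def reassemble(prompt: str, completion: str) -> str:
    """Splice a FIM completion back into its prompt to recover the full file."""
    # Drop the file-name header if present.
    body = re.sub(r"<\|file_name\|>.*?<\|end_file_name\|>", "", prompt, count=1)
    # Drop the begin/end wrappers.
    body = body.replace("<|fim▁begin|>", "").replace("<|fim▁end|>", "")
    # Some rows mark the hole with <|fim_middle|>, others with <|fim▁hole|>.
    for hole in ("<|fim_middle|>", "<|fim▁hole|>"):
        if hole in body:
            prefix, suffix = body.split(hole, 1)
            return prefix + completion + suffix
    return body  # no hole marker found; nothing to splice

Applied to the first row below, this should reproduce compute_user_popularity.py with the body of getActionScore restored.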
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): <|fim_middle|> def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
if action == "rec": return 0 elif action == "click" : return 1 else: return 2
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): <|fim_middle|> def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): <|fim_middle|> #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): <|fim_middle|> #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop;
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): <|fim_middle|> <|fim▁end|>
popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": <|fim_middle|> elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
return 0
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : <|fim_middle|> else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
return 1
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: <|fim_middle|> def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
return 2
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line <|fim_middle|> records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
lineNum += 1 continue
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": <|fim_middle|> elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
pop = 1
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": <|fim_middle|> elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
pop = 10
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": <|fim_middle|> return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
pop = 100
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def <|fim_middle|>(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
getActionScore
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def <|fim_middle|>(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
compute_interaction
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def <|fim_middle|>(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
compute_user_history_interaction
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def <|fim_middle|>(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def compute_user_popularity(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
get_action_weight
<|file_name|>compute_user_popularity.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # compute the times of action(rec|click|msg) for each user from math import sqrt def getActionScore(action): if action == "rec": return 0 elif action == "click" : return 1 else: return 2 def compute_interaction(data): interaction = {} for line in data: (userA,userB,times,action) = line.split(' ') action = action[:-1] key = userB + " " + action interaction.setdefault(key, 0) interaction[key] += 1 return interaction def compute_user_history_interaction(trainFile): records = [] lineList = [] lineNum = 1 result = [] lineList = [line for line in file(trainFile)] for line in lineList: if lineNum == 1: #ignore the title in first line lineNum += 1 continue records.append(line) lineNum += 1 interaction = compute_interaction(records) out = file('user_interaction.txt', 'w') for (key, times) in interaction.items(): out.write('%s %d' % (key, times)) out.write('\n') for (key, times) in interaction.items(): user, action = key.split(' '); result.append((user, action, times)) return result #get the weight for each type of action def get_action_weight(action): pop = 0; if action == "rec": pop = 1 elif action == "click": pop = 10 elif action == "msg": pop = 100 return pop; #trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)] def <|fim_middle|>(trainFile, user_popularity_file): popDict = {} rankedscores = [] result = [] print "-----compute_user_history_interaction ... " interaction = compute_user_history_interaction(trainFile) print "-----compute_user_popularity ... " for (user, action, times) in interaction[0:len(interaction)]: popDict.setdefault(user, 0) popDict[user] += get_action_weight(action) * times ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()] ranked_popularity.sort() ranked_popularity.reverse() print "-----ranking_user_popularity ... " result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]] print "-----output user_popularity ... " out = file(user_popularity_file, 'w') for (user, popularity) in result[0:len(result)]: out.write('%s %d\n' % (user, popularity)) print "-----Ending ... " return result<|fim▁end|>
compute_user_popularity
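The file embedded in the rows above is Python 2 code (print statements, the file() builtin). Below is a minimal Python 3 sketch of the same weighted-popularity computation, for reference; the action weights (rec=1, click=10, msg=100) and the "userA userB times action" line format are taken from the rows above, while the function name and type hints here are illustrative only.

from collections import defaultdict

ACTION_WEIGHT = {"rec": 1, "click": 10, "msg": 100}

def compute_user_popularity(train_path: str) -> list:
    """Rank users (userB) by weighted interaction counts, highest first."""
    # Count how many lines mention each (userB, action) pair, as the
    # original compute_interaction does (the per-line times field is unused).
    counts = defaultdict(int)
    with open(train_path) as f:
        next(f)  # skip the title line, as the original does
        for line in f:
            _user_a, user_b, _times, action = line.split()
            counts[(user_b, action)] += 1
    popularity = defaultdict(int)
    for (user, action), times in counts.items():
        popularity[user] += ACTION_WEIGHT.get(action, 0) * times
    return sorted(popularity.items(), key=lambda kv: kv[1], reverse=True)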
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood)<|fim▁hole|> gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) 
self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main()<|fim▁end|>
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): <|fim_middle|> class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance 
fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): <|fim_middle|> class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache 
- slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x)
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): <|fim_middle|> def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance 
self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
        super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
        self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1)))
        self.covar_module = InducingPointKernel(
            self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood
        )
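Note: the InducingPointKernel built in this completion is GPyTorch's SGPR-style sparse kernel, and it registers the inducing point locations as a learnable parameter, so the same optimizer that tunes the lengthscale and noise also moves the inducing points. A minimal sketch under that assumption; the attribute path covar_module.inducing_points reflects how recent GPyTorch versions expose the locations and should be read as an assumption rather than a guaranteed API:

# Illustrative sketch: the SGPR inducing points are trainable parameters.
# Assumes GPRegressionModel and make_data from the surrounding file.
train_x, train_y, _, _ = make_data()
likelihood = GaussianLikelihood()
gp_model = GPRegressionModel(train_x, train_y, likelihood)
z = gp_model.covar_module.inducing_points  # initialized from torch.linspace(0, 1, 32)
print(z.requires_grad)  # expected True: locations are optimized jointly with the hyperparameters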
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): <|fim_middle|> class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # 
Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return MultivariateNormal(mean_x, covar_x)
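For orientation, a minimal usage sketch of the model defined by the two completions above. It mirrors the training loop used in the tests and only adds a confidence_region() query, which GPyTorch's MultivariateNormal provides; the 30 optimizer steps and the no_grad context are illustrative choices, not requirements:

# Illustrative sketch: fit the SGPR model briefly, then query the predictive distribution.
train_x, train_y, test_x, test_y = make_data()
likelihood = GaussianLikelihood()
gp_model = GPRegressionModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
for _ in range(30):
    optimizer.zero_grad()
    loss = -mll(gp_model(train_x), train_y)  # negative marginal log likelihood
    loss.backward()
    optimizer.step()
gp_model.eval()
likelihood.eval()
with torch.no_grad():
    pred = likelihood(gp_model(test_x))      # a MultivariateNormal
    mean, var = pred.mean, pred.variance
    lower, upper = pred.confidence_region()  # roughly +/- 2 standard deviations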
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): <|fim_middle|> if __name__ == "__main__": unittest.main() <|fim▁end|>
    def setUp(self):
        if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(0)
            random.seed(0)

    def tearDown(self):
        if hasattr(self, "rng_state"):
            torch.set_rng_state(self.rng_state)

    def test_sgpr_mean_abs_error(self):
        # Suppress numerical warnings
        warnings.simplefilter("ignore", NumericalWarning)

        train_x, train_y, test_x, test_y = make_data()
        likelihood = GaussianLikelihood()
        gp_model = GPRegressionModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Optimize the model
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
        for _ in range(30):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

        # Test the model
        gp_model.eval()
        likelihood.eval()

        test_preds = likelihood(gp_model(test_x)).mean
        mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
        self.assertLess(mean_abs_error.squeeze().item(), 0.05)

    def test_sgpr_fast_pred_var(self):
        # Suppress numerical warnings
        warnings.simplefilter("ignore", NumericalWarning)

        train_x, train_y, test_x, test_y = make_data()
        likelihood = GaussianLikelihood()
        gp_model = GPRegressionModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Optimize the model
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
        for _ in range(50):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

        # Test the model
        gp_model.eval()
        likelihood.eval()

        with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50):
            with gpytorch.settings.fast_pred_var(True):
                fast_var = gp_model(test_x).variance
                fast_var_cache = gp_model(test_x).variance
                self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3)

            with gpytorch.settings.fast_pred_var(False):
                slow_var = gp_model(test_x).variance
                self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3)

    def test_sgpr_mean_abs_error_cuda(self):
        # Suppress numerical warnings
        warnings.simplefilter("ignore", NumericalWarning)

        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y, test_x, test_y = make_data(cuda=True)
            likelihood = GaussianLikelihood().cuda()
            gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda()
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

            # Optimize the model
            gp_model.train()
            likelihood.train()

            optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

                for param in gp_model.parameters():
                    self.assertTrue(param.grad is not None)
                    self.assertGreater(param.grad.norm().item(), 0)

            # Test the model
            gp_model.eval()
            likelihood.eval()

            test_preds = likelihood(gp_model(test_x)).mean
            mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
            self.assertLess(mean_abs_error.squeeze().item(), 0.02)
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): <|fim_middle|> def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), 
gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
        if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(0)
            random.seed(0)
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): <|fim_middle|> def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) 
self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
        if hasattr(self, "rng_state"):
            torch.set_rng_state(self.rng_state)
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings <|fim_middle|> def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) 
likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
        warnings.simplefilter("ignore", NumericalWarning)

        train_x, train_y, test_x, test_y = make_data()
        likelihood = GaussianLikelihood()
        gp_model = GPRegressionModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Optimize the model
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
        for _ in range(30):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

        # Test the model
        gp_model.eval()
        likelihood.eval()

        test_preds = likelihood(gp_model(test_x)).mean
        mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
        self.assertLess(mean_abs_error.squeeze().item(), 0.05)
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings <|fim_middle|> def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), 
lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
        warnings.simplefilter("ignore", NumericalWarning)

        train_x, train_y, test_x, test_y = make_data()
        likelihood = GaussianLikelihood()
        gp_model = GPRegressionModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Optimize the model
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
        for _ in range(50):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

        # Test the model
        gp_model.eval()
        likelihood.eval()

        with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50):
            with gpytorch.settings.fast_pred_var(True):
                fast_var = gp_model(test_x).variance
                fast_var_cache = gp_model(test_x).variance
                self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3)

            with gpytorch.settings.fast_pred_var(False):
                slow_var = gp_model(test_x).variance
                self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3)
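A short sketch of how the fast_pred_var setting exercised by this completion is typically used at prediction time: enable it around the posterior call so the fast (LOVE-style) predictive variance caches are built once and reused. The no_grad context is a typical-usage assumption, not something the test itself requires:

# Illustrative sketch: fast predictive variances at evaluation time.
gp_model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    post = likelihood(gp_model(test_x))
    fast_mean = post.mean
    fast_var = post.variance  # computed via the cached fast-variance approximation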
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings <|fim_middle|> if __name__ == "__main__": unittest.main() <|fim▁end|>
        warnings.simplefilter("ignore", NumericalWarning)

        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y, test_x, test_y = make_data(cuda=True)
            likelihood = GaussianLikelihood().cuda()
            gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda()
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

            # Optimize the model
            gp_model.train()
            likelihood.train()

            optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

                for param in gp_model.parameters():
                    self.assertTrue(param.grad is not None)
                    self.assertGreater(param.grad.norm().item(), 0)

            # Test the model
            gp_model.eval()
            likelihood.eval()

            test_preds = likelihood(gp_model(test_x)).mean
            mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
            self.assertLess(mean_abs_error.squeeze().item(), 0.02)
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: <|fim_middle|> return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 
0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
        train_x = train_x.cuda()
        train_y = train_y.cuda()
        test_x = test_x.cuda()
        test_y = test_y.cuda()
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": <|fim_middle|> def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() 
likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(0)
            random.seed(0)
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): <|fim_middle|> random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
torch.cuda.manual_seed_all(0)
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): <|fim_middle|> def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad 
is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
torch.set_rng_state(self.rng_state)
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): <|fim_middle|> with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
return
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": <|fim_middle|> <|fim▁end|>
unittest.main()
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def <|fim_middle|>(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
make_data
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def <|fim_middle|>(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
__init__
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def <|fim_middle|>(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in 
gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
forward
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def <|fim_middle|>(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in 
gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
setUp
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def <|fim_middle|>(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
tearDown
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def <|fim_middle|>(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
test_sgpr_mean_abs_error
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def <|fim_middle|>(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def test_sgpr_mean_abs_error_cuda(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
test_sgpr_fast_pred_var
<|file_name|>test_sgpr_regression.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import os import random import unittest import warnings from math import exp, pi import gpytorch import torch from gpytorch.distributions import MultivariateNormal from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.means import ConstantMean from gpytorch.priors import SmoothedBoxPrior from gpytorch.test.utils import least_used_cuda_device from gpytorch.utils.warnings import NumericalWarning from torch import optim # Simple training data: let's try to learn a sine function, # but with SGPR # let's use 100 training examples. def make_data(cuda=False): train_x = torch.linspace(0, 1, 100) train_y = torch.sin(train_x * (2 * pi)) train_y.add_(torch.randn_like(train_y), alpha=1e-2) test_x = torch.rand(51) test_y = torch.sin(test_x * (2 * pi)) if cuda: train_x = train_x.cuda() train_y = train_y.cuda() test_x = test_x.cuda() test_y = test_y.cuda() return train_x, train_y, test_x, test_y class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5)) self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1))) self.covar_module = InducingPointKernel( self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return MultivariateNormal(mean_x, covar_x) class TestSGPRRegression(unittest.TestCase): def setUp(self): if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false": self.rng_state = torch.get_rng_state() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) random.seed(0) def tearDown(self): if hasattr(self, "rng_state"): torch.set_rng_state(self.rng_state) def test_sgpr_mean_abs_error(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(30): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.05) def test_sgpr_fast_pred_var(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) train_x, train_y, test_x, test_y = make_data() likelihood = GaussianLikelihood() gp_model = GPRegressionModel(train_x, train_y, likelihood) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) for _ in range(50): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.step() for param in gp_model.parameters(): 
self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50): with gpytorch.settings.fast_pred_var(True): fast_var = gp_model(test_x).variance fast_var_cache = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3) with gpytorch.settings.fast_pred_var(False): slow_var = gp_model(test_x).variance self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3) def <|fim_middle|>(self): # Suppress numerical warnings warnings.simplefilter("ignore", NumericalWarning) if not torch.cuda.is_available(): return with least_used_cuda_device(): train_x, train_y, test_x, test_y = make_data(cuda=True) likelihood = GaussianLikelihood().cuda() gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda() mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model) # Optimize the model gp_model.train() likelihood.train() optimizer = optim.Adam(gp_model.parameters(), lr=0.1) optimizer.n_iter = 0 for _ in range(25): optimizer.zero_grad() output = gp_model(train_x) loss = -mll(output, train_y) loss.backward() optimizer.n_iter += 1 optimizer.step() for param in gp_model.parameters(): self.assertTrue(param.grad is not None) self.assertGreater(param.grad.norm().item(), 0) # Test the model gp_model.eval() likelihood.eval() test_preds = likelihood(gp_model(test_x)).mean mean_abs_error = torch.mean(torch.abs(test_y - test_preds)) self.assertLess(mean_abs_error.squeeze().item(), 0.02) if __name__ == "__main__": unittest.main() <|fim▁end|>
test_sgpr_mean_abs_error_cuda
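The rows above all mask different spans of the same GPyTorch SGPR regression test. As a reading aid only (not part of the dataset), here is a minimal, hedged sketch of the model those rows keep reconstructing: an exact GP whose base kernel is wrapped in an InducingPointKernel and queried under fast_pred_var. Variable names, the noise level, and the training-loop length are illustrative assumptions.

# Illustrative sketch only -- assumes a recent GPyTorch API, mirroring the records above.
import torch
import gpytorch

train_x = torch.linspace(0, 1, 100)
train_y = torch.sin(train_x * 6.2832) + 0.01 * torch.randn(100)

likelihood = gpytorch.likelihoods.GaussianLikelihood()


class SGPRModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        base_kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        # SGPR: wrap the base kernel so a low-rank inducing-point approximation is used.
        self.covar_module = gpytorch.kernels.InducingPointKernel(
            base_kernel, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood
        )

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x)
        )


model = SGPRModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

model.train()
likelihood.train()
for _ in range(30):
    optimizer.zero_grad()
    loss = -mll(model(train_x), train_y)
    loss.backward()
    optimizer.step()

model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    pred = likelihood(model(torch.rand(10)))
    mean, variance = pred.mean, pred.variance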
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project<|fim▁hole|> from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env))<|fim▁end|>
import os import shutil import sys import SocketServer
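Each row in this dump pairs a prompt (a source file with one span cut out and replaced by a fill-in-the-middle marker) with the completion that belongs in that span; the test_sgpr_regression.py rows use <|fim_middle|> while the fabfile.py row above uses <|fim▁hole|>. A small hypothetical helper (not part of the dataset; the marker strings are copied from the rows, everything else is assumed) makes the round trip explicit:

# Hypothetical inspection helper for rows of this dump.
FIM_MARKERS = ("<|fim_middle|>", "<|fim▁hole|>")


def reassemble(prompt: str, completion: str) -> str:
    """Splice a row's completion back into its prompt and strip the framing tokens."""
    if "<|end_file_name|>" in prompt:
        prompt = prompt.split("<|end_file_name|>", 1)[1]
    body = prompt.replace("<|fim▁begin|>", "").replace("<|fim▁end|>", "")
    for marker in FIM_MARKERS:
        if marker in body:
            return body.replace(marker, completion, 1)
    return body  # no marker found: the prompt is already the full file

Applied to the fabfile.py row above, this would place the os, shutil, sys, and SocketServer imports back between the fabric imports and the pelican.server import.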
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): <|fim_middle|> def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
"""Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH)
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): <|fim_middle|> def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
"""Build local version of site""" local('pelican -s pelicanconf.py')
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): <|fim_middle|> def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
"""`build` with the delete switch""" local('pelican -d -s pelicanconf.py')
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): <|fim_middle|> def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
"""Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py')
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): <|fim_middle|> def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
"""Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever()
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): <|fim_middle|> server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
allow_reuse_address = True
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): <|fim_middle|> def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
"""`build`, then `serve`""" build() serve()
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): <|fim_middle|> def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
"""Build production version of site""" local('pelican -s publishconf.py')
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): <|fim_middle|> @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
"""Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env))
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): <|fim_middle|> def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
"""Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', )
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): <|fim_middle|> <|fim▁end|>
"""Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env))
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): <|fim_middle|> def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH)
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def <|fim_middle|>(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
clean
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def <|fim_middle|>(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
build
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def <|fim_middle|>(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
rebuild
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def <|fim_middle|>(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
regenerate
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def <|fim_middle|>(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
serve
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def <|fim_middle|>(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
reserve
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def <|fim_middle|>(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
preview
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def <|fim_middle|>(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
cf_upload
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def <|fim_middle|>(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
publish
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def <|fim_middle|>(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env)) <|fim▁end|>
gh_pages
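Every fabfile.py record above follows the same shape: the prompt carries the whole file with either a <|fim▁hole|> or <|fim_middle|> marker where a span was removed, and the completion carries the masked span (a block of code or just an identifier). A rough sketch of how such a pair can be spliced back into plain source text, assuming only the marker spellings visible in these records and using made-up demo strings rather than actual rows, is:

# Illustrative sketch: splice a completion back into its prompt to recover the file.
# The marker spellings are taken from the records above; demo_prompt/demo_completion
# are hypothetical stand-ins, not rows from this dump.
MARKERS = ('<|fim_middle|>', '<|fim▁hole|>')

def strip_wrappers(prompt):
    # Drop the <|file_name|>...<|end_file_name|> header and the begin/end tokens.
    body = prompt.split('<|fim▁begin|>', 1)[-1]
    return body.rsplit('<|fim▁end|>', 1)[0]

def splice(prompt, completion):
    body = strip_wrappers(prompt)
    for marker in MARKERS:
        if marker in body:
            return body.replace(marker, completion, 1)
    return body

if __name__ == '__main__':
    demo_prompt = ('<|file_name|>demo.py<|end_file_name|><|fim▁begin|>'
                   'def greet():\n    <|fim_middle|>\n<|fim▁end|>')
    demo_completion = 'return "hello"'
    print(splice(demo_prompt, demo_completion))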
<|file_name|>parts_descriptor_test.py<|end_file_name|><|fim▁begin|>import amitgroup as ag import numpy as np <|fim▁hole|> pd = ag.features.PartsDescriptor((5, 5), 20, patch_frame=1, edges_threshold=5, samples_per_image=10) # Use only 100 of the digits pd.train_from_images(data) # Save the model to a file. #pd.save('parts_model.npy') # You can then load it again by #pd = ag.features.PartsDescriptor.load(filename) # Then you can extract features by #features = pd.extract_features(image) # Visualize the parts ag.plot.images(pd.visparts)<|fim▁end|>
ag.set_verbose(True) # This requires you to have the MNIST data set. data, digits = ag.io.load_mnist('training', selection=slice(0, 100))
<|file_name|>util.py<|end_file_name|><|fim▁begin|># proxy module<|fim▁hole|><|fim▁end|>
from apptools.logger.util import *
<|file_name|>environmental_probe.py<|end_file_name|><|fim▁begin|># =============================================================================== # Copyright 2014 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= # ============= standard library imports ======================== # ============= local library imports ========================== from __future__ import absolute_import from pychron.hardware.core.core_device import CoreDevice class TempHumMicroServer(CoreDevice): """ http://www.omega.com/Manuals/manualpdf/M3861.pdf iServer MicroServer <|fim▁hole|> scan_func = 'read_temperature' def read_temperature(self, **kw): v = self.ask('*SRTF', timeout=1.0, **kw) return self._parse_response(v) def read_humidity(self, **kw): v = self.ask('*SRH', timeout=1.0, **kw) return self._parse_response(v) def _parse_response(self, v): try: return float(v) except (AttributeError, ValueError, TypeError): return self.get_random_value() # ============= EOF =============================================<|fim▁end|>
tested with iTHX-W """
<|file_name|>environmental_probe.py<|end_file_name|><|fim▁begin|># =============================================================================== # Copyright 2014 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= # ============= standard library imports ======================== # ============= local library imports ========================== from __future__ import absolute_import from pychron.hardware.core.core_device import CoreDevice class TempHumMicroServer(CoreDevice): <|fim_middle|> # ============= EOF ============================================= <|fim▁end|>
""" http://www.omega.com/Manuals/manualpdf/M3861.pdf iServer MicroServer tested with iTHX-W """ scan_func = 'read_temperature' def read_temperature(self, **kw): v = self.ask('*SRTF', timeout=1.0, **kw) return self._parse_response(v) def read_humidity(self, **kw): v = self.ask('*SRH', timeout=1.0, **kw) return self._parse_response(v) def _parse_response(self, v): try: return float(v) except (AttributeError, ValueError, TypeError): return self.get_random_value()
<|file_name|>environmental_probe.py<|end_file_name|><|fim▁begin|># =============================================================================== # Copyright 2014 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= # ============= standard library imports ======================== # ============= local library imports ========================== from __future__ import absolute_import from pychron.hardware.core.core_device import CoreDevice class TempHumMicroServer(CoreDevice): """ http://www.omega.com/Manuals/manualpdf/M3861.pdf iServer MicroServer tested with iTHX-W """ scan_func = 'read_temperature' def read_temperature(self, **kw): <|fim_middle|> def read_humidity(self, **kw): v = self.ask('*SRH', timeout=1.0, **kw) return self._parse_response(v) def _parse_response(self, v): try: return float(v) except (AttributeError, ValueError, TypeError): return self.get_random_value() # ============= EOF ============================================= <|fim▁end|>
v = self.ask('*SRTF', timeout=1.0, **kw) return self._parse_response(v)
<|file_name|>environmental_probe.py<|end_file_name|><|fim▁begin|># =============================================================================== # Copyright 2014 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= # ============= standard library imports ======================== # ============= local library imports ========================== from __future__ import absolute_import from pychron.hardware.core.core_device import CoreDevice class TempHumMicroServer(CoreDevice): """ http://www.omega.com/Manuals/manualpdf/M3861.pdf iServer MicroServer tested with iTHX-W """ scan_func = 'read_temperature' def read_temperature(self, **kw): v = self.ask('*SRTF', timeout=1.0, **kw) return self._parse_response(v) def read_humidity(self, **kw): <|fim_middle|> def _parse_response(self, v): try: return float(v) except (AttributeError, ValueError, TypeError): return self.get_random_value() # ============= EOF ============================================= <|fim▁end|>
v = self.ask('*SRH', timeout=1.0, **kw) return self._parse_response(v)
<|file_name|>environmental_probe.py<|end_file_name|><|fim▁begin|># =============================================================================== # Copyright 2014 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= # ============= standard library imports ======================== # ============= local library imports ========================== from __future__ import absolute_import from pychron.hardware.core.core_device import CoreDevice class TempHumMicroServer(CoreDevice): """ http://www.omega.com/Manuals/manualpdf/M3861.pdf iServer MicroServer tested with iTHX-W """ scan_func = 'read_temperature' def read_temperature(self, **kw): v = self.ask('*SRTF', timeout=1.0, **kw) return self._parse_response(v) def read_humidity(self, **kw): v = self.ask('*SRH', timeout=1.0, **kw) return self._parse_response(v) def _parse_response(self, v): <|fim_middle|> # ============= EOF ============================================= <|fim▁end|>
try: return float(v) except (AttributeError, ValueError, TypeError): return self.get_random_value()
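The _parse_response records show the driver's fallback behaviour: any reading that cannot be coerced to a float is replaced by a simulated value. A self-contained sketch of that logic follows; fake_reading() is a random stand-in for CoreDevice.get_random_value(), which is assumed here and not defined anywhere in these records.

# Illustrative sketch of the _parse_response fallback seen in the records above.
# fake_reading() replaces CoreDevice.get_random_value() so the example runs on its own.
import random

def fake_reading():
    return random.uniform(20.0, 25.0)

def parse_response(v):
    try:
        return float(v)
    except (AttributeError, ValueError, TypeError):
        # Bad or missing reading: fall back to a simulated value.
        return fake_reading()

if __name__ == '__main__':
    print(parse_response('72.5'))   # -> 72.5
    print(parse_response(None))     # -> simulated value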
<|file_name|>environmental_probe.py<|end_file_name|><|fim▁begin|># ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.hardware.core.core_device import CoreDevice


class TempHumMicroServer(CoreDevice):
    """
    http://www.omega.com/Manuals/manualpdf/M3861.pdf
    iServer MicroServer
    tested with iTHX-W
    """
    scan_func = 'read_temperature'

    def <|fim_middle|>(self, **kw):
        v = self.ask('*SRTF', timeout=1.0, **kw)
        return self._parse_response(v)

    def read_humidity(self, **kw):
        v = self.ask('*SRH', timeout=1.0, **kw)
        return self._parse_response(v)

    def _parse_response(self, v):
        try:
            return float(v)
        except (AttributeError, ValueError, TypeError):
            return self.get_random_value()

# ============= EOF =============================================
<|fim▁end|>
read_temperature
<|file_name|>environmental_probe.py<|end_file_name|><|fim▁begin|># ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.hardware.core.core_device import CoreDevice


class TempHumMicroServer(CoreDevice):
    """
    http://www.omega.com/Manuals/manualpdf/M3861.pdf
    iServer MicroServer
    tested with iTHX-W
    """
    scan_func = 'read_temperature'

    def read_temperature(self, **kw):
        v = self.ask('*SRTF', timeout=1.0, **kw)
        return self._parse_response(v)

    def <|fim_middle|>(self, **kw):
        v = self.ask('*SRH', timeout=1.0, **kw)
        return self._parse_response(v)

    def _parse_response(self, v):
        try:
            return float(v)
        except (AttributeError, ValueError, TypeError):
            return self.get_random_value()

# ============= EOF =============================================
<|fim▁end|>
read_humidity
<|file_name|>environmental_probe.py<|end_file_name|><|fim▁begin|># ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.hardware.core.core_device import CoreDevice


class TempHumMicroServer(CoreDevice):
    """
    http://www.omega.com/Manuals/manualpdf/M3861.pdf
    iServer MicroServer
    tested with iTHX-W
    """
    scan_func = 'read_temperature'

    def read_temperature(self, **kw):
        v = self.ask('*SRTF', timeout=1.0, **kw)
        return self._parse_response(v)

    def read_humidity(self, **kw):
        v = self.ask('*SRH', timeout=1.0, **kw)
        return self._parse_response(v)

    def <|fim_middle|>(self, v):
        try:
            return float(v)
        except (AttributeError, ValueError, TypeError):
            return self.get_random_value()

# ============= EOF =============================================
<|fim▁end|>
_parse_response
<|file_name|>testroute.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from .base import BaseHandler<|fim▁hole|>class TestRoute(BaseHandler):
    def get(self, file):
        return self.render(str(file) + '.jade', show_h1=1)<|fim▁end|>
<|file_name|>testroute.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from .base import BaseHandler


class TestRoute(BaseHandler):
    <|fim_middle|>
<|fim▁end|>
def get(self, file):
        return self.render(str(file) + '.jade', show_h1=1)
<|file_name|>testroute.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from .base import BaseHandler


class TestRoute(BaseHandler):
    def get(self, file):
        <|fim_middle|>
<|fim▁end|>
return self.render(str(file) + '.jade', show_h1=1)
<|file_name|>testroute.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from .base import BaseHandler


class TestRoute(BaseHandler):
    def <|fim_middle|>(self, file):
        return self.render(str(file) + '.jade', show_h1=1)
<|fim▁end|>
get
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 
assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) <|fim▁hole|><|fim▁end|>
print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time
import os
import ContentDb
from Debug import Debug
from Config import config


class ContentDbDict(dict):
    <|fim_middle|>
if __name__ == "__main__":
    import psutil
    process = psutil.Process(os.getpid())
    s_mem = process.memory_info()[0] / float(2 ** 20)
    root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27"
    contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root)
    print "Init len", len(contents)
    s = time.time()
    for dir_name in os.listdir(root + "/data/users/")[0:8000]:
        contents["data/users/%s/content.json" % dir_name]
    print "Load: %.3fs" % (time.time() - s)
    s = time.time()
    found = 0
    for key, val in contents.iteritems():
        found += 1
        assert key
        assert val
    print "Found:", found
    print "Iteritem: %.3fs" % (time.time() - s)
    s = time.time()
    found = 0
    for key in contents.keys():
        found += 1
        assert key in contents
    print "In: %.3fs" % (time.time() - s)
    print "Len:", len(contents.values()), len(contents.keys())
    print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem
<|fim▁end|>
def __init__(self, site, *args, **kwargs):
        s = time.time()
        self.site = site
        self.cached_keys = []
        self.log = self.site.log
        self.db = ContentDb.getContentDb()
        self.db_id = self.db.needSite(site)
        self.num_loaded = 0
        super(ContentDbDict, self).__init__(self.db.loadDbDict(site))  # Load keys from database
        self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids)))

    def loadItem(self, key):
        try:
            self.num_loaded += 1
            if self.num_loaded % 100 == 0:
                if config.verbose:
                    self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack()))
                else:
                    self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key))
            content = self.site.storage.loadJson(key)
            dict.__setitem__(self, key, content)
        except IOError:
            if dict.get(self, key):
                self.__delitem__(key)  # File not exists anymore
            raise KeyError(key)
        self.addCachedKey(key)
        self.checkLimit()
        return content

    def getItemSize(self, key):
        return self.site.storage.getSize(key)

    # Only keep last 10 accessed json in memory
    def checkLimit(self):
        if len(self.cached_keys) > 10:
            key_deleted = self.cached_keys.pop(0)
            dict.__setitem__(self, key_deleted, False)

    def addCachedKey(self, key):
        if key not in self.cached_keys and key != "content.json" and len(key) > 40:  # Always keep keys smaller than 40 char
            self.cached_keys.append(key)

    def __getitem__(self, key):
        val = dict.get(self, key)
        if val:  # Already loaded
            return val
        elif val is None:  # Unknown key
            raise KeyError(key)
        elif val is False:  # Loaded before, but purged from cache
            return self.loadItem(key)

    def __setitem__(self, key, val):
        self.addCachedKey(key)
        self.checkLimit()
        size = self.getItemSize(key)
        self.db.setContent(self.site, key, val, size)
        dict.__setitem__(self, key, val)

    def __delitem__(self, key):
        self.db.deleteContent(self.site, key)
        dict.__delitem__(self, key)
        try:
            self.cached_keys.remove(key)
        except ValueError:
            pass

    def iteritems(self):
        for key in dict.keys(self):
            try:
                val = self[key]
            except Exception as err:
                self.log.warning("Error loading %s: %s" % (key, err))
                continue
            yield key, val

    def items(self):
        back = []
        for key in dict.keys(self):
            try:
                val = self[key]
            except Exception as err:
                self.log.warning("Error loading %s: %s" % (key, err))
                continue
            back.append((key, val))
        return back

    def values(self):
        back = []
        for key, val in dict.iteritems(self):
            if not val:
                try:
                    val = self.loadItem(key)
                except Exception:
                    continue
            back.append(val)
        return back

    def get(self, key, default=None):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default
        except Exception as err:
            self.site.bad_files[key] = self.site.bad_files.get(key, 1)
            dict.__delitem__(self, key)
            self.log.warning("Error loading %s: %s" % (key, err))
            return default

    def execute(self, query, params={}):
        params["site_id"] = self.db_id
        return self.db.execute(query, params)
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): <|fim_middle|> def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
s = time.time()
        self.site = site
        self.cached_keys = []
        self.log = self.site.log
        self.db = ContentDb.getContentDb()
        self.db_id = self.db.needSite(site)
        self.num_loaded = 0
        super(ContentDbDict, self).__init__(self.db.loadDbDict(site))  # Load keys from database
        self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids)))
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): <|fim_middle|> def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
try:
            self.num_loaded += 1
            if self.num_loaded % 100 == 0:
                if config.verbose:
                    self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack()))
                else:
                    self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key))
            content = self.site.storage.loadJson(key)
            dict.__setitem__(self, key, content)
        except IOError:
            if dict.get(self, key):
                self.__delitem__(key)  # File not exists anymore
            raise KeyError(key)
        self.addCachedKey(key)
        self.checkLimit()
        return content
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): <|fim_middle|> # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val 
print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
return self.site.storage.getSize(key)
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): <|fim_middle|> def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = 
time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
if len(self.cached_keys) > 10:
            key_deleted = self.cached_keys.pop(0)
            dict.__setitem__(self, key_deleted, False)
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): <|fim_middle|> def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in 
contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
if key not in self.cached_keys and key != "content.json" and len(key) > 40:  # Always keep keys smaller than 40 char
            self.cached_keys.append(key)
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): <|fim_middle|> def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print 
"In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
val = dict.get(self, key)
        if val:  # Already loaded
            return val
        elif val is None:  # Unknown key
            raise KeyError(key)
        elif val is False:  # Loaded before, but purged from cache
            return self.loadItem(key)
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): <|fim_middle|> def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in 
contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
self.addCachedKey(key)
        self.checkLimit()
        size = self.getItemSize(key)
        self.db.setContent(self.site, key, val, size)
        dict.__setitem__(self, key, val)
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): <|fim_middle|> def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found 
= 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
self.db.deleteContent(self.site, key)
        dict.__delitem__(self, key)
        try:
            self.cached_keys.remove(key)
        except ValueError:
            pass
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): <|fim_middle|> def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in 
contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
for key in dict.keys(self):
            try:
                val = self[key]
            except Exception as err:
                self.log.warning("Error loading %s: %s" % (key, err))
                continue
            yield key, val
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): <|fim_middle|> def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in 
contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
back = []
        for key in dict.keys(self):
            try:
                val = self[key]
            except Exception as err:
                self.log.warning("Error loading %s: %s" % (key, err))
                continue
            back.append((key, val))
        return back
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): <|fim_middle|> def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in 
contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
back = []
        for key, val in dict.iteritems(self):
            if not val:
                try:
                    val = self.loadItem(key)
                except Exception:
                    continue
            back.append(val)
        return back
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): <|fim_middle|> def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", 
len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
try:
            return self.__getitem__(key)
        except KeyError:
            return default
        except Exception as err:
            self.site.bad_files[key] = self.site.bad_files.get(key, 1)
            dict.__delitem__(self, key)
            self.log.warning("Error loading %s: %s" % (key, err))
            return default
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): <|fim_middle|> if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print 
"Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
params["site_id"] = self.db_id return self.db.execute(query, params)
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: <|fim_middle|> content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: 
%.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
if config.verbose:
                    self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack()))
                else:
                    self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key))
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: <|fim_middle|> else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = 
time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack()))
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: <|fim_middle|> content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print 
"Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key))
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): <|fim_middle|> raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print 
"Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
self.__delitem__(key) # File not exists anymore
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: <|fim_middle|> def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: 
%.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
key_deleted = self.cached_keys.pop(0)
            dict.__setitem__(self, key_deleted, False)
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char <|fim_middle|> def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key 
assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
self.cached_keys.append(key)
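The rows above follow a fill-in-the-middle layout: each prompt holds the file with one span replaced by <|fim_middle|>, and the matching completion is the text that belongs in that span. As a minimal illustration (not part of the dataset; the helper name and marker constants are assumptions based on the markers visible in these rows), a prompt/completion pair can be spliced back into plain source like this:

FIM_BEGIN = "<|fim▁begin|>"
FIM_MIDDLE = "<|fim_middle|>"
FIM_END = "<|fim▁end|>"

def reassemble(prompt, completion):
    # Drop everything up to and including the begin marker (e.g. the file-name header),
    # then splice the completion into the hole between prefix and suffix.
    body = prompt.split(FIM_BEGIN, 1)[-1]
    prefix, rest = body.split(FIM_MIDDLE, 1)
    suffix = rest.split(FIM_END, 1)[0]
    return prefix + completion + suffix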