file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
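Each row below is one fill-in-the-middle (FIM) sample: a source file split into a `prefix`, a held-out `middle`, and a `suffix`, with `fim_type` labeling the kind of span that was held out (the four classes seen in this preview are `identifier_body`, `conditional_block`, `random_line_split`, and `identifier_name`). Concatenating `prefix + middle + suffix` restores the original file. The sketch below shows one way such a row could be consumed; it assumes rows are parsed into dicts keyed by the column names above, and the `<fim_*>` sentinel strings are illustrative placeholders rather than tokens defined by this dataset:

```python
# Hypothetical helpers for working with one row of this dataset.
def reconstruct(row: dict) -> str:
    """prefix + middle + suffix restores the original file verbatim."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row: dict) -> str:
    """Arrange a row in prefix-suffix-middle (PSM) order for FIM training.

    The sentinel names are placeholders; substitute whatever special
    tokens your tokenizer actually defines.
    """
    return ("<fim_prefix>" + row["prefix"]
            + "<fim_suffix>" + row["suffix"]
            + "<fim_middle>" + row["middle"])

# Tiny illustrative row (not taken from the dataset):
row = {"file_name": "example.py", "prefix": "def f(", "middle": "x",
       "suffix": "):\n    return x", "fim_type": "identifier_name"}
assert reconstruct(row) == "def f(x):\n    return x"
```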
sqlite1.py | import sqlite3
import os.path
import sys
import random
def makeDatabase(databaseName):
if databaseName[-3:] != ".db":
databaseName = databaseName + ".db"
conn = sqlite3.connect(databaseName)
conn.commit()
conn.close()
def listToString(list):
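    #serialize a list as a tab-separated string; tab is the field delimiter used throughout this file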
string = ""
for i in list:
string += str(i)+"\t"
return string[:-1]
def stringToList(string):
list = [str(line) for line in string.split('\t')]
return list
#class for connecting, inserting, and retrieving information from a sqlite3 database
class SqliteDB:
#connects to the database, alters its name if named incorrectly
def __init__(self, databaseName):
if databaseName[-3:] != ".db":
databaseName = databaseName + ".db"
if os.path.isfile(databaseName):
            self.databaseName = databaseName
self.conn = sqlite3.connect(self.databaseName)
self.cursor = self.conn.cursor()
else:
            #require the database file to already exist so sqlite3.connect doesn't silently create a new one
sys.exit("This database does not exist, use the makeDatabase(databaseName) to create it")
def createTables(self):
#creates tables if they do not exist
self.cursor.execute("CREATE TABLE IF NOT EXISTS students (wID text, email text, UNIQUE(wID, email) ON CONFLICT ABORT)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS submissions (labNumber int, wID text, URL text, metadata text, URLsToGrade text)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS uniqueStudentURL (labNumber int, wID text, URL text, UNIQUE(URL) ON CONFLICT ABORT)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS experts (labNumber int, URL text, grade text, hidden int, PRIMARY KEY(labNumber, URL, hidden))")
self.cursor.execute("CREATE TABLE IF NOT EXISTS responses (labNumber int, URL text, wID text, response text, practice boolean, PRIMARY KEY(labNumber, URL, response))")
self.cursor.execute("CREATE TABLE IF NOT EXISTS questions (labNumber int, questionNumber int, questionWebassignNumber int, practice boolean)")
weightString = ''
for i in range(6):
weightString += ', weight'+str(i+1)+' num'
self.cursor.execute("CREATE TABLE IF NOT EXISTS weightsBIBI (labNumber int, wID text"+weightString+", weightSum num)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS rubrics (labNumber int, itemIndex int, itemType text, itemValues text, graded boolean, itemPrompt text)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS grades(labNumber int, wID text, URL text, finalGrade number, finalGradeVector text, rawGrade number, rawGradeVector text)")
##check to see if the tables have already been created
#creates columns in tables for each lab specified
self.conn.commit()
#adds a person into the database, works for both new users and existing ones
def addEntry(self, wID, URL, labNumber, metadata = None):
if self.databaseName != None and self.conn != None and self.cursor !=None:
#If the student did not submit a URL (aka the inputted URL is '')
if URL == '':
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
#try putting the student and its URL into the uniqueStudentURL database to check if the URL is unique
else:
try:
self.cursor.execute("INSERT INTO uniqueStudentURL VALUES (?,?,?)", [labNumber, wID, URL])
#if there is no error in inserting to a table where URL has to be unique, put it in the actual student database
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
#if the try fails, that means that the URL is already in the db, duplicate URL found!
except:
self.cursor.execute("SELECT wID FROM uniqueStudentURL WHERE URL=?", [URL])
print "URL: " + URL + " was initially submitted by: " + self.cursor.fetchall()[0][0]
URL = "DUPLICATEURL"
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
self.conn.commit()
def addEmail(self, wID, email):
try:
self.cursor.execute("INSERT INTO students VALUES (?,?,?)", [wID, email])
except:
print "wID: " + wID + " or email: " + email + " already in database."
#retrieves URL for a specific student and specific lab number
def getURL(self, wID, labNumber):
self.cursor.execute("SELECT URL FROM submissions WHERE labNumber=? AND wID=?", [labNumber, wID])
        URL = self.cursor.fetchone()
if URL is not None:
return (URL[0])
else:
return None
def addExpertURL(self, labNumber, URL, grade, hidden):
self.cursor.execute("SELECT * FROM experts WHERE URL = ?", [URL])
#adds in a user if not in database already
presentURL = self.cursor.fetchone()
if presentURL == None:
self.cursor.execute("INSERT INTO experts VALUES (?, ?, ?, ?)", [labNumber, URL, listToString(grade), hidden])
self.conn.commit()
        elif presentURL[1] == URL:
print "The URL " + URL + " is already in the expert database"
else:
sys.exit("Trying to overrite")
        ##find a way to make separate expert tables for each lab, then join them together to prevent the staggering of grades in the excel sheet
#self.cursor.execute("SELECT * FROM expert WHERE Lab1Grade")
#print self.cursor.fetchall()
#query = ("SELECT {0} FROM expert WHERE wID
def getExpertURLs(self, labNumber):
self.cursor.execute("SElECT URL, grade FROM experts where labNumber=?", [labNumber])
URLsAndGrades = {}
for d in self.cursor.fetchall():
URLsAndGrades[str(d[0])] = stringToList(str(d[1]))
return URLsAndGrades
def finalize(self, labNumber, seed, N, MOOC=False):
##randomize the youtube URLs
#for each wID
        #put that into the database under the student ID
|
def getURLsToGrade(self, wID, labNumber):
self.cursor.execute("Select URLsToGrade FROM submissions WHERE wID=? and labNumber=?", [wID, labNumber])
dbExtract = self.cursor.fetchone()
if dbExtract == None:
return False
else:
return [i for i in stringToList(dbExtract[0])]
def addGrade(self, wID, labNumber, URL, grade , practice = False):
URLsToGrade = self.getURLsToGrade(wID, labNumber)
if URLsToGrade != False:
if URL in URLsToGrade:
self.cursor.execute("INSERT INTO responses VALUES(?, ?, ?, ?, ?)", [labNumber, URL, wID, listToString(grade), practice])
self.conn.commit()
else:
print "wID: " + wID + " was not assigned to grade URL: " + URL
else:
print("wID: " + wID + " not in the submissions table")
def wIDGradesSubmitted(self, wID, labNumber):
URLsToGrade = self.getURLsToGrade(wID, labNumber)
gradesSubmitted = {}
for URL in URLsToGrade:
self.cursor.execute("SElECT grade FROM grades WHERE wID = ? AND URL = ?",[wID, URL])
dbExtract = self.cursor.fetchall()
#if they did not grade the URL assigned to them
if dbExtract!=[]:
gradesSubmitted[URL] = stringToList(str(dbExtract[0][0]))
else:
gradesSubmitted[URL] = None
return gradesSubmitted
def compareToExpert(self, wID, labNumber):
expertURLsAndGrades = self.getExpertURLs(labNumber)
userSubmittedGrades = self.wIDGradesSubmitted(wID, labNumber)
URLsGraded = userSubmittedGrades.keys()
for key in expertURLsAndGrades.keys():
if key in URLsGraded:
print expertURLsAndGrades[key]
print userSubmittedGrades[key]
def getGrades(self, wID, labNumber):
URL = self.getURL(wID, labNumber)
self.cursor.execute("SELECT grade,wID FROM grades WHERE URL=?", [URL])
grades = {}
for d in self.cursor.fetchall():
grades[str(d[1])] = str(d[0])
return grades
def check(self, labNumber):
# params = ("Lab" + str(labNumber) + "URL", "Lab" + str(labNumber) + "URLsToGrade", None)
self.cursor.execute("Select URL, URLsToGrade FROM submissions WHERE URL!= ''")
fetch = self.cursor.fetchall()
individualURL = [str(d[0]) for d in fetch]
        URLList = [str(d[1]) for d in fetch]
        for i in range(len(individualURL)):
            if individualURL[i] not in stringToList(URLList[i]):
print individualURL[i]
return False
return True
if False:
os.remove("test.db")
makeDatabase("test.db")
sqldb = SqliteDB("test.db")
sqldb.createTables()
sqldb.addEntry("1", "1lkjsdf", 1)
sqldb.addEntry("2", "1lkjsdf", 1)
sqldb.addEntry("3", "1lkjsdf", 1)
sqldb.addEntry("4", "4lkjsdf", 1)
# sqldb.addEntry("4a",None , 2)
sqldb.addEntry("5", "5lkjsdf", 1)
sqldb.addEntry("6", "6lkjsdf", 1)
sqldb.addEntry("7", "7lkjsdf", 1)
sqldb.getURL("1", 1)
sqldb.getURL("2", 1)
sqldb.addExpertURL(1, "5lkjsdf",[1, 2, 3, 4, 5, 6, 7], 0)
sqldb.addExpertURL(1, "2lkjsdf", [1, 7, 3, 1, 6, 3], 0)
# sqldb.addEntry("8", None, 2)
sqldb.addEntry("8", '', 1)
sqldb.addEntry(9, "hidden", 1)
sqldb.addExpertURL(1, "hidden", [1, 2, 3], 1)
print "testing below"
sqldb.finalize(1, 1, 3)
print sqldb.getURLsToGrade("1", 1)
sqldb.addGrade("1",1, "5lkjsdf", [1, 2, 3, 4])
sqldb.addGrade("12",1, "asdf", 1)
sqldb.addGrade("1", 1, "2kjla", 1)
sqldb.addGrade("2", "1", "5lkjsdf", [4, 3, 2, 1])
sqldb.wIDGradesSubmitted("1", 1)
sqldb.getGrades("5", 1)
sqldb.getExpertURLs(1)
sqldb.compareToExpert("1",1)
sqldb.check(1)
# sqldb.addExpert("expertVideo", 1, 1)
# sqldb.addExpert("test2", 2, 2)
| self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=0", [labNumber])
expertURL = [str(d[0]) for d in self.cursor.fetchall()]
# find all the hidden expert videos
self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=1", [labNumber])
hiddenURL = [str(d[0]) for d in self.cursor.fetchall()]
        #get all the student URLs
self.cursor.execute("SELECT URL from submissions WHERE labNumber=?", [labNumber])
data = [str(d[0]) for d in self.cursor.fetchall()]
        #assign the students whose videos are designated expert-graded URLs to grade, and remove them from the URL pool retrieved above
if len(expertURL) + N + 1 <= len(data):
pseudoURL = {}
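            #map each expert/hidden URL to the next ordinary student URL in the pool; the expert video's submitter is later assigned the grading slice that starts at that URL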
for d in expertURL:
                #if the expertURL is not in the data list, it is a video not submitted by a student this semester, in which case we skip it
if d in data:
self.cursor.execute("SELECT wID FROM submissions WHERE URL=?", [d])
indice = (data.index(d) + 1) % len(data)
while data[indice] in expertURL or data[indice] in hiddenURL:
indice = (indice + 1) % len(data)
pseudoURL[d] = data[indice]
data.remove(d)
for d in hiddenURL:
if d in data:
indice = (data.index(d) + 1) % len(data)
while data[indice] in expertURL or data[indice] in hiddenURL:
indice = (indice + 1) % len(data)
pseudoURL[d] = data[indice]
data.remove(d)
self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? and URL is ''", [labNumber])
noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
wIDPseudoURL = {}
if(data.count('') > 0) and not MOOC:
for d in noURLSubmitted:
indice = (data.index('') + 1) % len(data)
while data[indice] == '':
indice = (indice + 1) % len(data)
wIDPseudoURL[d] = data[indice]
data.remove('')
else:
while '' in data:
data.remove('')
self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? AND URL=?", [labNumber, "DUPLICATEURL"])
noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
if(data.count("DUPLICATEURL") > 0) and not MOOC:
for d in noURLSubmitted:
indice = (data.index("DUPLICATEURL") + 1) % len(data)
while data[indice] == "DUPLICATEURL":
indice = (indice + 1) % len(data)
wIDPseudoURL[d] = data[indice]
data.remove("DUPLICATEURL")
else:
                while "DUPLICATEURL" in data:
                    data.remove("DUPLICATEURL")
#self.cursor.execute(query)
random.shuffle(data)
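            #extend the shuffled pool with a wrapped-around copy of its head so every start index yields N+1 consecutive URLs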
selectFrom = data + data[:N + len(expertURL) + 1]
if len(pseudoURL.keys()) > 0:
# params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
for key in pseudoURL.keys():
startIndex = selectFrom.index(pseudoURL[key])
URLSToGrade = selectFrom[startIndex: startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=?", [listToString(expertURL + URLSToGrade), key])
self.conn.commit()
if len(wIDPseudoURL.keys()) > 0:
for key in wIDPseudoURL.keys():
startIndex = selectFrom.index(wIDPseudoURL[key])
URLSToGrade = selectFrom[startIndex: startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE wID=?", [listToString(expertURL + URLSToGrade), key])
self.conn.commit()
if len(data) > N:
for d in data:
startIndex = selectFrom.index(d)
URLSToGrade = selectFrom[startIndex:startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
# params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=? and labNumber=?", [listToString(expertURL + URLSToGrade), d, labNumber])
self.conn.commit() | identifier_body |
sqlite1.py | import sqlite3
import os.path
import sys
import random
def makeDatabase(databaseName):
if databaseName[-3:] != ".db":
databaseName = databaseName + ".db"
conn = sqlite3.connect(databaseName)
conn.commit()
conn.close()
def listToString(list):
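    #serialize a list as a tab-separated string; tab is the field delimiter used throughout this file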
string = ""
for i in list:
string += str(i)+"\t"
return string[:-1]
def stringToList(string):
list = [str(line) for line in string.split('\t')]
return list
#class for connecting, inserting, and retrieving information from a sqlite3 database
class SqliteDB:
#connects to the database, alters its name if named incorrectly
def __init__(self, databaseName):
if databaseName[-3:] != ".db":
databaseName = databaseName + ".db"
if os.path.isfile(databaseName):
            self.databaseName = databaseName
self.conn = sqlite3.connect(self.databaseName)
self.cursor = self.conn.cursor()
else:
            #require the database file to already exist so sqlite3.connect doesn't silently create a new one
sys.exit("This database does not exist, use the makeDatabase(databaseName) to create it")
def createTables(self):
#creates tables if they do not exist
self.cursor.execute("CREATE TABLE IF NOT EXISTS students (wID text, email text, UNIQUE(wID, email) ON CONFLICT ABORT)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS submissions (labNumber int, wID text, URL text, metadata text, URLsToGrade text)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS uniqueStudentURL (labNumber int, wID text, URL text, UNIQUE(URL) ON CONFLICT ABORT)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS experts (labNumber int, URL text, grade text, hidden int, PRIMARY KEY(labNumber, URL, hidden))")
self.cursor.execute("CREATE TABLE IF NOT EXISTS responses (labNumber int, URL text, wID text, response text, practice boolean, PRIMARY KEY(labNumber, URL, response))")
self.cursor.execute("CREATE TABLE IF NOT EXISTS questions (labNumber int, questionNumber int, questionWebassignNumber int, practice boolean)")
weightString = ''
for i in range(6):
weightString += ', weight'+str(i+1)+' num'
self.cursor.execute("CREATE TABLE IF NOT EXISTS weightsBIBI (labNumber int, wID text"+weightString+", weightSum num)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS rubrics (labNumber int, itemIndex int, itemType text, itemValues text, graded boolean, itemPrompt text)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS grades(labNumber int, wID text, URL text, finalGrade number, finalGradeVector text, rawGrade number, rawGradeVector text)")
##check to see if the tables have already been created
#creates columns in tables for each lab specified
self.conn.commit()
#adds a person into the database, works for both new users and existing ones
def addEntry(self, wID, URL, labNumber, metadata = None):
if self.databaseName != None and self.conn != None and self.cursor !=None:
#If the student did not submit a URL (aka the inputted URL is '')
if URL == '':
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
#try putting the student and its URL into the uniqueStudentURL database to check if the URL is unique
else:
try:
self.cursor.execute("INSERT INTO uniqueStudentURL VALUES (?,?,?)", [labNumber, wID, URL])
#if there is no error in inserting to a table where URL has to be unique, put it in the actual student database
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
#if the try fails, that means that the URL is already in the db, duplicate URL found!
except:
self.cursor.execute("SELECT wID FROM uniqueStudentURL WHERE URL=?", [URL])
print "URL: " + URL + " was initially submitted by: " + self.cursor.fetchall()[0][0]
URL = "DUPLICATEURL"
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
self.conn.commit()
def addEmail(self, wID, email):
try:
self.cursor.execute("INSERT INTO students VALUES (?,?,?)", [wID, email])
except:
print "wID: " + wID + " or email: " + email + " already in database."
#retrieves URL for a specific student and specific lab number
def getURL(self, wID, labNumber):
self.cursor.execute("SELECT URL FROM submissions WHERE labNumber=? AND wID=?", [labNumber, wID])
        URL = self.cursor.fetchone()
if URL is not None:
return (URL[0])
else:
return None
def addExpertURL(self, labNumber, URL, grade, hidden):
self.cursor.execute("SELECT * FROM experts WHERE URL = ?", [URL])
#adds in a user if not in database already
presentURL = self.cursor.fetchone()
if presentURL == None:
self.cursor.execute("INSERT INTO experts VALUES (?, ?, ?, ?)", [labNumber, URL, listToString(grade), hidden])
self.conn.commit()
        elif presentURL[1] == URL:
print "The URL " + URL + " is already in the expert database"
else:
|
        ##find a way to make separate expert tables for each lab, then join them together to prevent the staggering of grades in the excel sheet
#self.cursor.execute("SELECT * FROM expert WHERE Lab1Grade")
#print self.cursor.fetchall()
#query = ("SELECT {0} FROM expert WHERE wID
def getExpertURLs(self, labNumber):
self.cursor.execute("SElECT URL, grade FROM experts where labNumber=?", [labNumber])
URLsAndGrades = {}
for d in self.cursor.fetchall():
URLsAndGrades[str(d[0])] = stringToList(str(d[1]))
return URLsAndGrades
def finalize(self, labNumber, seed, N, MOOC=False):
##randomize the youtube URLs
#for each wID
        #put that into the database under the student ID
self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=0", [labNumber])
expertURL = [str(d[0]) for d in self.cursor.fetchall()]
# find all the hidden expert videos
self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=1", [labNumber])
hiddenURL = [str(d[0]) for d in self.cursor.fetchall()]
        #get all the student URLs
self.cursor.execute("SELECT URL from submissions WHERE labNumber=?", [labNumber])
data = [str(d[0]) for d in self.cursor.fetchall()]
        #assign the students whose videos are designated expert-graded URLs to grade, and remove them from the URL pool retrieved above
if len(expertURL) + N + 1 <= len(data):
pseudoURL = {}
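            #map each expert/hidden URL to the next ordinary student URL in the pool; the expert video's submitter is later assigned the grading slice that starts at that URL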
for d in expertURL:
                #if the expertURL is not in the data list, it is a video not submitted by a student this semester, in which case we skip it
if d in data:
self.cursor.execute("SELECT wID FROM submissions WHERE URL=?", [d])
indice = (data.index(d) + 1) % len(data)
while data[indice] in expertURL or data[indice] in hiddenURL:
indice = (indice + 1) % len(data)
pseudoURL[d] = data[indice]
data.remove(d)
for d in hiddenURL:
if d in data:
indice = (data.index(d) + 1) % len(data)
while data[indice] in expertURL or data[indice] in hiddenURL:
indice = (indice + 1) % len(data)
pseudoURL[d] = data[indice]
data.remove(d)
self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? and URL is ''", [labNumber])
noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
wIDPseudoURL = {}
if(data.count('') > 0) and not MOOC:
for d in noURLSubmitted:
indice = (data.index('') + 1) % len(data)
while data[indice] == '':
indice = (indice + 1) % len(data)
wIDPseudoURL[d] = data[indice]
data.remove('')
else:
while '' in data:
data.remove('')
self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? AND URL=?", [labNumber, "DUPLICATEURL"])
noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
if(data.count("DUPLICATEURL") > 0) and not MOOC:
for d in noURLSubmitted:
indice = (data.index("DUPLICATEURL") + 1) % len(data)
while data[indice] == "DUPLICATEURL":
indice = (indice + 1) % len(data)
wIDPseudoURL[d] = data[indice]
data.remove("DUPLICATEURL")
else:
                while "DUPLICATEURL" in data:
                    data.remove("DUPLICATEURL")
#self.cursor.execute(query)
random.shuffle(data)
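            #extend the shuffled pool with a wrapped-around copy of its head so every start index yields N+1 consecutive URLs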
selectFrom = data + data[:N + len(expertURL) + 1]
if len(pseudoURL.keys()) > 0:
# params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
for key in pseudoURL.keys():
startIndex = selectFrom.index(pseudoURL[key])
URLSToGrade = selectFrom[startIndex: startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=?", [listToString(expertURL + URLSToGrade), key])
self.conn.commit()
if len(wIDPseudoURL.keys()) > 0:
for key in wIDPseudoURL.keys():
startIndex = selectFrom.index(wIDPseudoURL[key])
URLSToGrade = selectFrom[startIndex: startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE wID=?", [listToString(expertURL + URLSToGrade), key])
self.conn.commit()
if len(data) > N:
for d in data:
startIndex = selectFrom.index(d)
URLSToGrade = selectFrom[startIndex:startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
# params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=? and labNumber=?", [listToString(expertURL + URLSToGrade), d, labNumber])
self.conn.commit()
def getURLsToGrade(self, wID, labNumber):
self.cursor.execute("Select URLsToGrade FROM submissions WHERE wID=? and labNumber=?", [wID, labNumber])
dbExtract = self.cursor.fetchone()
if dbExtract == None:
return False
else:
return [i for i in stringToList(dbExtract[0])]
def addGrade(self, wID, labNumber, URL, grade , practice = False):
URLsToGrade = self.getURLsToGrade(wID, labNumber)
if URLsToGrade != False:
if URL in URLsToGrade:
self.cursor.execute("INSERT INTO responses VALUES(?, ?, ?, ?, ?)", [labNumber, URL, wID, listToString(grade), practice])
self.conn.commit()
else:
print "wID: " + wID + " was not assigned to grade URL: " + URL
else:
print("wID: " + wID + " not in the submissions table")
def wIDGradesSubmitted(self, wID, labNumber):
URLsToGrade = self.getURLsToGrade(wID, labNumber)
gradesSubmitted = {}
for URL in URLsToGrade:
self.cursor.execute("SElECT grade FROM grades WHERE wID = ? AND URL = ?",[wID, URL])
dbExtract = self.cursor.fetchall()
#if they did not grade the URL assigned to them
if dbExtract!=[]:
gradesSubmitted[URL] = stringToList(str(dbExtract[0][0]))
else:
gradesSubmitted[URL] = None
return gradesSubmitted
def compareToExpert(self, wID, labNumber):
expertURLsAndGrades = self.getExpertURLs(labNumber)
userSubmittedGrades = self.wIDGradesSubmitted(wID, labNumber)
URLsGraded = userSubmittedGrades.keys()
for key in expertURLsAndGrades.keys():
if key in URLsGraded:
print expertURLsAndGrades[key]
print userSubmittedGrades[key]
def getGrades(self, wID, labNumber):
URL = self.getURL(wID, labNumber)
self.cursor.execute("SELECT grade,wID FROM grades WHERE URL=?", [URL])
grades = {}
for d in self.cursor.fetchall():
grades[str(d[1])] = str(d[0])
return grades
def check(self, labNumber):
# params = ("Lab" + str(labNumber) + "URL", "Lab" + str(labNumber) + "URLsToGrade", None)
self.cursor.execute("Select URL, URLsToGrade FROM submissions WHERE URL!= ''")
fetch = self.cursor.fetchall()
individualURL = [str(d[0]) for d in fetch]
        URLList = [str(d[1]) for d in fetch]
        for i in range(len(individualURL)):
            if individualURL[i] not in stringToList(URLList[i]):
print individualURL[i]
return False
return True
if False:
os.remove("test.db")
makeDatabase("test.db")
sqldb = SqliteDB("test.db")
sqldb.createTables()
sqldb.addEntry("1", "1lkjsdf", 1)
sqldb.addEntry("2", "1lkjsdf", 1)
sqldb.addEntry("3", "1lkjsdf", 1)
sqldb.addEntry("4", "4lkjsdf", 1)
# sqldb.addEntry("4a",None , 2)
sqldb.addEntry("5", "5lkjsdf", 1)
sqldb.addEntry("6", "6lkjsdf", 1)
sqldb.addEntry("7", "7lkjsdf", 1)
sqldb.getURL("1", 1)
sqldb.getURL("2", 1)
sqldb.addExpertURL(1, "5lkjsdf",[1, 2, 3, 4, 5, 6, 7], 0)
sqldb.addExpertURL(1, "2lkjsdf", [1, 7, 3, 1, 6, 3], 0)
# sqldb.addEntry("8", None, 2)
sqldb.addEntry("8", '', 1)
sqldb.addEntry(9, "hidden", 1)
sqldb.addExpertURL(1, "hidden", [1, 2, 3], 1)
print "testing below"
sqldb.finalize(1, 1, 3)
print sqldb.getURLsToGrade("1", 1)
sqldb.addGrade("1",1, "5lkjsdf", [1, 2, 3, 4])
sqldb.addGrade("12",1, "asdf", 1)
sqldb.addGrade("1", 1, "2kjla", 1)
sqldb.addGrade("2", "1", "5lkjsdf", [4, 3, 2, 1])
sqldb.wIDGradesSubmitted("1", 1)
sqldb.getGrades("5", 1)
sqldb.getExpertURLs(1)
sqldb.compareToExpert("1",1)
sqldb.check(1)
# sqldb.addExpert("expertVideo", 1, 1)
# sqldb.addExpert("test2", 2, 2)
| sys.exit("Trying to overwrite") | conditional_block
sqlite1.py | import sqlite3
import os.path
import sys
import random
def makeDatabase(databaseName):
if databaseName[-3:] != ".db":
databaseName = databaseName + ".db"
conn = sqlite3.connect(databaseName)
conn.commit()
conn.close()
def listToString(list):
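    #serialize a list as a tab-separated string; tab is the field delimiter used throughout this file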
string = ""
for i in list:
string += str(i)+"\t"
return string[:-1]
def stringToList(string):
list = [str(line) for line in string.split('\t')]
return list
#class for connecting, inserting, and retrieving information from a sqlite3 database
class SqliteDB:
#connects to the database, alters its name if named incorrectly
def __init__(self, databaseName):
if databaseName[-3:] != ".db":
databaseName = databaseName + ".db"
if os.path.isfile(databaseName):
            self.databaseName = databaseName
self.conn = sqlite3.connect(self.databaseName)
self.cursor = self.conn.cursor()
else:
            #require the database file to already exist so sqlite3.connect doesn't silently create a new one
sys.exit("This database does not exist, use the makeDatabase(databaseName) to create it")
def createTables(self):
#creates tables if they do not exist
self.cursor.execute("CREATE TABLE IF NOT EXISTS students (wID text, email text, UNIQUE(wID, email) ON CONFLICT ABORT)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS submissions (labNumber int, wID text, URL text, metadata text, URLsToGrade text)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS uniqueStudentURL (labNumber int, wID text, URL text, UNIQUE(URL) ON CONFLICT ABORT)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS experts (labNumber int, URL text, grade text, hidden int, PRIMARY KEY(labNumber, URL, hidden))")
self.cursor.execute("CREATE TABLE IF NOT EXISTS responses (labNumber int, URL text, wID text, response text, practice boolean, PRIMARY KEY(labNumber, URL, response))")
self.cursor.execute("CREATE TABLE IF NOT EXISTS questions (labNumber int, questionNumber int, questionWebassignNumber int, practice boolean)")
weightString = ''
for i in range(6):
weightString += ', weight'+str(i+1)+' num'
self.cursor.execute("CREATE TABLE IF NOT EXISTS weightsBIBI (labNumber int, wID text"+weightString+", weightSum num)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS rubrics (labNumber int, itemIndex int, itemType text, itemValues text, graded boolean, itemPrompt text)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS grades(labNumber int, wID text, URL text, finalGrade number, finalGradeVector text, rawGrade number, rawGradeVector text)")
##check to see if the tables have already been created
#creates columns in tables for each lab specified
self.conn.commit()
#adds a person into the database, works for both new users and existing ones
def addEntry(self, wID, URL, labNumber, metadata = None):
if self.databaseName != None and self.conn != None and self.cursor !=None:
#If the student did not submit a URL (aka the inputted URL is '')
if URL == '':
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
#try putting the student and its URL into the uniqueStudentURL database to check if the URL is unique
else:
try:
self.cursor.execute("INSERT INTO uniqueStudentURL VALUES (?,?,?)", [labNumber, wID, URL])
#if there is no error in inserting to a table where URL has to be unique, put it in the actual student database
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
#if the try fails, that means that the URL is already in the db, duplicate URL found!
except:
self.cursor.execute("SELECT wID FROM uniqueStudentURL WHERE URL=?", [URL])
print "URL: " + URL + " was initially submitted by: " + self.cursor.fetchall()[0][0]
URL = "DUPLICATEURL"
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
self.conn.commit()
def addEmail(self, wID, email):
try:
self.cursor.execute("INSERT INTO students VALUES (?,?,?)", [wID, email])
except:
print "wID: " + wID + " or email: " + email + " already in database."
#retrieves URL for a specific student and specific lab number
def getURL(self, wID, labNumber):
self.cursor.execute("SELECT URL FROM submissions WHERE labNumber=? AND wID=?", [labNumber, wID])
        URL = self.cursor.fetchone()
if URL is not None:
return (URL[0])
else:
return None
def addExpertURL(self, labNumber, URL, grade, hidden):
self.cursor.execute("SELECT * FROM experts WHERE URL = ?", [URL])
#adds in a user if not in database already
presentURL = self.cursor.fetchone()
if presentURL == None:
self.cursor.execute("INSERT INTO experts VALUES (?, ?, ?, ?)", [labNumber, URL, listToString(grade), hidden])
self.conn.commit()
        elif presentURL[1] == URL:
print "The URL " + URL + " is already in the expert database"
else:
sys.exit("Trying to overrite")
        ##find a way to make separate expert tables for each lab, then join them together to prevent the staggering of grades in the excel sheet
#self.cursor.execute("SELECT * FROM expert WHERE Lab1Grade")
#print self.cursor.fetchall()
#query = ("SELECT {0} FROM expert WHERE wID
def getExpertURLs(self, labNumber):
self.cursor.execute("SElECT URL, grade FROM experts where labNumber=?", [labNumber])
URLsAndGrades = {}
for d in self.cursor.fetchall():
URLsAndGrades[str(d[0])] = stringToList(str(d[1]))
return URLsAndGrades
def finalize(self, labNumber, seed, N, MOOC=False):
##randomize the youtube URLs
#for each wID
        #put that into the database under the student ID
self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=0", [labNumber])
expertURL = [str(d[0]) for d in self.cursor.fetchall()]
# find all the hidden expert videos
self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=1", [labNumber])
hiddenURL = [str(d[0]) for d in self.cursor.fetchall()]
        #get all the student URLs
self.cursor.execute("SELECT URL from submissions WHERE labNumber=?", [labNumber])
data = [str(d[0]) for d in self.cursor.fetchall()]
        #assign the students whose videos are designated expert-graded URLs to grade, and remove them from the URL pool retrieved above
if len(expertURL) + N + 1 <= len(data):
pseudoURL = {}
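            #map each expert/hidden URL to the next ordinary student URL in the pool; the expert video's submitter is later assigned the grading slice that starts at that URL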
for d in expertURL:
                #if the expertURL is not in the data list, it is a video not submitted by a student this semester, in which case we skip it
if d in data:
self.cursor.execute("SELECT wID FROM submissions WHERE URL=?", [d])
indice = (data.index(d) + 1) % len(data)
while data[indice] in expertURL or data[indice] in hiddenURL:
indice = (indice + 1) % len(data)
pseudoURL[d] = data[indice]
data.remove(d)
for d in hiddenURL:
if d in data:
indice = (data.index(d) + 1) % len(data)
while data[indice] in expertURL or data[indice] in hiddenURL:
indice = (indice + 1) % len(data)
pseudoURL[d] = data[indice]
data.remove(d)
self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? and URL is ''", [labNumber])
noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
wIDPseudoURL = {}
if(data.count('') > 0) and not MOOC:
for d in noURLSubmitted:
indice = (data.index('') + 1) % len(data)
while data[indice] == '':
indice = (indice + 1) % len(data)
wIDPseudoURL[d] = data[indice]
data.remove('')
else:
while '' in data:
data.remove('')
self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? AND URL=?", [labNumber, "DUPLICATEURL"])
noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
if(data.count("DUPLICATEURL") > 0) and not MOOC:
for d in noURLSubmitted:
indice = (data.index("DUPLICATEURL") + 1) % len(data)
while data[indice] == "DUPLICATEURL":
indice = (indice + 1) % len(data)
wIDPseudoURL[d] = data[indice]
data.remove("DUPLICATEURL")
else:
                while "DUPLICATEURL" in data:
                    data.remove("DUPLICATEURL")
#self.cursor.execute(query)
random.shuffle(data)
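            #extend the shuffled pool with a wrapped-around copy of its head so every start index yields N+1 consecutive URLs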
selectFrom = data + data[:N + len(expertURL) + 1]
if len(pseudoURL.keys()) > 0:
# params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
for key in pseudoURL.keys():
startIndex = selectFrom.index(pseudoURL[key])
URLSToGrade = selectFrom[startIndex: startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=?", [listToString(expertURL + URLSToGrade), key])
self.conn.commit()
if len(wIDPseudoURL.keys()) > 0:
for key in wIDPseudoURL.keys():
startIndex = selectFrom.index(wIDPseudoURL[key])
URLSToGrade = selectFrom[startIndex: startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE wID=?", [listToString(expertURL + URLSToGrade), key])
self.conn.commit()
if len(data) > N:
for d in data:
startIndex = selectFrom.index(d)
URLSToGrade = selectFrom[startIndex:startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
# params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=? and labNumber=?", [listToString(expertURL + URLSToGrade), d, labNumber])
self.conn.commit()
def getURLsToGrade(self, wID, labNumber):
self.cursor.execute("Select URLsToGrade FROM submissions WHERE wID=? and labNumber=?", [wID, labNumber])
dbExtract = self.cursor.fetchone()
if dbExtract == None:
return False
else:
return [i for i in stringToList(dbExtract[0])]
def addGrade(self, wID, labNumber, URL, grade , practice = False):
URLsToGrade = self.getURLsToGrade(wID, labNumber)
if URLsToGrade != False:
if URL in URLsToGrade:
self.cursor.execute("INSERT INTO responses VALUES(?, ?, ?, ?, ?)", [labNumber, URL, wID, listToString(grade), practice])
self.conn.commit()
else:
print "wID: " + wID + " was not assigned to grade URL: " + URL
else:
print("wID: " + wID + " not in the submissions table")
def wIDGradesSubmitted(self, wID, labNumber):
URLsToGrade = self.getURLsToGrade(wID, labNumber)
gradesSubmitted = {}
for URL in URLsToGrade:
self.cursor.execute("SElECT grade FROM grades WHERE wID = ? AND URL = ?",[wID, URL])
dbExtract = self.cursor.fetchall()
#if they did not grade the URL assigned to them
if dbExtract!=[]:
gradesSubmitted[URL] = stringToList(str(dbExtract[0][0]))
else:
gradesSubmitted[URL] = None
return gradesSubmitted
def compareToExpert(self, wID, labNumber):
expertURLsAndGrades = self.getExpertURLs(labNumber)
userSubmittedGrades = self.wIDGradesSubmitted(wID, labNumber)
URLsGraded = userSubmittedGrades.keys()
for key in expertURLsAndGrades.keys():
if key in URLsGraded:
print expertURLsAndGrades[key]
print userSubmittedGrades[key]
def getGrades(self, wID, labNumber):
URL = self.getURL(wID, labNumber)
self.cursor.execute("SELECT grade,wID FROM grades WHERE URL=?", [URL])
grades = {}
for d in self.cursor.fetchall():
grades[str(d[1])] = str(d[0])
return grades
def check(self, labNumber):
# params = ("Lab" + str(labNumber) + "URL", "Lab" + str(labNumber) + "URLsToGrade", None)
self.cursor.execute("Select URL, URLsToGrade FROM submissions WHERE URL!= ''")
fetch = self.cursor.fetchall()
individualURL = [str(d[0]) for d in fetch]
        URLList = [str(d[1]) for d in fetch]
        for i in range(len(individualURL)):
            if individualURL[i] not in stringToList(URLList[i]):
print individualURL[i]
return False
return True
if False:
os.remove("test.db")
makeDatabase("test.db")
sqldb = SqliteDB("test.db")
sqldb.createTables()
sqldb.addEntry("1", "1lkjsdf", 1)
sqldb.addEntry("2", "1lkjsdf", 1)
sqldb.addEntry("3", "1lkjsdf", 1)
sqldb.addEntry("4", "4lkjsdf", 1)
# sqldb.addEntry("4a",None , 2)
sqldb.addEntry("5", "5lkjsdf", 1)
sqldb.addEntry("6", "6lkjsdf", 1)
sqldb.addEntry("7", "7lkjsdf", 1)
sqldb.getURL("1", 1)
sqldb.getURL("2", 1)
sqldb.addExpertURL(1, "5lkjsdf",[1, 2, 3, 4, 5, 6, 7], 0)
sqldb.addExpertURL(1, "2lkjsdf", [1, 7, 3, 1, 6, 3], 0)
# sqldb.addEntry("8", None, 2)
sqldb.addEntry("8", '', 1)
sqldb.addEntry(9, "hidden", 1)
sqldb.addExpertURL(1, "hidden", [1, 2, 3], 1)
print "testing below"
sqldb.finalize(1, 1, 3)
print sqldb.getURLsToGrade("1", 1)
sqldb.addGrade("1",1, "5lkjsdf", [1, 2, 3, 4])
sqldb.addGrade("12",1, "asdf", 1)
sqldb.addGrade("1", 1, "2kjla", 1)
sqldb.addGrade("2", "1", "5lkjsdf", [4, 3, 2, 1])
sqldb.wIDGradesSubmitted("1", 1)
sqldb.getGrades("5", 1)
sqldb.getExpertURLs(1) | sqldb.check(1)
# sqldb.addExpert("expertVideo", 1, 1)
# sqldb.addExpert("test2", 2, 2) | sqldb.compareToExpert("1",1) | random_line_split |
sqlite1.py | import sqlite3
import os.path
import sys
import random
def makeDatabase(databaseName):
if databaseName[-3:] != ".db":
databaseName = databaseName + ".db"
conn = sqlite3.connect(databaseName)
conn.commit()
conn.close()
def listToString(list):
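    #serialize a list as a tab-separated string; tab is the field delimiter used throughout this file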
string = ""
for i in list:
string += str(i)+"\t"
return string[:-1]
def stringToList(string):
list = [str(line) for line in string.split('\t')]
return list
#class for connecting, inserting, and retrieving information from a sqlite3 database
class SqliteDB:
#connects to the database, alters its name if named incorrectly
def __init__(self, databaseName):
if databaseName[-3:] != ".db":
databaseName = databaseName + ".db"
if os.path.isfile(databaseName):
            self.databaseName = databaseName
self.conn = sqlite3.connect(self.databaseName)
self.cursor = self.conn.cursor()
else:
            #require the database file to already exist so sqlite3.connect doesn't silently create a new one
sys.exit("This database does not exist, use the makeDatabase(databaseName) to create it")
def | (self):
#creates tables if they do not exist
self.cursor.execute("CREATE TABLE IF NOT EXISTS students (wID text, email text, UNIQUE(wID, email) ON CONFLICT ABORT)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS submissions (labNumber int, wID text, URL text, metadata text, URLsToGrade text)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS uniqueStudentURL (labNumber int, wID text, URL text, UNIQUE(URL) ON CONFLICT ABORT)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS experts (labNumber int, URL text, grade text, hidden int, PRIMARY KEY(labNumber, URL, hidden))")
self.cursor.execute("CREATE TABLE IF NOT EXISTS responses (labNumber int, URL text, wID text, response text, practice boolean, PRIMARY KEY(labNumber, URL, response))")
self.cursor.execute("CREATE TABLE IF NOT EXISTS questions (labNumber int, questionNumber int, questionWebassignNumber int, practice boolean)")
weightString = ''
for i in range(6):
weightString += ', weight'+str(i+1)+' num'
self.cursor.execute("CREATE TABLE IF NOT EXISTS weightsBIBI (labNumber int, wID text"+weightString+", weightSum num)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS rubrics (labNumber int, itemIndex int, itemType text, itemValues text, graded boolean, itemPrompt text)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS grades(labNumber int, wID text, URL text, finalGrade number, finalGradeVector text, rawGrade number, rawGradeVector text)")
##check to see if the tables have already been created
#creates columns in tables for each lab specified
self.conn.commit()
#adds a person into the database, works for both new users and existing ones
def addEntry(self, wID, URL, labNumber, metadata = None):
if self.databaseName != None and self.conn != None and self.cursor !=None:
#If the student did not submit a URL (aka the inputted URL is '')
if URL == '':
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
#try putting the student and its URL into the uniqueStudentURL database to check if the URL is unique
else:
try:
self.cursor.execute("INSERT INTO uniqueStudentURL VALUES (?,?,?)", [labNumber, wID, URL])
#if there is no error in inserting to a table where URL has to be unique, put it in the actual student database
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
#if the try fails, that means that the URL is already in the db, duplicate URL found!
except:
self.cursor.execute("SELECT wID FROM uniqueStudentURL WHERE URL=?", [URL])
print "URL: " + URL + " was initially submitted by: " + self.cursor.fetchall()[0][0]
URL = "DUPLICATEURL"
self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
self.conn.commit()
def addEmail(self, wID, email):
try:
self.cursor.execute("INSERT INTO students VALUES (?,?,?)", [wID, email])
except:
print "wID: " + wID + " or email: " + email + " already in database."
#retrieves URL for a specific student and specific lab number
def getURL(self, wID, labNumber):
self.cursor.execute("SELECT URL FROM submissions WHERE labNumber=? AND wID=?", [labNumber, wID])
        URL = self.cursor.fetchone()
if URL is not None:
return (URL[0])
else:
return None
def addExpertURL(self, labNumber, URL, grade, hidden):
self.cursor.execute("SELECT * FROM experts WHERE URL = ?", [URL])
#adds in a user if not in database already
presentURL = self.cursor.fetchone()
if presentURL == None:
self.cursor.execute("INSERT INTO experts VALUES (?, ?, ?, ?)", [labNumber, URL, listToString(grade), hidden])
self.conn.commit()
        elif presentURL[1] == URL:
print "The URL " + URL + " is already in the expert database"
else:
sys.exit("Trying to overrite")
        ##find a way to make separate expert tables for each lab, then join them together to prevent the staggering of grades in the excel sheet
#self.cursor.execute("SELECT * FROM expert WHERE Lab1Grade")
#print self.cursor.fetchall()
#query = ("SELECT {0} FROM expert WHERE wID
def getExpertURLs(self, labNumber):
self.cursor.execute("SElECT URL, grade FROM experts where labNumber=?", [labNumber])
URLsAndGrades = {}
for d in self.cursor.fetchall():
URLsAndGrades[str(d[0])] = stringToList(str(d[1]))
return URLsAndGrades
def finalize(self, labNumber, seed, N, MOOC=False):
##randomize the youtube URLs
#for each wID
        #put that into the database under the student ID
self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=0", [labNumber])
expertURL = [str(d[0]) for d in self.cursor.fetchall()]
# find all the hidden expert videos
self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=1", [labNumber])
hiddenURL = [str(d[0]) for d in self.cursor.fetchall()]
        #get all the student URLs
self.cursor.execute("SELECT URL from submissions WHERE labNumber=?", [labNumber])
data = [str(d[0]) for d in self.cursor.fetchall()]
        #assign the students whose videos are designated expert-graded URLs to grade, and remove them from the URL pool retrieved above
if len(expertURL) + N + 1 <= len(data):
pseudoURL = {}
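            #map each expert/hidden URL to the next ordinary student URL in the pool; the expert video's submitter is later assigned the grading slice that starts at that URL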
for d in expertURL:
#if the expertURL is not in the data list, then it is a video that is not submitted by a student this sem
#semester, in which case, we skip it
if d in data:
self.cursor.execute("SELECT wID FROM submissions WHERE URL=?", [d])
indice = (data.index(d) + 1) % len(data)
while data[indice] in expertURL or data[indice] in hiddenURL:
indice = (indice + 1) % len(data)
pseudoURL[d] = data[indice]
data.remove(d)
for d in hiddenURL:
if d in data:
indice = (data.index(d) + 1) % len(data)
while data[indice] in expertURL or data[indice] in hiddenURL:
indice = (indice + 1) % len(data)
pseudoURL[d] = data[indice]
data.remove(d)
self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? and URL is ''", [labNumber])
noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
wIDPseudoURL = {}
if(data.count('') > 0) and not MOOC:
for d in noURLSubmitted:
indice = (data.index('') + 1) % len(data)
while data[indice] == '':
indice = (indice + 1) % len(data)
wIDPseudoURL[d] = data[indice]
data.remove('')
else:
while '' in data:
data.remove('')
self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? AND URL=?", [labNumber, "DUPLICATEURL"])
noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
if(data.count("DUPLICATEURL") > 0) and not MOOC:
for d in noURLSubmitted:
indice = (data.index("DUPLICATEURL") + 1) % len(data)
while data[indice] == "DUPLICATEURL":
indice = (indice + 1) % len(data)
wIDPseudoURL[d] = data[indice]
data.remove("DUPLICATEURL")
else:
                while "DUPLICATEURL" in data:
                    data.remove("DUPLICATEURL")
#self.cursor.execute(query)
random.shuffle(data)
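            #extend the shuffled pool with a wrapped-around copy of its head so every start index yields N+1 consecutive URLs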
selectFrom = data + data[:N + len(expertURL) + 1]
if len(pseudoURL.keys()) > 0:
# params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
for key in pseudoURL.keys():
startIndex = selectFrom.index(pseudoURL[key])
URLSToGrade = selectFrom[startIndex: startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=?", [listToString(expertURL + URLSToGrade), key])
self.conn.commit()
if len(wIDPseudoURL.keys()) > 0:
for key in wIDPseudoURL.keys():
startIndex = selectFrom.index(wIDPseudoURL[key])
URLSToGrade = selectFrom[startIndex: startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE wID=?", [listToString(expertURL + URLSToGrade), key])
self.conn.commit()
if len(data) > N:
for d in data:
startIndex = selectFrom.index(d)
URLSToGrade = selectFrom[startIndex:startIndex+N+1]
for i in hiddenURL:
URLSToGrade.append(i)
random.shuffle(URLSToGrade)
# params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=? and labNumber=?", [listToString(expertURL + URLSToGrade), d, labNumber])
self.conn.commit()
def getURLsToGrade(self, wID, labNumber):
self.cursor.execute("Select URLsToGrade FROM submissions WHERE wID=? and labNumber=?", [wID, labNumber])
dbExtract = self.cursor.fetchone()
if dbExtract == None:
return False
else:
return [i for i in stringToList(dbExtract[0])]
def addGrade(self, wID, labNumber, URL, grade , practice = False):
URLsToGrade = self.getURLsToGrade(wID, labNumber)
if URLsToGrade != False:
if URL in URLsToGrade:
self.cursor.execute("INSERT INTO responses VALUES(?, ?, ?, ?, ?)", [labNumber, URL, wID, listToString(grade), practice])
self.conn.commit()
else:
print "wID: " + wID + " was not assigned to grade URL: " + URL
else:
print("wID: " + wID + " not in the submissions table")
def wIDGradesSubmitted(self, wID, labNumber):
URLsToGrade = self.getURLsToGrade(wID, labNumber)
gradesSubmitted = {}
for URL in URLsToGrade:
self.cursor.execute("SElECT grade FROM grades WHERE wID = ? AND URL = ?",[wID, URL])
dbExtract = self.cursor.fetchall()
#if they did not grade the URL assigned to them
if dbExtract!=[]:
gradesSubmitted[URL] = stringToList(str(dbExtract[0][0]))
else:
gradesSubmitted[URL] = None
return gradesSubmitted
def compareToExpert(self, wID, labNumber):
expertURLsAndGrades = self.getExpertURLs(labNumber)
userSubmittedGrades = self.wIDGradesSubmitted(wID, labNumber)
URLsGraded = userSubmittedGrades.keys()
for key in expertURLsAndGrades.keys():
if key in URLsGraded:
print expertURLsAndGrades[key]
print userSubmittedGrades[key]
def getGrades(self, wID, labNumber):
URL = self.getURL(wID, labNumber)
self.cursor.execute("SELECT grade,wID FROM grades WHERE URL=?", [URL])
grades = {}
for d in self.cursor.fetchall():
grades[str(d[1])] = str(d[0])
return grades
def check(self, labNumber):
# params = ("Lab" + str(labNumber) + "URL", "Lab" + str(labNumber) + "URLsToGrade", None)
self.cursor.execute("Select URL, URLsToGrade FROM submissions WHERE URL!= ''")
fetch = self.cursor.fetchall()
individualURL = [str(d[0]) for d in fetch]
        URLList = [str(d[1]) for d in fetch]
        for i in range(len(individualURL)):
            if individualURL[i] not in stringToList(URLList[i]):
print individualURL[i]
return False
return True
if False:
os.remove("test.db")
makeDatabase("test.db")
sqldb = SqliteDB("test.db")
sqldb.createTables()
sqldb.addEntry("1", "1lkjsdf", 1)
sqldb.addEntry("2", "1lkjsdf", 1)
sqldb.addEntry("3", "1lkjsdf", 1)
sqldb.addEntry("4", "4lkjsdf", 1)
# sqldb.addEntry("4a",None , 2)
sqldb.addEntry("5", "5lkjsdf", 1)
sqldb.addEntry("6", "6lkjsdf", 1)
sqldb.addEntry("7", "7lkjsdf", 1)
sqldb.getURL("1", 1)
sqldb.getURL("2", 1)
sqldb.addExpertURL(1, "5lkjsdf",[1, 2, 3, 4, 5, 6, 7], 0)
sqldb.addExpertURL(1, "2lkjsdf", [1, 7, 3, 1, 6, 3], 0)
# sqldb.addEntry("8", None, 2)
sqldb.addEntry("8", '', 1)
sqldb.addEntry(9, "hidden", 1)
sqldb.addExpertURL(1, "hidden", [1, 2, 3], 1)
print "testing below"
sqldb.finalize(1, 1, 3)
print sqldb.getURLsToGrade("1", 1)
sqldb.addGrade("1",1, "5lkjsdf", [1, 2, 3, 4])
sqldb.addGrade("12",1, "asdf", 1)
sqldb.addGrade("1", 1, "2kjla", 1)
sqldb.addGrade("2", "1", "5lkjsdf", [4, 3, 2, 1])
sqldb.wIDGradesSubmitted("1", 1)
sqldb.getGrades("5", 1)
sqldb.getExpertURLs(1)
sqldb.compareToExpert("1",1)
sqldb.check(1)
# sqldb.addExpert("expertVideo", 1, 1)
# sqldb.addExpert("test2", 2, 2)
| createTables | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public | #![feature(custom_derive)]
#![feature(plugin)]
#![feature(mpsc_select)]
#![feature(plugin)]
#![plugin(plugins)]
#![deny(unsafe_code)]
#![plugin(serde_macros)]
extern crate backtrace;
extern crate canvas;
extern crate canvas_traits;
extern crate compositing;
extern crate devtools_traits;
extern crate euclid;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gfx;
extern crate gfx_traits;
extern crate ipc_channel;
extern crate layers;
extern crate layout_traits;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net_traits;
extern crate offscreen_gl_context;
#[macro_use]
extern crate profile_traits;
extern crate rand;
extern crate script_traits;
extern crate serde;
extern crate style_traits;
extern crate url;
#[macro_use]
extern crate util;
extern crate webrender_traits;
mod constellation;
mod pipeline;
#[cfg(not(target_os = "windows"))]
mod sandboxing;
mod timer_scheduler;
pub use constellation::{Constellation, FromCompositorLogger, FromScriptLogger, InitialConstellationState};
pub use pipeline::UnprivilegedPipelineContent;
#[cfg(not(target_os = "windows"))]
pub use sandboxing::content_process_sandbox_profile; | * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)] | random_line_split |
component_info.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
/**
* A `PropertyBinding` represents a mapping between a property name
* and an attribute name. It is parsed from a string of the form
* `"prop: attr"`; or simply `"propAndAttr" where the property
* and attribute have the same identifier.
*/
export class PropertyBinding {
// TODO(issue/24571): remove '!'.
bracketAttr !: string;
// TODO(issue/24571): remove '!'.
bracketParenAttr !: string;
// TODO(issue/24571): remove '!'.
parenAttr !: string;
// TODO(issue/24571): remove '!'.
onAttr !: string;
// TODO(issue/24571): remove '!'.
bindAttr !: string;
// TODO(issue/24571): remove '!'.
bindonAttr !: string;
constructor(public prop: string, public attr: string) { this.parseBinding(); }
private parseBinding() |
}
| {
this.bracketAttr = `[${this.attr}]`;
this.parenAttr = `(${this.attr})`;
this.bracketParenAttr = `[(${this.attr})]`;
const capitalAttr = this.attr.charAt(0).toUpperCase() + this.attr.substr(1);
this.onAttr = `on${capitalAttr}`;
this.bindAttr = `bind${capitalAttr}`;
this.bindonAttr = `bindon${capitalAttr}`;
} | identifier_body |
component_info.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
/**
* A `PropertyBinding` represents a mapping between a property name
* and an attribute name. It is parsed from a string of the form
* `"prop: attr"`; or simply `"propAndAttr" where the property
* and attribute have the same identifier.
*/
export class PropertyBinding {
// TODO(issue/24571): remove '!'.
bracketAttr !: string;
// TODO(issue/24571): remove '!'.
bracketParenAttr !: string;
// TODO(issue/24571): remove '!'.
parenAttr !: string;
// TODO(issue/24571): remove '!'.
onAttr !: string;
// TODO(issue/24571): remove '!'.
bindAttr !: string;
// TODO(issue/24571): remove '!'.
bindonAttr !: string;
constructor(public prop: string, public attr: string) { this.parseBinding(); }
private | () {
this.bracketAttr = `[${this.attr}]`;
this.parenAttr = `(${this.attr})`;
this.bracketParenAttr = `[(${this.attr})]`;
const capitalAttr = this.attr.charAt(0).toUpperCase() + this.attr.substr(1);
this.onAttr = `on${capitalAttr}`;
this.bindAttr = `bind${capitalAttr}`;
this.bindonAttr = `bindon${capitalAttr}`;
}
}
| parseBinding | identifier_name |
component_info.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
/**
* A `PropertyBinding` represents a mapping between a property name
* and an attribute name. It is parsed from a string of the form
* `"prop: attr"`; or simply `"propAndAttr" where the property
* and attribute have the same identifier.
*/
export class PropertyBinding {
// TODO(issue/24571): remove '!'.
bracketAttr !: string;
// TODO(issue/24571): remove '!'.
bracketParenAttr !: string;
// TODO(issue/24571): remove '!'.
parenAttr !: string;
// TODO(issue/24571): remove '!'.
onAttr !: string;
// TODO(issue/24571): remove '!'.
bindAttr !: string;
// TODO(issue/24571): remove '!'.
bindonAttr !: string;
constructor(public prop: string, public attr: string) { this.parseBinding(); }
private parseBinding() {
this.bracketAttr = `[${this.attr}]`;
this.parenAttr = `(${this.attr})`;
this.bracketParenAttr = `[(${this.attr})]`;
const capitalAttr = this.attr.charAt(0).toUpperCase() + this.attr.substr(1);
this.onAttr = `on${capitalAttr}`;
this.bindAttr = `bind${capitalAttr}`;
this.bindonAttr = `bindon${capitalAttr}`;
} | } | random_line_split |
|
ReflectionGroup.ts | import { Reflection, ReflectionKind } from './reflections/abstract';
/**
* A group of reflections. All reflections in a group are of the same kind.
*
* Reflection groups are created by the ´GroupHandler´ in the resolving phase
* of the dispatcher. The main purpose of groups is to be able to more easily
* render human readable children lists in templates.
*/
export class ReflectionGroup {
/**
* The title, a string representation of the typescript kind, of this group.
*/
title: string;
/**
* The original typescript kind of the children of this group.
*/
kind: ReflectionKind;
/**
* All reflections of this group.
*/
children: Reflection[] = [];
/**
* A list of generated css classes that should be applied to representations of this
* group in the generated markup.
*/
cssClasses?: string;
/**
* Do all children of this group have a separate document?
*
* A bound representation of the ´ReflectionGroup.getAllChildrenHaveOwnDocument´
* that can be used within templates.
*/
allChildrenHaveOwnDocument: Function;
/**
* Are all children inherited members?
*/
allChildrenAreInherited?: boolean;
/**
* Are all children private members?
*/
allChildrenArePrivate?: boolean;
/**
* Are all children private or protected members?
*/
allChildrenAreProtectedOrPrivate?: boolean;
/**
* Are all children external members?
*/
allChildrenAreExternal?: boolean;
/**
* Are any children exported declarations?
*/
someChildrenAreExported?: boolean;
/**
* Create a new ReflectionGroup instance.
*
* @param title The title of this group.
* @param kind The original typescript kind of the children of this group.
*/
constructor(title: string, kind: ReflectionKind) {
this.title = title;
this.kind = kind;
this.allChildrenHaveOwnDocument = (() => this.getAllChildrenHaveOwnDocument());
}
/**
* Do all children of this group have a separate document?
*/
private getAllChildrenHaveOwnDocument(): boolean {
let onlyOwnDocuments = true;
this.children.forEach((child) => {
onlyOwnDocuments = onlyOwnDocuments && !!child.hasOwnDocument;
});
return onlyOwnDocuments;
}
/**
* Return a raw object representation of this reflection group.
* @deprecated Use serializers instead
*/
toObject(): any {
const result = {
title: this.title,
kind: this.kind
};
| const children: any[] = [];
this.children.forEach((child) => {
children.push(child.id);
});
result['children'] = children;
}
return result;
}
} | if (this.children) { | random_line_split |
ReflectionGroup.ts | import { Reflection, ReflectionKind } from './reflections/abstract';
/**
* A group of reflections. All reflections in a group are of the same kind.
*
* Reflection groups are created by the ´GroupHandler´ in the resolving phase
* of the dispatcher. The main purpose of groups is to be able to more easily
* render human readable children lists in templates.
*/
export class ReflectionGroup {
/**
* The title, a string representation of the typescript kind, of this group.
*/
title: string;
/**
* The original typescript kind of the children of this group.
*/
kind: ReflectionKind;
/**
* All reflections of this group.
*/
children: Reflection[] = [];
/**
* A list of generated css classes that should be applied to representations of this
* group in the generated markup.
*/
cssClasses?: string;
/**
* Do all children of this group have a separate document?
*
* A bound representation of the ´ReflectionGroup.getAllChildrenHaveOwnDocument´
* that can be used within templates.
*/
allChildrenHaveOwnDocument: Function;
/**
* Are all children inherited members?
*/
allChildrenAreInherited?: boolean;
/**
* Are all children private members?
*/
allChildrenArePrivate?: boolean;
/**
* Are all children private or protected members?
*/
allChildrenAreProtectedOrPrivate?: boolean;
/**
* Are all children external members?
*/
allChildrenAreExternal?: boolean;
/**
* Are any children exported declarations?
*/
someChildrenAreExported?: boolean;
/**
* Create a new ReflectionGroup instance.
*
* @param title The title of this group.
* @param kind The original typescript kind of the children of this group.
*/
constructor(title: string, kind: ReflectionKind) {
this.title = title;
this.kind = kind;
this.allChildrenHaveOwnDocument = (() => this.getAllChildrenHaveOwnDocument());
}
/**
* Do all children of this group have a separate document?
*/
private getAllChildrenHaveOwnDocument(): boolean {
let onlyOwnDocuments = true;
this.children.forEach((child) => {
onlyOwnDocuments = onlyOwnDocuments && !!child.hasOwnDocument;
});
return onlyOwnDocuments;
}
/**
* Return a raw object representation of this reflection group.
* @deprecated Use serializers instead
*/
toObject(): any {
const result = {
title: this.title,
kind: this.kind
};
if (this.children) {
| return result;
}
}
| const children: any[] = [];
this.children.forEach((child) => {
children.push(child.id);
});
result['children'] = children;
}
| conditional_block |
ReflectionGroup.ts | import { Reflection, ReflectionKind } from './reflections/abstract';
/**
* A group of reflections. All reflections in a group are of the same kind.
*
* Reflection groups are created by the ´GroupHandler´ in the resolving phase
* of the dispatcher. The main purpose of groups is to be able to more easily
* render human readable children lists in templates.
*/
export class Re |
/**
* The title, a string representation of the typescript kind, of this group.
*/
title: string;
/**
* The original typescript kind of the children of this group.
*/
kind: ReflectionKind;
/**
* All reflections of this group.
*/
children: Reflection[] = [];
/**
* A list of generated css classes that should be applied to representations of this
* group in the generated markup.
*/
cssClasses?: string;
/**
* Do all children of this group have a separate document?
*
* A bound representation of the ´ReflectionGroup.getAllChildrenHaveOwnDocument´
* that can be used within templates.
*/
allChildrenHaveOwnDocument: Function;
/**
* Are all children inherited members?
*/
allChildrenAreInherited?: boolean;
/**
* Are all children private members?
*/
allChildrenArePrivate?: boolean;
/**
* Are all children private or protected members?
*/
allChildrenAreProtectedOrPrivate?: boolean;
/**
* Are all children external members?
*/
allChildrenAreExternal?: boolean;
/**
* Are any children exported declarations?
*/
someChildrenAreExported?: boolean;
/**
* Create a new ReflectionGroup instance.
*
* @param title The title of this group.
* @param kind The original typescript kind of the children of this group.
*/
constructor(title: string, kind: ReflectionKind) {
this.title = title;
this.kind = kind;
this.allChildrenHaveOwnDocument = (() => this.getAllChildrenHaveOwnDocument());
}
/**
* Do all children of this group have a separate document?
*/
private getAllChildrenHaveOwnDocument(): boolean {
let onlyOwnDocuments = true;
this.children.forEach((child) => {
onlyOwnDocuments = onlyOwnDocuments && !!child.hasOwnDocument;
});
return onlyOwnDocuments;
}
/**
* Return a raw object representation of this reflection group.
* @deprecated Use serializers instead
*/
toObject(): any {
const result = {
title: this.title,
kind: this.kind
};
if (this.children) {
const children: any[] = [];
this.children.forEach((child) => {
children.push(child.id);
});
result['children'] = children;
}
return result;
}
}
| flectionGroup { | identifier_name |
ReflectionGroup.ts | import { Reflection, ReflectionKind } from './reflections/abstract';
/**
* A group of reflections. All reflections in a group are of the same kind.
*
* Reflection groups are created by the ´GroupHandler´ in the resolving phase
* of the dispatcher. The main purpose of groups is to be able to more easily
* render human readable children lists in templates.
*/
export class ReflectionGroup {
/**
* The title, a string representation of the typescript kind, of this group.
*/
title: string;
/**
* The original typescript kind of the children of this group.
*/
kind: ReflectionKind;
/**
* All reflections of this group.
*/
children: Reflection[] = [];
/**
* A list of generated css classes that should be applied to representations of this
* group in the generated markup.
*/
cssClasses?: string;
/**
* Do all children of this group have a separate document?
*
* A bound representation of the ´ReflectionGroup.getAllChildrenHaveOwnDocument´
* that can be used within templates.
*/
allChildrenHaveOwnDocument: Function;
/**
* Are all children inherited members?
*/
allChildrenAreInherited?: boolean;
/**
* Are all children private members?
*/
allChildrenArePrivate?: boolean;
/**
* Are all children private or protected members?
*/
allChildrenAreProtectedOrPrivate?: boolean;
/**
* Are all children external members?
*/
allChildrenAreExternal?: boolean;
/**
* Are any children exported declarations?
*/
someChildrenAreExported?: boolean;
/**
* Create a new ReflectionGroup instance.
*
* @param title The title of this group.
* @param kind The original typescript kind of the children of this group.
*/
constructor(title: string, kind: ReflectionKind) {
this.title = title;
this.kind = kind;
this.allChildrenHaveOwnDocument = (() => this.getAllChildrenHaveOwnDocument());
}
/**
* Do all children of this group have a separate document?
*/
private getAllChildrenHaveOwnDocument(): boolean {
let onlyOwnDocuments = true;
this.children.forEach((child) => {
onlyOwnDocuments = onlyOwnDocuments && !!child.hasOwnDocument;
});
return onlyOwnDocuments;
}
/**
* Return a raw object representation of this reflection group.
* @deprecated Use serializers instead
*/
toObject(): any {
| const result = {
title: this.title,
kind: this.kind
};
if (this.children) {
const children: any[] = [];
this.children.forEach((child) => {
children.push(child.id);
});
result['children'] = children;
}
return result;
}
}
| identifier_body |
|
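# The getAllChildrenHaveOwnDocument() method in the rows above folds a boolean
# across children with `onlyOwnDocuments && !!child.hasOwnDocument`; in Python
# the same check collapses to all(). A hypothetical sketch, with an invented
# Child class standing in for TypeDoc reflections:
class Child:
    def __init__(self, has_own_document):
        self.has_own_document = has_own_document

def all_children_have_own_document(children):
    # like the TS original, an empty group counts as "all have own documents"
    return all(bool(c.has_own_document) for c in children)

assert all_children_have_own_document([Child(True), Child(True)])
assert not all_children_have_own_document([Child(True), Child(None)])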
const-vec-of-fns.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
// FIXME: #7385: hits a codegen bug on OS X x86_64
/*!
* Try to double-check that static fns have the right size (with or
* without dummy env ptr, as appropriate) by iterating a size-2 array.
* If the static size differs from the runtime size, the second element
* should be read as a null or otherwise wrong pointer and crash.
*/
fn f() |
static bare_fns: &'static [extern fn()] = &[f, f];
struct S<'self>(&'self fn());
static closures: &'static [S<'static>] = &[S(f), S(f)];
pub fn main() {
for &bare_fn in bare_fns.iter() { bare_fn() }
for &closure in closures.iter() { (*closure)() }
}
| { } | identifier_body |
const-vec-of-fns.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
// FIXME: #7385: hits a codegen bug on OS X x86_64
/*!
* Try to double-check that static fns have the right size (with or
* without dummy env ptr, as appropriate) by iterating a size-2 array.
* If the static size differs from the runtime size, the second element
* should be read as a null or otherwise wrong pointer and crash.
*/
fn f() { }
static bare_fns: &'static [extern fn()] = &[f, f];
struct S<'self>(&'self fn());
static closures: &'static [S<'static>] = &[S(f), S(f)];
pub fn | () {
for &bare_fn in bare_fns.iter() { bare_fn() }
for &closure in closures.iter() { (*closure)() }
}
| main | identifier_name |
const-vec-of-fns.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | // FIXME: #7385: hits a codegen bug on OS X x86_64
/*!
* Try to double-check that static fns have the right size (with or
* without dummy env ptr, as appropriate) by iterating a size-2 array.
* If the static size differs from the runtime size, the second element
* should be read as a null or otherwise wrong pointer and crash.
*/
fn f() { }
static bare_fns: &'static [extern fn()] = &[f, f];
struct S<'self>(&'self fn());
static closures: &'static [S<'static>] = &[S(f), S(f)];
pub fn main() {
for &bare_fn in bare_fns.iter() { bare_fn() }
for &closure in closures.iter() { (*closure)() }
} | // option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test | random_line_split |
cfg.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/**
The compiler code necessary to support the cfg! extension, which
expands to a literal `true` or `false` based on whether the given cfgs
match the current compilation environment.
*/
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use attr;
use attr::*;
use parse::attr::ParserAttr;
use parse::token::InternedString;
use parse::token;
pub fn expand_cfg(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> {
let mut p = cx.new_parser_from_tts(tts);
let mut cfgs = Vec::new();
// parse `cfg!(meta_item, meta_item(x,y), meta_item="foo", ...)`
while p.token != token::EOF {
cfgs.push(p.parse_meta_item());
if p.eat(&token::EOF) | // trailing comma is optional.
p.expect(&token::COMMA);
}
// test_cfg searches for meta items looking like `cfg(foo, ...)`
let in_cfg = &[cx.meta_list(sp, InternedString::new("cfg"), cfgs)];
let matches_cfg = attr::test_cfg(cx.cfg().as_slice(),
in_cfg.iter().map(|&x| x));
let e = cx.expr_bool(sp, matches_cfg);
MacExpr::new(e)
}
| { break } | conditional_block |
cfg.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/**
The compiler code necessary to support the cfg! extension, which
expands to a literal `true` or `false` based on whether the given cfgs
match the current compilation environment.
*/
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use attr;
use attr::*;
use parse::attr::ParserAttr;
use parse::token::InternedString;
use parse::token;
pub fn | (cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> {
let mut p = cx.new_parser_from_tts(tts);
let mut cfgs = Vec::new();
// parse `cfg!(meta_item, meta_item(x,y), meta_item="foo", ...)`
while p.token != token::EOF {
cfgs.push(p.parse_meta_item());
if p.eat(&token::EOF) { break } // trailing comma is optional.
p.expect(&token::COMMA);
}
// test_cfg searches for meta items looking like `cfg(foo, ...)`
let in_cfg = &[cx.meta_list(sp, InternedString::new("cfg"), cfgs)];
let matches_cfg = attr::test_cfg(cx.cfg().as_slice(),
in_cfg.iter().map(|&x| x));
let e = cx.expr_bool(sp, matches_cfg);
MacExpr::new(e)
}
| expand_cfg | identifier_name |
cfg.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/**
The compiler code necessary to support the cfg! extension, which
expands to a literal `true` or `false` based on whether the given cfgs | use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use attr;
use attr::*;
use parse::attr::ParserAttr;
use parse::token::InternedString;
use parse::token;
pub fn expand_cfg(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> {
let mut p = cx.new_parser_from_tts(tts);
let mut cfgs = Vec::new();
// parse `cfg!(meta_item, meta_item(x,y), meta_item="foo", ...)`
while p.token != token::EOF {
cfgs.push(p.parse_meta_item());
if p.eat(&token::EOF) { break } // trailing comma is optional.
p.expect(&token::COMMA);
}
// test_cfg searches for meta items looking like `cfg(foo, ...)`
let in_cfg = &[cx.meta_list(sp, InternedString::new("cfg"), cfgs)];
let matches_cfg = attr::test_cfg(cx.cfg().as_slice(),
in_cfg.iter().map(|&x| x));
let e = cx.expr_bool(sp, matches_cfg);
MacExpr::new(e)
} | match the current compilation environment.
*/
| random_line_split |
cfg.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/**
The compiler code necessary to support the cfg! extension, which
expands to a literal `true` or `false` based on whether the given cfgs
match the current compilation environment.
*/
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use attr;
use attr::*;
use parse::attr::ParserAttr;
use parse::token::InternedString;
use parse::token;
pub fn expand_cfg(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> | {
let mut p = cx.new_parser_from_tts(tts);
let mut cfgs = Vec::new();
// parse `cfg!(meta_item, meta_item(x,y), meta_item="foo", ...)`
while p.token != token::EOF {
cfgs.push(p.parse_meta_item());
if p.eat(&token::EOF) { break } // trailing comma is optional.
p.expect(&token::COMMA);
}
// test_cfg searches for meta items looking like `cfg(foo, ...)`
let in_cfg = &[cx.meta_list(sp, InternedString::new("cfg"), cfgs)];
let matches_cfg = attr::test_cfg(cx.cfg().as_slice(),
in_cfg.iter().map(|&x| x));
let e = cx.expr_bool(sp, matches_cfg);
MacExpr::new(e)
} | identifier_body |
|
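# expand_cfg above turns `cfg!(...)` into a literal boolean by testing the
# requested cfgs against the compilation session's cfg set (via attr::test_cfg).
# A rough Python analogue of that decision, assuming the comma-separated items
# inside one cfg!() must all hold, as in this era of rustc; the session values
# are made up for illustration:
def expand_cfg(requested, session_cfgs):
    # cfg!(a, b, ...) evaluates true only if every requested cfg is active
    return all(cfg in session_cfgs for cfg in requested)

session = {"unix", 'target_os="linux"'}
assert expand_cfg(["unix"], session) is True
assert expand_cfg(["unix", "windows"], session) is False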
index.js | import selectorParser from "postcss-selector-parser"
import {
isKeyframeRule,
isStandardRule,
isStandardSelector,
isStandardTypeSelector,
report,
ruleMessages,
validateOptions,
} from "../../utils"
export const ruleName = "selector-type-case"
export const messages = ruleMessages(ruleName, {
expected: (actual, expected) => `Expected "${actual}" to be "${expected}"`,
})
export default function (expectation) {
return (root, result) => {
const validOptions = validateOptions(result, ruleName, {
actual: expectation,
possible: [
"lower",
"upper",
],
})
if (!validOptions) { return }
root.walkRules(rule => {
if (!isStandardRule(rule)) { return }
if (isKeyframeRule(rule)) { return }
const { selector } = rule
if (!isStandardSelector(selector)) |
function checkSelector(selectorAST) {
selectorAST.walkTags(tag => {
if (!isStandardTypeSelector(tag)) { return }
const { sourceIndex, value } = tag
const expectedValue = expectation === "lower" ? value.toLowerCase() : value.toUpperCase()
if (value === expectedValue) { return }
report({
message: messages.expected(value, expectedValue),
node: rule,
index: sourceIndex,
ruleName,
result,
})
})
}
selectorParser(checkSelector).process(selector)
})
}
}
| { return } | conditional_block |
index.js | import selectorParser from "postcss-selector-parser"
import {
isKeyframeRule,
isStandardRule,
isStandardSelector,
isStandardTypeSelector,
report,
ruleMessages,
validateOptions,
} from "../../utils"
export const ruleName = "selector-type-case"
export const messages = ruleMessages(ruleName, {
expected: (actual, expected) => `Expected "${actual}" to be "${expected}"`,
})
export default function (expectation) {
return (root, result) => {
const validOptions = validateOptions(result, ruleName, {
actual: expectation,
possible: [
"lower",
"upper",
],
})
if (!validOptions) { return }
root.walkRules(rule => {
if (!isStandardRule(rule)) { return }
if (isKeyframeRule(rule)) { return }
const { selector } = rule
if (!isStandardSelector(selector)) { return }
function checkSelector(selectorAST) {
selectorAST.walkTags(tag => {
if (!isStandardTypeSelector(tag)) { return }
const { sourceIndex, value } = tag
const expectedValue = expectation === "lower" ? value.toLowerCase() : value.toUpperCase()
if (value === expectedValue) { return }
report({
message: messages.expected(value, expectedValue),
node: rule,
index: sourceIndex, |
selectorParser(checkSelector).process(selector)
})
}
} | ruleName,
result,
})
})
} | random_line_split |
index.js | import selectorParser from "postcss-selector-parser"
import {
isKeyframeRule,
isStandardRule,
isStandardSelector,
isStandardTypeSelector,
report,
ruleMessages,
validateOptions,
} from "../../utils"
export const ruleName = "selector-type-case"
export const messages = ruleMessages(ruleName, {
expected: (actual, expected) => `Expected "${actual}" to be "${expected}"`,
})
export default function (expectation) {
return (root, result) => {
const validOptions = validateOptions(result, ruleName, {
actual: expectation,
possible: [
"lower",
"upper",
],
})
if (!validOptions) { return }
root.walkRules(rule => {
if (!isStandardRule(rule)) { return }
if (isKeyframeRule(rule)) { return }
const { selector } = rule
if (!isStandardSelector(selector)) { return }
function | (selectorAST) {
selectorAST.walkTags(tag => {
if (!isStandardTypeSelector(tag)) { return }
const { sourceIndex, value } = tag
const expectedValue = expectation === "lower" ? value.toLowerCase() : value.toUpperCase()
if (value === expectedValue) { return }
report({
message: messages.expected(value, expectedValue),
node: rule,
index: sourceIndex,
ruleName,
result,
})
})
}
selectorParser(checkSelector).process(selector)
})
}
}
| checkSelector | identifier_name |
index.js | import selectorParser from "postcss-selector-parser"
import {
isKeyframeRule,
isStandardRule,
isStandardSelector,
isStandardTypeSelector,
report,
ruleMessages,
validateOptions,
} from "../../utils"
export const ruleName = "selector-type-case"
export const messages = ruleMessages(ruleName, {
expected: (actual, expected) => `Expected "${actual}" to be "${expected}"`,
})
export default function (expectation) {
return (root, result) => {
const validOptions = validateOptions(result, ruleName, {
actual: expectation,
possible: [
"lower",
"upper",
],
})
if (!validOptions) { return }
root.walkRules(rule => {
if (!isStandardRule(rule)) { return }
if (isKeyframeRule(rule)) { return }
const { selector } = rule
if (!isStandardSelector(selector)) { return }
function checkSelector(selectorAST) |
selectorParser(checkSelector).process(selector)
})
}
}
| {
selectorAST.walkTags(tag => {
if (!isStandardTypeSelector(tag)) { return }
const { sourceIndex, value } = tag
const expectedValue = expectation === "lower" ? value.toLowerCase() : value.toUpperCase()
if (value === expectedValue) { return }
report({
message: messages.expected(value, expectedValue),
node: rule,
index: sourceIndex,
ruleName,
result,
})
})
} | identifier_body |
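# The core comparison of the selector-type-case rule above, restated in Python;
# the selector values are invented examples, and reporting is left out.
def expected_case(value: str, expectation: str) -> str:
    return value.lower() if expectation == "lower" else value.upper()

assert expected_case("DIV", "lower") == "div"  # differs, so the rule would report it
assert expected_case("a", "lower") == "a"      # already conforms, no report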
air_quality.py | """Support for the Airly air_quality service."""
from homeassistant.components.air_quality import (
ATTR_AQI,
ATTR_PM_2_5,
ATTR_PM_10,
AirQualityEntity,
)
from homeassistant.const import CONF_NAME
from .const import (
ATTR_API_ADVICE,
ATTR_API_CAQI,
ATTR_API_CAQI_DESCRIPTION,
ATTR_API_CAQI_LEVEL,
ATTR_API_PM10,
ATTR_API_PM10_LIMIT,
ATTR_API_PM10_PERCENT,
ATTR_API_PM25,
ATTR_API_PM25_LIMIT,
ATTR_API_PM25_PERCENT,
DOMAIN,
)
ATTRIBUTION = "Data provided by Airly"
LABEL_ADVICE = "advice"
LABEL_AQI_DESCRIPTION = f"{ATTR_AQI}_description"
LABEL_AQI_LEVEL = f"{ATTR_AQI}_level"
LABEL_PM_2_5_LIMIT = f"{ATTR_PM_2_5}_limit"
LABEL_PM_2_5_PERCENT = f"{ATTR_PM_2_5}_percent_of_limit"
LABEL_PM_10_LIMIT = f"{ATTR_PM_10}_limit"
LABEL_PM_10_PERCENT = f"{ATTR_PM_10}_percent_of_limit"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Airly air_quality entity based on a config entry."""
name = config_entry.data[CONF_NAME]
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[AirlyAirQuality(coordinator, name, config_entry.unique_id)], False
)
def round_state(func):
"""Round state."""
def _decorator(self):
res = func(self)
if isinstance(res, float):
return round(res)
return res
return _decorator
class AirlyAirQuality(AirQualityEntity):
"""Define an Airly air quality."""
def __init__(self, coordinator, name, unique_id):
"""Initialize."""
self.coordinator = coordinator
self._name = name
self._unique_id = unique_id
self._icon = "mdi:blur"
@property
def name(self):
"""Return the name."""
return self._name
@property
def should_poll(self):
"""Return the polling requirement of the entity."""
return False
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
@round_state
def air_quality_index(self):
"""Return the air quality index."""
return self.coordinator.data[ATTR_API_CAQI]
@property
@round_state
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self.coordinator.data[ATTR_API_PM25]
@property
@round_state
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self.coordinator.data[ATTR_API_PM10]
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self.coordinator.last_update_success
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
LABEL_AQI_DESCRIPTION: self.coordinator.data[ATTR_API_CAQI_DESCRIPTION],
LABEL_ADVICE: self.coordinator.data[ATTR_API_ADVICE],
LABEL_AQI_LEVEL: self.coordinator.data[ATTR_API_CAQI_LEVEL],
LABEL_PM_2_5_LIMIT: self.coordinator.data[ATTR_API_PM25_LIMIT],
LABEL_PM_2_5_PERCENT: round(self.coordinator.data[ATTR_API_PM25_PERCENT]),
LABEL_PM_10_LIMIT: self.coordinator.data[ATTR_API_PM10_LIMIT],
LABEL_PM_10_PERCENT: round(self.coordinator.data[ATTR_API_PM10_PERCENT]),
}
async def async_added_to_hass(self):
|
async def async_update(self):
"""Update Airly entity."""
await self.coordinator.async_request_refresh()
| """Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
) | identifier_body |
air_quality.py | """Support for the Airly air_quality service."""
from homeassistant.components.air_quality import (
ATTR_AQI,
ATTR_PM_2_5,
ATTR_PM_10,
AirQualityEntity,
)
from homeassistant.const import CONF_NAME
from .const import (
ATTR_API_ADVICE,
ATTR_API_CAQI,
ATTR_API_CAQI_DESCRIPTION,
ATTR_API_CAQI_LEVEL,
ATTR_API_PM10,
ATTR_API_PM10_LIMIT,
ATTR_API_PM10_PERCENT,
ATTR_API_PM25,
ATTR_API_PM25_LIMIT,
ATTR_API_PM25_PERCENT,
DOMAIN,
)
ATTRIBUTION = "Data provided by Airly"
LABEL_ADVICE = "advice"
LABEL_AQI_DESCRIPTION = f"{ATTR_AQI}_description"
LABEL_AQI_LEVEL = f"{ATTR_AQI}_level"
LABEL_PM_2_5_LIMIT = f"{ATTR_PM_2_5}_limit"
LABEL_PM_2_5_PERCENT = f"{ATTR_PM_2_5}_percent_of_limit"
LABEL_PM_10_LIMIT = f"{ATTR_PM_10}_limit"
LABEL_PM_10_PERCENT = f"{ATTR_PM_10}_percent_of_limit"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Airly air_quality entity based on a config entry."""
name = config_entry.data[CONF_NAME]
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[AirlyAirQuality(coordinator, name, config_entry.unique_id)], False
)
def round_state(func):
"""Round state."""
def _decorator(self):
res = func(self)
if isinstance(res, float):
|
return res
return _decorator
class AirlyAirQuality(AirQualityEntity):
"""Define an Airly air quality."""
def __init__(self, coordinator, name, unique_id):
"""Initialize."""
self.coordinator = coordinator
self._name = name
self._unique_id = unique_id
self._icon = "mdi:blur"
@property
def name(self):
"""Return the name."""
return self._name
@property
def should_poll(self):
"""Return the polling requirement of the entity."""
return False
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
@round_state
def air_quality_index(self):
"""Return the air quality index."""
return self.coordinator.data[ATTR_API_CAQI]
@property
@round_state
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self.coordinator.data[ATTR_API_PM25]
@property
@round_state
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self.coordinator.data[ATTR_API_PM10]
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self.coordinator.last_update_success
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
LABEL_AQI_DESCRIPTION: self.coordinator.data[ATTR_API_CAQI_DESCRIPTION],
LABEL_ADVICE: self.coordinator.data[ATTR_API_ADVICE],
LABEL_AQI_LEVEL: self.coordinator.data[ATTR_API_CAQI_LEVEL],
LABEL_PM_2_5_LIMIT: self.coordinator.data[ATTR_API_PM25_LIMIT],
LABEL_PM_2_5_PERCENT: round(self.coordinator.data[ATTR_API_PM25_PERCENT]),
LABEL_PM_10_LIMIT: self.coordinator.data[ATTR_API_PM10_LIMIT],
LABEL_PM_10_PERCENT: round(self.coordinator.data[ATTR_API_PM10_PERCENT]),
}
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update Airly entity."""
await self.coordinator.async_request_refresh()
| return round(res) | conditional_block |
air_quality.py | """Support for the Airly air_quality service."""
from homeassistant.components.air_quality import (
ATTR_AQI,
ATTR_PM_2_5,
ATTR_PM_10,
AirQualityEntity,
)
from homeassistant.const import CONF_NAME
from .const import (
ATTR_API_ADVICE,
ATTR_API_CAQI,
ATTR_API_CAQI_DESCRIPTION,
ATTR_API_CAQI_LEVEL,
ATTR_API_PM10,
ATTR_API_PM10_LIMIT,
ATTR_API_PM10_PERCENT,
ATTR_API_PM25,
ATTR_API_PM25_LIMIT,
ATTR_API_PM25_PERCENT,
DOMAIN,
)
ATTRIBUTION = "Data provided by Airly"
LABEL_ADVICE = "advice"
LABEL_AQI_DESCRIPTION = f"{ATTR_AQI}_description"
LABEL_AQI_LEVEL = f"{ATTR_AQI}_level"
LABEL_PM_2_5_LIMIT = f"{ATTR_PM_2_5}_limit"
LABEL_PM_2_5_PERCENT = f"{ATTR_PM_2_5}_percent_of_limit"
LABEL_PM_10_LIMIT = f"{ATTR_PM_10}_limit"
LABEL_PM_10_PERCENT = f"{ATTR_PM_10}_percent_of_limit"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Airly air_quality entity based on a config entry."""
name = config_entry.data[CONF_NAME]
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[AirlyAirQuality(coordinator, name, config_entry.unique_id)], False
)
def round_state(func):
"""Round state."""
def _decorator(self):
res = func(self)
if isinstance(res, float):
return round(res)
return res
return _decorator
class AirlyAirQuality(AirQualityEntity):
"""Define an Airly air quality."""
def __init__(self, coordinator, name, unique_id):
"""Initialize."""
self.coordinator = coordinator
self._name = name
self._unique_id = unique_id
self._icon = "mdi:blur"
@property
def name(self):
"""Return the name."""
return self._name
@property
def should_poll(self):
"""Return the polling requirement of the entity."""
return False
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
@round_state
def air_quality_index(self):
"""Return the air quality index."""
return self.coordinator.data[ATTR_API_CAQI]
@property
@round_state
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self.coordinator.data[ATTR_API_PM25]
@property
@round_state
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self.coordinator.data[ATTR_API_PM10]
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self.coordinator.last_update_success | """Return the state attributes."""
return {
LABEL_AQI_DESCRIPTION: self.coordinator.data[ATTR_API_CAQI_DESCRIPTION],
LABEL_ADVICE: self.coordinator.data[ATTR_API_ADVICE],
LABEL_AQI_LEVEL: self.coordinator.data[ATTR_API_CAQI_LEVEL],
LABEL_PM_2_5_LIMIT: self.coordinator.data[ATTR_API_PM25_LIMIT],
LABEL_PM_2_5_PERCENT: round(self.coordinator.data[ATTR_API_PM25_PERCENT]),
LABEL_PM_10_LIMIT: self.coordinator.data[ATTR_API_PM10_LIMIT],
LABEL_PM_10_PERCENT: round(self.coordinator.data[ATTR_API_PM10_PERCENT]),
}
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update Airly entity."""
await self.coordinator.async_request_refresh() |
@property
def device_state_attributes(self): | random_line_split |
air_quality.py | """Support for the Airly air_quality service."""
from homeassistant.components.air_quality import (
ATTR_AQI,
ATTR_PM_2_5,
ATTR_PM_10,
AirQualityEntity,
)
from homeassistant.const import CONF_NAME
from .const import (
ATTR_API_ADVICE,
ATTR_API_CAQI,
ATTR_API_CAQI_DESCRIPTION,
ATTR_API_CAQI_LEVEL,
ATTR_API_PM10,
ATTR_API_PM10_LIMIT,
ATTR_API_PM10_PERCENT,
ATTR_API_PM25,
ATTR_API_PM25_LIMIT,
ATTR_API_PM25_PERCENT,
DOMAIN,
)
ATTRIBUTION = "Data provided by Airly"
LABEL_ADVICE = "advice"
LABEL_AQI_DESCRIPTION = f"{ATTR_AQI}_description"
LABEL_AQI_LEVEL = f"{ATTR_AQI}_level"
LABEL_PM_2_5_LIMIT = f"{ATTR_PM_2_5}_limit"
LABEL_PM_2_5_PERCENT = f"{ATTR_PM_2_5}_percent_of_limit"
LABEL_PM_10_LIMIT = f"{ATTR_PM_10}_limit"
LABEL_PM_10_PERCENT = f"{ATTR_PM_10}_percent_of_limit"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Airly air_quality entity based on a config entry."""
name = config_entry.data[CONF_NAME]
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[AirlyAirQuality(coordinator, name, config_entry.unique_id)], False
)
def round_state(func):
"""Round state."""
def _decorator(self):
res = func(self)
if isinstance(res, float):
return round(res)
return res
return _decorator
class AirlyAirQuality(AirQualityEntity):
"""Define an Airly air quality."""
def __init__(self, coordinator, name, unique_id):
"""Initialize."""
self.coordinator = coordinator
self._name = name
self._unique_id = unique_id
self._icon = "mdi:blur"
@property
def name(self):
"""Return the name."""
return self._name
@property
def should_poll(self):
"""Return the polling requirement of the entity."""
return False
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
@round_state
def | (self):
"""Return the air quality index."""
return self.coordinator.data[ATTR_API_CAQI]
@property
@round_state
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self.coordinator.data[ATTR_API_PM25]
@property
@round_state
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self.coordinator.data[ATTR_API_PM10]
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self.coordinator.last_update_success
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
LABEL_AQI_DESCRIPTION: self.coordinator.data[ATTR_API_CAQI_DESCRIPTION],
LABEL_ADVICE: self.coordinator.data[ATTR_API_ADVICE],
LABEL_AQI_LEVEL: self.coordinator.data[ATTR_API_CAQI_LEVEL],
LABEL_PM_2_5_LIMIT: self.coordinator.data[ATTR_API_PM25_LIMIT],
LABEL_PM_2_5_PERCENT: round(self.coordinator.data[ATTR_API_PM25_PERCENT]),
LABEL_PM_10_LIMIT: self.coordinator.data[ATTR_API_PM10_LIMIT],
LABEL_PM_10_PERCENT: round(self.coordinator.data[ATTR_API_PM10_PERCENT]),
}
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update Airly entity."""
await self.coordinator.async_request_refresh()
| air_quality_index | identifier_name |
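# round_state in the rows above is an ordinary decorator (stacked under
# @property in the source): float results are rounded, everything else passes
# through unchanged. A standalone sketch with an invented class and value:
def round_state(func):
    def _decorator(self):
        res = func(self)
        if isinstance(res, float):
            return round(res)
        return res
    return _decorator

class Demo:
    @round_state
    def reading(self):
        return 42.6

assert Demo().reading() == 43  # float -> rounded int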
main.rs | fn main() {
// var()
// control_flow()
let n = 45;
let big = fib(n);
println!("{}th fib: {}", n, big);
}
fn var() |
fn control_flow() {
let number = 3;
if number < 5 {
println!("condition was true");
} else {
println!("condition was false");
}
let a = [1, 2, 3, 4, 5];
for e in a.iter() {
println!("I'm looping {}", e)
}
for number in (1..4).rev() {
println!("{}!", number)
}
println!("LIFTOFF!!!")
}
fn fib(n: u32) -> u32 {
if n == 0 {
1
} else if n == 1 {
1
} else if n == 2 {
2
} else {
fib(n - 1) + fib(n - 2)
}
}
| {
let mut x = 5;
println!("The value of x is: {}", x);
x = 6;
println!("The value of x is: {}", x);
let tup: (i32, f64, u8) = (500, 6.4, 1);
let (_, y, _) = tup;
println!("The value of y is: {}", y);
println!("The value if x is: {}", tup.0);
} | identifier_body |
main.rs | fn main() {
// var()
// control_flow()
let n = 45;
let big = fib(n);
println!("{}th fib: {}", n, big);
}
fn var() {
let mut x = 5;
println!("The value of x is: {}", x);
x = 6;
println!("The value of x is: {}", x);
let tup: (i32, f64, u8) = (500, 6.4, 1);
let (_, y, _) = tup;
println!("The value of y is: {}", y);
println!("The value if x is: {}", tup.0);
}
fn control_flow() {
let number = 3;
if number < 5 {
println!("condition was true");
} else {
println!("condition was false");
}
let a = [1, 2, 3, 4, 5];
for e in a.iter() {
println!("I'm looping {}", e)
}
for number in (1..4).rev() {
println!("{}!", number)
}
println!("LIFTOFF!!!")
}
fn | (n: u32) -> u32 {
if n == 0 {
1
} else if n == 1 {
1
} else if n == 2 {
2
} else {
fib(n - 1) + fib(n - 2)
}
}
| fib | identifier_name |
main.rs | fn main() {
// var()
// control_flow()
let n = 45;
let big = fib(n);
println!("{}th fib: {}", n, big);
}
fn var() {
let mut x = 5;
println!("The value of x is: {}", x);
x = 6;
println!("The value of x is: {}", x);
let tup: (i32, f64, u8) = (500, 6.4, 1);
let (_, y, _) = tup; |
fn control_flow() {
let number = 3;
if number < 5 {
println!("condition was true");
} else {
println!("condition was false");
}
let a = [1, 2, 3, 4, 5];
for e in a.iter() {
println!("I'm looping {}", e)
}
for number in (1..4).rev() {
println!("{}!", number)
}
println!("LIFTOFF!!!")
}
fn fib(n: u32) -> u32 {
if n == 0 {
1
} else if n == 1 {
1
} else if n == 2 {
2
} else {
fib(n - 1) + fib(n - 2)
}
} | println!("The value of y is: {}", y);
println!("The value if x is: {}", tup.0);
} | random_line_split |
main.rs | fn main() {
// var()
// control_flow()
let n = 45;
let big = fib(n);
println!("{}th fib: {}", n, big);
}
fn var() {
let mut x = 5;
println!("The value of x is: {}", x);
x = 6;
println!("The value of x is: {}", x);
let tup: (i32, f64, u8) = (500, 6.4, 1);
let (_, y, _) = tup;
println!("The value of y is: {}", y);
println!("The value if x is: {}", tup.0);
}
fn control_flow() {
let number = 3;
if number < 5 {
println!("condition was true");
} else {
println!("condition was false");
}
let a = [1, 2, 3, 4, 5];
for e in a.iter() {
println!("I'm looping {}", e)
}
for number in (1..4).rev() {
println!("{}!", number)
}
println!("LIFTOFF!!!")
}
fn fib(n: u32) -> u32 {
if n == 0 {
1
} else if n == 1 {
1
} else if n == 2 | else {
fib(n - 1) + fib(n - 2)
}
}
| {
2
} | conditional_block |
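# The Rust fib above recurses without memoization, so fib(45) does exponential
# work; its shifted base cases (fib(0) == 1, fib(2) == 2) are kept as-is here.
# A memoized Python equivalent, offered as an aside rather than a fix:
from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n: int) -> int:
    if n <= 1:
        return 1
    if n == 2:
        return 2
    return fib(n - 1) + fib(n - 2)

assert fib(45) == fib(44) + fib(43)  # runs instantly thanks to the cache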
Window.ts | /*
* Copyright 2017 András Parditka.
*
* This file is part of Ecset.
*
* Ecset is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Ecset is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Ecset. If not, see <http://www.gnu.org/licenses/>.
*/
import * as m from 'mithril'
export class Window {
constructor(
public id: string, | } | public contentFactory: () => m.Comp<any, any>,
) {
} | random_line_split |
Window.ts | /*
* Copyright 2017 András Parditka.
*
* This file is part of Ecset.
*
* Ecset is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Ecset is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Ecset. If not, see <http://www.gnu.org/licenses/>.
*/
import * as m from 'mithril'
export class Window {
constructor(
public id: string,
public contentFactory: () => m.Comp<any, any>,
) { | }
|
}
| identifier_body |
Window.ts | /*
* Copyright 2017 András Parditka.
*
* This file is part of Ecset.
*
* Ecset is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Ecset is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Ecset. If not, see <http://www.gnu.org/licenses/>.
*/
import * as m from 'mithril'
export class Window {
c |
public id: string,
public contentFactory: () => m.Comp<any, any>,
) {
}
}
| onstructor( | identifier_name |
files.ts | import {Request, Response} from "express";
import { DetailedFile } from '../models/DetailedFile';
import * as utils from '../utils/WopiUtil';
export let fileRequestHandler = (req: Request, res: Response) => {
console.log('Handling ' + req.method + ' request for file/folder id ' + req.params["id"]);
res.status(200).send({'title': 'fileHandler', 'method': req.method, 'id': req.params['id']});
let files = new Array<DetailedFile>();
let docFile = new DetailedFile();
docFile.BaseFileName = 'myDocFile.docx';
let xlFile = new DetailedFile();
xlFile.BaseFileName = 'myXLFile.xlsx';
| files.push(pptFile);
utils.PopulateActions(files);
console.log(files);
}; | let pptFile = new DetailedFile();
pptFile.BaseFileName = 'myPPTFile.pptx';
files.push(docFile);
files.push(xlFile); | random_line_split |
library.py | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Convolve MTSS rotamers with MD trajectory.
# Copyright (c) 2011-2017 Philip Fowler and AUTHORS
# Published under the GNU Public Licence, version 2 (or higher)
#
# Includes a rotamer library for MTSS at 298 K by Gunnar Jeschke,
# which is published under the same licence by permission.
"""\
Rotamer library handling
========================
:mod:`rotamers.library` contains the data (:data:`LIBRARIES`) to load
a rotamer library, represented by a :class:`RotamerLibrary`.
"""
from __future__ import absolute_import, division, print_function
import MDAnalysis, MDAnalysis.lib.util
import logging
logger = logging.getLogger("MDAnalysis.app")
import numpy as np
import os.path
import pkg_resources
#: Name of the directory in the package that contains the library data.
LIBDIR = "data"
# This could be turned into a YAML file.
#: Registry of libraries, indexed by name.
LIBRARIES = {
'MTSSL 298K 2011': {
'topology': "rotamer1_R1A_298K_2011.pdb",
'ensemble': "rotamer1_R1A_298K_2011.dcd",
'populations': "R1A_298K_populations_2011.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
},
'MTSSL 298K 2015': {
'topology': "rotamer1_R1A_298K_2015.pdb",
'ensemble': "rotamer1_R1A_298K_2015.dcd",
'populations': "R1A_298K_populations_2015.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
'information': "updated version of the MTSSL rotamer library from 2015"
},
}
def find_file(filename, pkglibdir=LIBDIR):
"""Return full path to file *filename*.
1) If the *filename* exists, return rooted canonical path.
2) Otherwise, create a path to file in the installed *pkglibdir*.
.. note::
A file name is *always* returned, even if the file does not
exist (because this is how :func:`pkg_resources.resource_filename`
works).
"""
if os.path.exists(filename):
|
return pkg_resources.resource_filename(__name__, os.path.join(pkglibdir, filename))
class RotamerLibrary(object):
"""Rotamer library
The library makes available the attributes :attr:`rotamers`, and :attr:`weights`.
.. attribute:: rotamers
:class:`MDAnalysis.core.AtomGroup.Universe` instance that
records all rotamers as a trajectory
.. attribute:: weights
NumPy array containing the population of each rotamer.
.. attribute:: name
Name of the library.
.. attribute:: lib
Dictionary containing the file names and meta data for the library :attr:`name`.
"""
def __init__(self, name):
"""RotamerLibrary(name)
:Arguments:
*name*
name of the library (must exist in the registry of libraries, :data:`LIBRARIES`)
"""
self.name = name
self.lib = {}
try:
self.lib.update(LIBRARIES[name]) # make a copy
except KeyError:
raise ValueError("No rotamer library with name {0} known: must be one of {1}".format(name,
LIBRARIES.keys()))
logger.info("Using rotamer library '{0}' by {1[author]}".format(self.name, self.lib))
logger.info("Please cite: {0[citation]}".format(self.lib))
# adjust paths
for k in 'ensemble', 'topology', 'populations':
self.lib[k] = find_file(self.lib[k])
logger.debug("[rotamers] ensemble = {0[ensemble]} with topology = {0[topology]}".format(self.lib))
logger.debug("[rotamers] populations = {0[populations]}".format(self.lib))
self.rotamers = MDAnalysis.Universe(self.lib['topology'], self.lib['ensemble'])
self.weights = self.read_rotamer_weights(self.lib['populations'])
if len(self.rotamers.trajectory) != len(self.weights):
err_msg = "Discrepancy between number of rotamers ({0}) and weights ({1})".format(
len(self.rotamers.trajectory), len(self.weights))
logger.critical(err_msg)
raise ValueError(err_msg)
def read_rotamer_weights(self, filename):
"""read in the rotamer weights from *filename*
There is one weight per conformer (frame) in the trajectory.
"""
return np.loadtxt(filename)
def __repr__(self):
return "<RotamerLibrary '{0}' by {1} with {2} rotamers>".format(self.name, self.lib['author'],
len(self.weights))
| return MDAnalysis.lib.util.realpath(filename) | conditional_block |
library.py | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Convolve MTSS rotamers with MD trajectory.
# Copyright (c) 2011-2017 Philip Fowler and AUTHORS
# Published under the GNU Public Licence, version 2 (or higher)
#
# Includes a rotamer library for MTSS at 298 K by Gunnar Jeschke,
# which is published under the same licence by permission.
"""\
Rotamer library handling
========================
:mod:`rotamers.library` contains the data (:data:`LIBRARIES`) to load
a rotamer library, represented by a :class:`RotamerLibrary`.
"""
from __future__ import absolute_import, division, print_function
import MDAnalysis, MDAnalysis.lib.util
import logging
logger = logging.getLogger("MDAnalysis.app")
import numpy as np
import os.path
import pkg_resources
#: Name of the directory in the package that contains the library data.
LIBDIR = "data"
# This could be turned into a YAML file.
#: Registry of libraries, indexed by name.
LIBRARIES = {
'MTSSL 298K 2011': {
'topology': "rotamer1_R1A_298K_2011.pdb",
'ensemble': "rotamer1_R1A_298K_2011.dcd",
'populations': "R1A_298K_populations_2011.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
},
'MTSSL 298K 2015': {
'topology': "rotamer1_R1A_298K_2015.pdb",
'ensemble': "rotamer1_R1A_298K_2015.dcd",
'populations': "R1A_298K_populations_2015.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
'information': "updated version of the MTSSL rotamer library from 2015"
},
}
def find_file(filename, pkglibdir=LIBDIR):
"""Return full path to file *filename*.
1) If the *filename* exists, return rooted canonical path.
2) Otherwise, create a path to file in the installed *pkglibdir*.
.. note::
A file name is *always* returned, even if the file does not
exist (because this is how :func:`pkg_resources.resource_filename`
works).
"""
if os.path.exists(filename):
return MDAnalysis.lib.util.realpath(filename)
return pkg_resources.resource_filename(__name__, os.path.join(pkglibdir, filename))
class RotamerLibrary(object):
"""Rotamer library
The library makes available the attributes :attr:`rotamers`, and :attr:`weights`.
.. attribute:: rotamers
:class:`MDAnalysis.core.AtomGroup.Universe` instance that
records all rotamers as a trajectory
.. attribute:: weights
NumPy array containing the population of each rotamer.
.. attribute:: name
Name of the library.
.. attribute:: lib
Dictionary containing the file names and meta data for the library :attr:`name`.
"""
def __init__(self, name):
"""RotamerLibrary(name)
:Arguments:
*name*
name of the library (must exist in the registry of libraries, :data:`LIBRARIES`)
"""
self.name = name
self.lib = {}
try:
self.lib.update(LIBRARIES[name]) # make a copy
except KeyError:
raise ValueError("No rotamer library with name {0} known: must be one of {1}".format(name,
LIBRARIES.keys()))
logger.info("Using rotamer library '{0}' by {1[author]}".format(self.name, self.lib))
logger.info("Please cite: {0[citation]}".format(self.lib))
# adjust paths
for k in 'ensemble', 'topology', 'populations':
self.lib[k] = find_file(self.lib[k])
logger.debug("[rotamers] ensemble = {0[ensemble]} with topology = {0[topology]}".format(self.lib))
logger.debug("[rotamers] populations = {0[populations]}".format(self.lib))
self.rotamers = MDAnalysis.Universe(self.lib['topology'], self.lib['ensemble'])
self.weights = self.read_rotamer_weights(self.lib['populations'])
if len(self.rotamers.trajectory) != len(self.weights):
err_msg = "Discrepancy between number of rotamers ({0}) and weights ({1})".format(
len(self.rotamers.trajectory), len(self.weights))
logger.critical(err_msg)
raise ValueError(err_msg)
def read_rotamer_weights(self, filename):
"""read in the rotamer weights from *filename*
There is one weight per conformer (frame) in the trajectory.
"""
return np.loadtxt(filename)
def | (self):
return "<RotamerLibrary '{0}' by {1} with {2} rotamers>".format(self.name, self.lib['author'],
len(self.weights))
| __repr__ | identifier_name |
library.py | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Convolve MTSS rotamers with MD trajectory.
# Copyright (c) 2011-2017 Philip Fowler and AUTHORS
# Published under the GNU Public Licence, version 2 (or higher)
#
# Includes a rotamer library for MTSS at 298 K by Gunnar Jeschke,
# which is published under the same licence by permission.
"""\
Rotamer library handling
========================
:mod:`rotamers.library` contains the data (:data:`LIBRARIES`) to load
a rotamer library, represented by a :class:`RotamerLibrary`.
"""
from __future__ import absolute_import, division, print_function
import MDAnalysis, MDAnalysis.lib.util
import logging
logger = logging.getLogger("MDAnalysis.app")
import numpy as np
import os.path
import pkg_resources
#: Name of the directory in the package that contains the library data.
LIBDIR = "data"
# This could be turned into a YAML file.
#: Registry of libraries, indexed by name.
LIBRARIES = {
'MTSSL 298K 2011': {
'topology': "rotamer1_R1A_298K_2011.pdb",
'ensemble': "rotamer1_R1A_298K_2011.dcd",
'populations': "R1A_298K_populations_2011.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
},
'MTSSL 298K 2015': {
'topology': "rotamer1_R1A_298K_2015.pdb",
'ensemble': "rotamer1_R1A_298K_2015.dcd",
'populations': "R1A_298K_populations_2015.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
'information': "updated version of the MTSSL rotamer library from 2015"
},
}
def find_file(filename, pkglibdir=LIBDIR):
"""Return full path to file *filename*.
| 1) If the *filename* exists, return rooted canonical path.
2) Otherwise, create a path to file in the installed *pkglibdir*.
.. note::
A file name is *always* returned, even if the file does not
exist (because this is how :func:`pkg_resources.resource_filename`
works).
"""
if os.path.exists(filename):
return MDAnalysis.lib.util.realpath(filename)
return pkg_resources.resource_filename(__name__, os.path.join(pkglibdir, filename))
class RotamerLibrary(object):
"""Rotamer library
The library makes available the attributes :attr:`rotamers`, and :attr:`weights`.
.. attribute:: rotamers
:class:`MDAnalysis.core.AtomGroup.Universe` instance that
records all rotamers as a trajectory
.. attribute:: weights
NumPy array containing the population of each rotamer.
.. attribute:: name
Name of the library.
.. attribute:: lib
Dictionary containing the file names and meta data for the library :attr:`name`.
"""
def __init__(self, name):
"""RotamerLibrary(name)
:Arguments:
*name*
name of the library (must exist in the registry of libraries, :data:`LIBRARIES`)
"""
self.name = name
self.lib = {}
try:
self.lib.update(LIBRARIES[name]) # make a copy
except KeyError:
raise ValueError("No rotamer library with name {0} known: must be one of {1}".format(name,
LIBRARIES.keys()))
logger.info("Using rotamer library '{0}' by {1[author]}".format(self.name, self.lib))
logger.info("Please cite: {0[citation]}".format(self.lib))
# adjust paths
for k in 'ensemble', 'topology', 'populations':
self.lib[k] = find_file(self.lib[k])
logger.debug("[rotamers] ensemble = {0[ensemble]} with topology = {0[topology]}".format(self.lib))
logger.debug("[rotamers] populations = {0[populations]}".format(self.lib))
self.rotamers = MDAnalysis.Universe(self.lib['topology'], self.lib['ensemble'])
self.weights = self.read_rotamer_weights(self.lib['populations'])
if len(self.rotamers.trajectory) != len(self.weights):
err_msg = "Discrepancy between number of rotamers ({0}) and weights ({1})".format(
len(self.rotamers.trajectory), len(self.weights))
logger.critical(err_msg)
raise ValueError(err_msg)
def read_rotamer_weights(self, filename):
"""read in the rotamer weights from *filename*
There is one weight per conformer (frame) in the trajectory.
"""
return np.loadtxt(filename)
def __repr__(self):
return "<RotamerLibrary '{0}' by {1} with {2} rotamers>".format(self.name, self.lib['author'],
len(self.weights)) | random_line_split |
|
library.py | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Convolve MTSS rotamers with MD trajectory.
# Copyright (c) 2011-2017 Philip Fowler and AUTHORS
# Published under the GNU Public Licence, version 2 (or higher)
#
# Includes a rotamer library for MTSS at 298 K by Gunnar Jeschke,
# which is published under the same licence by permission.
"""\
Rotamer library handling
========================
:mod:`rotamers.library` contains the data (:data:`LIBRARIES`) to load
a rotamer library, represented by a :class:`RotamerLibrary`.
"""
from __future__ import absolute_import, division, print_function
import MDAnalysis, MDAnalysis.lib.util
import logging
logger = logging.getLogger("MDAnalysis.app")
import numpy as np
import os.path
import pkg_resources
#: Name of the directory in the package that contains the library data.
LIBDIR = "data"
# This could be turned into a YAML file.
#: Registry of libraries, indexed by name.
LIBRARIES = {
'MTSSL 298K 2011': {
'topology': "rotamer1_R1A_298K_2011.pdb",
'ensemble': "rotamer1_R1A_298K_2011.dcd",
'populations': "R1A_298K_populations_2011.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
},
'MTSSL 298K 2015': {
'topology': "rotamer1_R1A_298K_2015.pdb",
'ensemble': "rotamer1_R1A_298K_2015.dcd",
'populations': "R1A_298K_populations_2015.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
'information': "updated version of the MTSSL rotamer library from 2015"
},
}
def find_file(filename, pkglibdir=LIBDIR):
|
class RotamerLibrary(object):
"""Rotamer library
The library makes available the attributes :attr:`rotamers`, and :attr:`weights`.
.. attribute:: rotamers
:class:`MDAnalysis.core.AtomGroup.Universe` instance that
records all rotamers as a trajectory
.. attribute:: weights
NumPy array containing the population of each rotamer.
.. attribute:: name
Name of the library.
.. attribute:: lib
Dictionary containing the file names and meta data for the library :attr:`name`.
"""
def __init__(self, name):
"""RotamerLibrary(name)
:Arguments:
*name*
name of the library (must exist in the registry of libraries, :data:`LIBRARIES`)
"""
self.name = name
self.lib = {}
try:
self.lib.update(LIBRARIES[name]) # make a copy
except KeyError:
raise ValueError("No rotamer library with name {0} known: must be one of {1}".format(name,
LIBRARIES.keys()))
logger.info("Using rotamer library '{0}' by {1[author]}".format(self.name, self.lib))
logger.info("Please cite: {0[citation]}".format(self.lib))
# adjust paths
for k in 'ensemble', 'topology', 'populations':
self.lib[k] = find_file(self.lib[k])
logger.debug("[rotamers] ensemble = {0[ensemble]} with topology = {0[topology]}".format(self.lib))
logger.debug("[rotamers] populations = {0[populations]}".format(self.lib))
self.rotamers = MDAnalysis.Universe(self.lib['topology'], self.lib['ensemble'])
self.weights = self.read_rotamer_weights(self.lib['populations'])
if len(self.rotamers.trajectory) != len(self.weights):
err_msg = "Discrepancy between number of rotamers ({0}) and weights ({1})".format(
len(self.rotamers.trajectory), len(self.weights))
logger.critical(err_msg)
raise ValueError(err_msg)
def read_rotamer_weights(self, filename):
"""read in the rotamer weights from *filename*
There is one weight per conformer (frame) in the trajectory.
"""
return np.loadtxt(filename)
def __repr__(self):
return "<RotamerLibrary '{0}' by {1} with {2} rotamers>".format(self.name, self.lib['author'],
len(self.weights))
| """Return full path to file *filename*.
    1) If *filename* exists, return its rooted canonical path.
    2) Otherwise, construct a path to the file in the installed *pkglibdir*.
.. note::
A file name is *always* returned, even if the file does not
exist (because this is how :func:`pkg_resources.resource_filename`
works).
"""
if os.path.exists(filename):
return MDAnalysis.lib.util.realpath(filename)
return pkg_resources.resource_filename(__name__, os.path.join(pkglibdir, filename)) | identifier_body |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#![deny(warnings)]
use fbinit::FacebookInit;
use futures_stats::{FutureStats, StreamStats};
use metadata::Metadata;
use nonzero_ext::nonzero;
pub use observability::ScubaVerbosityLevel;
use observability::{ObservabilityContext, ScubaLoggingDecisionFields};
use permission_checker::MononokeIdentitySetExt;
use scuba::{builder::ServerData, ScubaSample, ScubaSampleBuilder};
pub use scuba::{Sampling, ScubaValue};
use std::collections::hash_map::Entry;
use std::io::Error as IoError;
use std::num::NonZeroU64;
use std::path::Path;
use std::time::Duration;
use time_ext::DurationExt;
use tunables::tunables;
pub use scribe_ext::ScribeClientImplementation;
/// An extensible wrapper struct around `ScubaSampleBuilder`
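///
/// Illustrative builder-style usage (a sketch, not from the original source;
/// the "repo" key and the values are assumptions):
///
/// ```ignore
/// let mut scuba = MononokeScubaSampleBuilder::with_discard();
/// scuba.add("repo", "example-repo");
/// scuba.log_with_msg("init", "started".to_string());
/// ```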
#[derive(Clone)]
pub struct MononokeScubaSampleBuilder {
inner: ScubaSampleBuilder,
maybe_observability_context: Option<ObservabilityContext>,
// This field decides if sampled out requests should
// still be logged when verbose logging is enabled
fallback_sampled_out_to_verbose: bool,
}
impl std::fmt::Debug for MononokeScubaSampleBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "MononokeScubaSampleBuilder({:?})", self.inner)
}
}
impl MononokeScubaSampleBuilder {
pub fn new(fb: FacebookInit, scuba_table: &str) -> Self {
Self {
inner: ScubaSampleBuilder::new(fb, scuba_table),
maybe_observability_context: None,
fallback_sampled_out_to_verbose: false,
}
}
pub fn with_discard() -> Self {
Self {
inner: ScubaSampleBuilder::with_discard(),
maybe_observability_context: None,
fallback_sampled_out_to_verbose: false,
}
}
pub fn with_opt_table(fb: FacebookInit, scuba_table: Option<String>) -> Self {
match scuba_table {
None => Self::with_discard(),
Some(scuba_table) => Self::new(fb, &scuba_table),
}
}
pub fn with_observability_context(self, octx: ObservabilityContext) -> Self {
Self {
maybe_observability_context: Some(octx),
..self
}
}
fn get_logging_decision_fields(&self) -> ScubaLoggingDecisionFields {
ScubaLoggingDecisionFields {
maybe_session_id: self.get("session_uuid"),
maybe_unix_username: self.get("unix_username"),
maybe_source_hostname: self.get("source_hostname"),
}
}
pub fn should_log_with_level(&self, level: ScubaVerbosityLevel) -> bool {
match level {
ScubaVerbosityLevel::Normal => true,
ScubaVerbosityLevel::Verbose => self
.maybe_observability_context
.as_ref()
.map_or(false, |octx| {
octx.should_log_scuba_sample(level, self.get_logging_decision_fields())
}),
}
}
pub fn add<K: Into<String>, V: Into<ScubaValue>>(&mut self, key: K, value: V) -> &mut Self {
self.inner.add(key, value);
self
}
pub fn add_metadata(&mut self, metadata: &Metadata) -> &mut Self {
self.inner
.add("session_uuid", metadata.session_id().to_string());
self.inner.add(
"client_identities",
metadata
.identities()
.iter()
.map(|i| i.to_string())
.collect::<Vec<_>>(),
);
if let Some(client_hostname) = metadata.client_hostname() {
// "source_hostname" to remain compatible with historical logging
self.inner
.add("source_hostname", client_hostname.to_owned());
        } else if let Some(client_ip) = metadata.client_ip() {
            self.inner.add("client_ip", client_ip.to_string());
        }
if let Some(unix_name) = metadata.unix_name() {
// "unix_username" to remain compatible with historical logging
self.inner.add("unix_username", unix_name);
}
self.inner
.add_opt("sandcastle_alias", metadata.sandcastle_alias());
self.inner
.add_opt("sandcastle_nonce", metadata.sandcastle_nonce());
self.inner
.add_opt("clientinfo_tag", metadata.clientinfo_u64tag());
self
}
pub fn sample_for_identities(&mut self, identities: &impl MononokeIdentitySetExt) {
// Details of quicksand traffic aren't particularly interesting because all Quicksand tasks are
// doing effectively the same thing at the same time. If we need real-time debugging, we can
// always rely on updating the verbosity in real time.
if identities.is_quicksand() {
self.sampled_unless_verbose(nonzero!(100u64));
}
}
pub fn log_with_msg<S: Into<Option<String>>>(&mut self, log_tag: &str, msg: S) {
if self.fallback_sampled_out_to_verbose
&& self.should_log_with_level(ScubaVerbosityLevel::Verbose)
{
// We need to unsample before we log, so that
// `sample_rate` field is not added, as we are about
// to log everything.
self.inner.unsampled();
}
self.inner.add("log_tag", log_tag);
if let Some(mut msg) = msg.into() {
match tunables().get_max_scuba_msg_length().try_into() {
Ok(size) if size > 0 && msg.len() > size => {
msg.truncate(size);
msg.push_str(" (...)");
}
_ => {}
};
self.inner.add("msg", msg);
}
self.inner.log();
}
/// Same as `log_with_msg`, but sample is assumed to be verbose and is only logged
/// if verbose logging conditions are met
pub fn log_with_msg_verbose<S: Into<Option<String>>>(&mut self, log_tag: &str, msg: S) {
if !self.should_log_with_level(ScubaVerbosityLevel::Verbose) {
return;
}
self.log_with_msg(log_tag, msg)
}
pub fn add_stream_stats(&mut self, stats: &StreamStats) -> &mut Self {
self.inner
.add("poll_count", stats.poll_count)
.add("poll_time_us", stats.poll_time.as_micros_unchecked())
.add("count", stats.count)
.add(
"completion_time_us",
stats.completion_time.as_micros_unchecked(),
);
self
}
pub fn add_future_stats(&mut self, stats: &FutureStats) -> &mut Self {
self.inner
.add("poll_count", stats.poll_count)
.add("poll_time_us", stats.poll_time.as_micros_unchecked())
.add(
"completion_time_us",
stats.completion_time.as_micros_unchecked(),
);
self
}
pub fn is_discard(&self) -> bool {
self.inner.is_discard()
}
pub fn sampled(&mut self, sample_rate: NonZeroU64) -> &mut Self {
self.fallback_sampled_out_to_verbose = false;
self.inner.sampled(sample_rate);
self
}
pub fn sampled_unless_verbose(&mut self, sample_rate: NonZeroU64) -> &mut Self {
self.fallback_sampled_out_to_verbose = true;
self.inner.sampled(sample_rate);
self
}
pub fn unsampled(&mut self) -> &mut Self {
self.inner.unsampled();
self
}
pub fn log(&mut self) -> bool {
self.inner.log()
}
/// Same as `log`, but sample is assumed to be verbose and is only logged
/// if verbose logging conditions are met
pub fn log_verbose(&mut self) -> bool {
if !self.should_log_with_level(ScubaVerbosityLevel::Verbose) {
// Return value of the `log` function indicates whether
// the sample passed sampling. If it's too verbose, let's
// return false
return false;
}
self.log()
}
pub fn add_common_server_data(&mut self) -> &mut Self {
self.inner.add_common_server_data();
self
}
pub fn sampling(&self) -> &Sampling {
self.inner.sampling()
}
pub fn add_mapped_common_server_data<F>(&mut self, mapper: F) -> &mut Self
where
F: Fn(ServerData) -> &'static str,
{
self.inner.add_mapped_common_server_data(mapper);
self
}
pub fn with_log_file<L: AsRef<Path>>(mut self, log_file: L) -> Result<Self, IoError> {
self.inner = self.inner.with_log_file(log_file)?;
Ok(self)
}
pub fn with_seq(mut self, key: impl Into<String>) -> Self {
self.inner = self.inner.with_seq(key);
self
}
pub fn log_with_time(&mut self, time: u64) -> bool {
self.inner.log_with_time(time)
}
pub fn | <K: Into<String>>(&mut self, key: K) -> Entry<String, ScubaValue> {
self.inner.entry(key)
}
pub fn flush(&self, timeout: Duration) {
self.inner.flush(timeout)
}
pub fn get_sample(&self) -> &ScubaSample {
self.inner.get_sample()
}
pub fn add_opt<K: Into<String>, V: Into<ScubaValue>>(
&mut self,
key: K,
value: Option<V>,
) -> &mut Self {
self.inner.add_opt(key, value);
self
}
pub fn get<K: Into<String>>(&self, key: K) -> Option<&ScubaValue> {
self.inner.get(key)
}
}
| entry | identifier_name |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#![deny(warnings)]
use fbinit::FacebookInit;
use futures_stats::{FutureStats, StreamStats};
use metadata::Metadata;
use nonzero_ext::nonzero;
pub use observability::ScubaVerbosityLevel;
use observability::{ObservabilityContext, ScubaLoggingDecisionFields};
use permission_checker::MononokeIdentitySetExt;
use scuba::{builder::ServerData, ScubaSample, ScubaSampleBuilder};
pub use scuba::{Sampling, ScubaValue};
use std::collections::hash_map::Entry;
use std::io::Error as IoError;
use std::num::NonZeroU64;
use std::path::Path;
use std::time::Duration;
use time_ext::DurationExt;
use tunables::tunables;
pub use scribe_ext::ScribeClientImplementation;
/// An extensible wrapper struct around `ScubaSampleBuilder`
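///
/// Illustrative builder-style usage (a sketch, not from the original source;
/// the "repo" key and the values are assumptions):
///
/// ```ignore
/// let mut scuba = MononokeScubaSampleBuilder::with_discard();
/// scuba.add("repo", "example-repo");
/// scuba.log_with_msg("init", "started".to_string());
/// ```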
#[derive(Clone)]
pub struct MononokeScubaSampleBuilder {
inner: ScubaSampleBuilder,
maybe_observability_context: Option<ObservabilityContext>,
// This field decides if sampled out requests should
// still be logged when verbose logging is enabled
fallback_sampled_out_to_verbose: bool,
}
impl std::fmt::Debug for MononokeScubaSampleBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "MononokeScubaSampleBuilder({:?})", self.inner)
}
}
impl MononokeScubaSampleBuilder {
pub fn new(fb: FacebookInit, scuba_table: &str) -> Self {
Self {
inner: ScubaSampleBuilder::new(fb, scuba_table),
maybe_observability_context: None,
fallback_sampled_out_to_verbose: false,
}
}
pub fn with_discard() -> Self {
Self {
inner: ScubaSampleBuilder::with_discard(),
maybe_observability_context: None,
fallback_sampled_out_to_verbose: false,
}
}
pub fn with_opt_table(fb: FacebookInit, scuba_table: Option<String>) -> Self {
match scuba_table {
None => Self::with_discard(),
Some(scuba_table) => Self::new(fb, &scuba_table),
}
}
pub fn with_observability_context(self, octx: ObservabilityContext) -> Self {
Self {
maybe_observability_context: Some(octx),
..self
}
}
fn get_logging_decision_fields(&self) -> ScubaLoggingDecisionFields {
ScubaLoggingDecisionFields {
maybe_session_id: self.get("session_uuid"),
maybe_unix_username: self.get("unix_username"),
maybe_source_hostname: self.get("source_hostname"),
}
}
pub fn should_log_with_level(&self, level: ScubaVerbosityLevel) -> bool {
match level {
ScubaVerbosityLevel::Normal => true,
ScubaVerbosityLevel::Verbose => self
.maybe_observability_context
.as_ref()
.map_or(false, |octx| {
octx.should_log_scuba_sample(level, self.get_logging_decision_fields())
}),
}
}
pub fn add<K: Into<String>, V: Into<ScubaValue>>(&mut self, key: K, value: V) -> &mut Self {
self.inner.add(key, value);
self
}
pub fn add_metadata(&mut self, metadata: &Metadata) -> &mut Self {
self.inner
.add("session_uuid", metadata.session_id().to_string());
self.inner.add(
"client_identities",
metadata
.identities()
.iter()
.map(|i| i.to_string())
.collect::<Vec<_>>(),
);
if let Some(client_hostname) = metadata.client_hostname() {
// "source_hostname" to remain compatible with historical logging
self.inner
.add("source_hostname", client_hostname.to_owned());
        } else if let Some(client_ip) = metadata.client_ip() {
            self.inner.add("client_ip", client_ip.to_string());
        }
if let Some(unix_name) = metadata.unix_name() {
// "unix_username" to remain compatible with historical logging
self.inner.add("unix_username", unix_name);
}
self.inner
.add_opt("sandcastle_alias", metadata.sandcastle_alias());
self.inner
.add_opt("sandcastle_nonce", metadata.sandcastle_nonce());
self.inner
.add_opt("clientinfo_tag", metadata.clientinfo_u64tag());
self
}
pub fn sample_for_identities(&mut self, identities: &impl MononokeIdentitySetExt) {
// Details of quicksand traffic aren't particularly interesting because all Quicksand tasks are
// doing effectively the same thing at the same time. If we need real-time debugging, we can
// always rely on updating the verbosity in real time.
if identities.is_quicksand() {
self.sampled_unless_verbose(nonzero!(100u64));
}
}
pub fn log_with_msg<S: Into<Option<String>>>(&mut self, log_tag: &str, msg: S) {
if self.fallback_sampled_out_to_verbose
&& self.should_log_with_level(ScubaVerbosityLevel::Verbose)
{
// We need to unsample before we log, so that
// `sample_rate` field is not added, as we are about
// to log everything.
self.inner.unsampled();
}
self.inner.add("log_tag", log_tag);
if let Some(mut msg) = msg.into() {
match tunables().get_max_scuba_msg_length().try_into() {
Ok(size) if size > 0 && msg.len() > size => {
msg.truncate(size);
msg.push_str(" (...)");
}
_ => {}
};
self.inner.add("msg", msg);
}
self.inner.log();
}
/// Same as `log_with_msg`, but sample is assumed to be verbose and is only logged
/// if verbose logging conditions are met
pub fn log_with_msg_verbose<S: Into<Option<String>>>(&mut self, log_tag: &str, msg: S) {
if !self.should_log_with_level(ScubaVerbosityLevel::Verbose) {
return;
}
self.log_with_msg(log_tag, msg)
}
pub fn add_stream_stats(&mut self, stats: &StreamStats) -> &mut Self {
self.inner
.add("poll_count", stats.poll_count)
.add("poll_time_us", stats.poll_time.as_micros_unchecked())
.add("count", stats.count)
.add(
"completion_time_us",
stats.completion_time.as_micros_unchecked(),
);
self
}
pub fn add_future_stats(&mut self, stats: &FutureStats) -> &mut Self {
self.inner
.add("poll_count", stats.poll_count)
.add("poll_time_us", stats.poll_time.as_micros_unchecked())
.add(
"completion_time_us",
stats.completion_time.as_micros_unchecked(),
);
self
}
pub fn is_discard(&self) -> bool {
self.inner.is_discard()
}
pub fn sampled(&mut self, sample_rate: NonZeroU64) -> &mut Self {
self.fallback_sampled_out_to_verbose = false;
self.inner.sampled(sample_rate);
self
}
pub fn sampled_unless_verbose(&mut self, sample_rate: NonZeroU64) -> &mut Self {
self.fallback_sampled_out_to_verbose = true;
self.inner.sampled(sample_rate);
self
}
pub fn unsampled(&mut self) -> &mut Self {
self.inner.unsampled();
self
}
pub fn log(&mut self) -> bool {
self.inner.log()
}
/// Same as `log`, but sample is assumed to be verbose and is only logged
/// if verbose logging conditions are met
pub fn log_verbose(&mut self) -> bool {
if !self.should_log_with_level(ScubaVerbosityLevel::Verbose) |
self.log()
}
pub fn add_common_server_data(&mut self) -> &mut Self {
self.inner.add_common_server_data();
self
}
pub fn sampling(&self) -> &Sampling {
self.inner.sampling()
}
pub fn add_mapped_common_server_data<F>(&mut self, mapper: F) -> &mut Self
where
F: Fn(ServerData) -> &'static str,
{
self.inner.add_mapped_common_server_data(mapper);
self
}
pub fn with_log_file<L: AsRef<Path>>(mut self, log_file: L) -> Result<Self, IoError> {
self.inner = self.inner.with_log_file(log_file)?;
Ok(self)
}
pub fn with_seq(mut self, key: impl Into<String>) -> Self {
self.inner = self.inner.with_seq(key);
self
}
pub fn log_with_time(&mut self, time: u64) -> bool {
self.inner.log_with_time(time)
}
pub fn entry<K: Into<String>>(&mut self, key: K) -> Entry<String, ScubaValue> {
self.inner.entry(key)
}
pub fn flush(&self, timeout: Duration) {
self.inner.flush(timeout)
}
pub fn get_sample(&self) -> &ScubaSample {
self.inner.get_sample()
}
pub fn add_opt<K: Into<String>, V: Into<ScubaValue>>(
&mut self,
key: K,
value: Option<V>,
) -> &mut Self {
self.inner.add_opt(key, value);
self
}
pub fn get<K: Into<String>>(&self, key: K) -> Option<&ScubaValue> {
self.inner.get(key)
}
}
| {
// Return value of the `log` function indicates whether
// the sample passed sampling. If it's too verbose, let's
// return false
return false;
} | conditional_block |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#![deny(warnings)]
use fbinit::FacebookInit;
use futures_stats::{FutureStats, StreamStats};
use metadata::Metadata;
use nonzero_ext::nonzero;
pub use observability::ScubaVerbosityLevel;
use observability::{ObservabilityContext, ScubaLoggingDecisionFields};
use permission_checker::MononokeIdentitySetExt;
use scuba::{builder::ServerData, ScubaSample, ScubaSampleBuilder};
pub use scuba::{Sampling, ScubaValue};
use std::collections::hash_map::Entry;
use std::io::Error as IoError;
use std::num::NonZeroU64;
use std::path::Path;
use std::time::Duration;
use time_ext::DurationExt;
use tunables::tunables;
pub use scribe_ext::ScribeClientImplementation;
/// An extensible wrapper struct around `ScubaSampleBuilder`
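///
/// Illustrative builder-style usage (a sketch, not from the original source;
/// the "repo" key and the values are assumptions):
///
/// ```ignore
/// let mut scuba = MononokeScubaSampleBuilder::with_discard();
/// scuba.add("repo", "example-repo");
/// scuba.log_with_msg("init", "started".to_string());
/// ```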
#[derive(Clone)]
pub struct MononokeScubaSampleBuilder {
inner: ScubaSampleBuilder,
maybe_observability_context: Option<ObservabilityContext>,
// This field decides if sampled out requests should
// still be logged when verbose logging is enabled
fallback_sampled_out_to_verbose: bool,
}
impl std::fmt::Debug for MononokeScubaSampleBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "MononokeScubaSampleBuilder({:?})", self.inner)
}
}
impl MononokeScubaSampleBuilder {
pub fn new(fb: FacebookInit, scuba_table: &str) -> Self {
Self {
inner: ScubaSampleBuilder::new(fb, scuba_table),
maybe_observability_context: None,
fallback_sampled_out_to_verbose: false,
}
}
pub fn with_discard() -> Self {
Self {
inner: ScubaSampleBuilder::with_discard(),
maybe_observability_context: None,
fallback_sampled_out_to_verbose: false,
}
}
pub fn with_opt_table(fb: FacebookInit, scuba_table: Option<String>) -> Self {
match scuba_table {
None => Self::with_discard(),
Some(scuba_table) => Self::new(fb, &scuba_table),
}
}
pub fn with_observability_context(self, octx: ObservabilityContext) -> Self {
Self {
maybe_observability_context: Some(octx),
..self
}
}
fn get_logging_decision_fields(&self) -> ScubaLoggingDecisionFields {
ScubaLoggingDecisionFields {
maybe_session_id: self.get("session_uuid"),
maybe_unix_username: self.get("unix_username"),
maybe_source_hostname: self.get("source_hostname"),
}
}
pub fn should_log_with_level(&self, level: ScubaVerbosityLevel) -> bool {
match level {
ScubaVerbosityLevel::Normal => true,
ScubaVerbosityLevel::Verbose => self
.maybe_observability_context
.as_ref()
.map_or(false, |octx| {
octx.should_log_scuba_sample(level, self.get_logging_decision_fields())
}),
}
}
pub fn add<K: Into<String>, V: Into<ScubaValue>>(&mut self, key: K, value: V) -> &mut Self {
self.inner.add(key, value);
self
}
pub fn add_metadata(&mut self, metadata: &Metadata) -> &mut Self {
self.inner
.add("session_uuid", metadata.session_id().to_string());
self.inner.add(
"client_identities",
metadata
.identities()
.iter()
.map(|i| i.to_string())
.collect::<Vec<_>>(),
);
if let Some(client_hostname) = metadata.client_hostname() {
// "source_hostname" to remain compatible with historical logging
self.inner
.add("source_hostname", client_hostname.to_owned());
        } else if let Some(client_ip) = metadata.client_ip() {
            self.inner.add("client_ip", client_ip.to_string());
        }
if let Some(unix_name) = metadata.unix_name() {
// "unix_username" to remain compatible with historical logging
self.inner.add("unix_username", unix_name);
}
self.inner
.add_opt("sandcastle_alias", metadata.sandcastle_alias());
self.inner
.add_opt("sandcastle_nonce", metadata.sandcastle_nonce());
self.inner
.add_opt("clientinfo_tag", metadata.clientinfo_u64tag());
self
}
pub fn sample_for_identities(&mut self, identities: &impl MononokeIdentitySetExt) {
// Details of quicksand traffic aren't particularly interesting because all Quicksand tasks are
// doing effectively the same thing at the same time. If we need real-time debugging, we can
// always rely on updating the verbosity in real time.
if identities.is_quicksand() {
self.sampled_unless_verbose(nonzero!(100u64));
}
}
pub fn log_with_msg<S: Into<Option<String>>>(&mut self, log_tag: &str, msg: S) {
if self.fallback_sampled_out_to_verbose
&& self.should_log_with_level(ScubaVerbosityLevel::Verbose)
{
// We need to unsample before we log, so that
// `sample_rate` field is not added, as we are about
// to log everything.
self.inner.unsampled();
}
self.inner.add("log_tag", log_tag);
if let Some(mut msg) = msg.into() {
match tunables().get_max_scuba_msg_length().try_into() {
Ok(size) if size > 0 && msg.len() > size => {
msg.truncate(size);
msg.push_str(" (...)");
}
_ => {}
};
self.inner.add("msg", msg);
}
self.inner.log();
}
/// Same as `log_with_msg`, but sample is assumed to be verbose and is only logged
/// if verbose logging conditions are met
pub fn log_with_msg_verbose<S: Into<Option<String>>>(&mut self, log_tag: &str, msg: S) |
pub fn add_stream_stats(&mut self, stats: &StreamStats) -> &mut Self {
self.inner
.add("poll_count", stats.poll_count)
.add("poll_time_us", stats.poll_time.as_micros_unchecked())
.add("count", stats.count)
.add(
"completion_time_us",
stats.completion_time.as_micros_unchecked(),
);
self
}
pub fn add_future_stats(&mut self, stats: &FutureStats) -> &mut Self {
self.inner
.add("poll_count", stats.poll_count)
.add("poll_time_us", stats.poll_time.as_micros_unchecked())
.add(
"completion_time_us",
stats.completion_time.as_micros_unchecked(),
);
self
}
pub fn is_discard(&self) -> bool {
self.inner.is_discard()
}
pub fn sampled(&mut self, sample_rate: NonZeroU64) -> &mut Self {
self.fallback_sampled_out_to_verbose = false;
self.inner.sampled(sample_rate);
self
}
pub fn sampled_unless_verbose(&mut self, sample_rate: NonZeroU64) -> &mut Self {
self.fallback_sampled_out_to_verbose = true;
self.inner.sampled(sample_rate);
self
}
pub fn unsampled(&mut self) -> &mut Self {
self.inner.unsampled();
self
}
pub fn log(&mut self) -> bool {
self.inner.log()
}
/// Same as `log`, but sample is assumed to be verbose and is only logged
/// if verbose logging conditions are met
pub fn log_verbose(&mut self) -> bool {
if !self.should_log_with_level(ScubaVerbosityLevel::Verbose) {
// Return value of the `log` function indicates whether
// the sample passed sampling. If it's too verbose, let's
// return false
return false;
}
self.log()
}
pub fn add_common_server_data(&mut self) -> &mut Self {
self.inner.add_common_server_data();
self
}
pub fn sampling(&self) -> &Sampling {
self.inner.sampling()
}
pub fn add_mapped_common_server_data<F>(&mut self, mapper: F) -> &mut Self
where
F: Fn(ServerData) -> &'static str,
{
self.inner.add_mapped_common_server_data(mapper);
self
}
pub fn with_log_file<L: AsRef<Path>>(mut self, log_file: L) -> Result<Self, IoError> {
self.inner = self.inner.with_log_file(log_file)?;
Ok(self)
}
pub fn with_seq(mut self, key: impl Into<String>) -> Self {
self.inner = self.inner.with_seq(key);
self
}
pub fn log_with_time(&mut self, time: u64) -> bool {
self.inner.log_with_time(time)
}
pub fn entry<K: Into<String>>(&mut self, key: K) -> Entry<String, ScubaValue> {
self.inner.entry(key)
}
pub fn flush(&self, timeout: Duration) {
self.inner.flush(timeout)
}
pub fn get_sample(&self) -> &ScubaSample {
self.inner.get_sample()
}
pub fn add_opt<K: Into<String>, V: Into<ScubaValue>>(
&mut self,
key: K,
value: Option<V>,
) -> &mut Self {
self.inner.add_opt(key, value);
self
}
pub fn get<K: Into<String>>(&self, key: K) -> Option<&ScubaValue> {
self.inner.get(key)
}
}
| {
if !self.should_log_with_level(ScubaVerbosityLevel::Verbose) {
return;
}
self.log_with_msg(log_tag, msg)
} | identifier_body |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#![deny(warnings)]
use fbinit::FacebookInit;
use futures_stats::{FutureStats, StreamStats};
use metadata::Metadata;
use nonzero_ext::nonzero;
pub use observability::ScubaVerbosityLevel;
use observability::{ObservabilityContext, ScubaLoggingDecisionFields};
use permission_checker::MononokeIdentitySetExt;
use scuba::{builder::ServerData, ScubaSample, ScubaSampleBuilder};
pub use scuba::{Sampling, ScubaValue};
use std::collections::hash_map::Entry;
use std::io::Error as IoError;
use std::num::NonZeroU64;
use std::path::Path;
use std::time::Duration;
use time_ext::DurationExt;
use tunables::tunables;
pub use scribe_ext::ScribeClientImplementation;
/// An extensible wrapper struct around `ScubaSampleBuilder`
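///
/// Illustrative builder-style usage (a sketch, not from the original source;
/// the "repo" key and the values are assumptions):
///
/// ```ignore
/// let mut scuba = MononokeScubaSampleBuilder::with_discard();
/// scuba.add("repo", "example-repo");
/// scuba.log_with_msg("init", "started".to_string());
/// ```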
#[derive(Clone)]
pub struct MononokeScubaSampleBuilder {
inner: ScubaSampleBuilder,
maybe_observability_context: Option<ObservabilityContext>,
// This field decides if sampled out requests should
// still be logged when verbose logging is enabled
fallback_sampled_out_to_verbose: bool,
}
impl std::fmt::Debug for MononokeScubaSampleBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "MononokeScubaSampleBuilder({:?})", self.inner)
}
}
impl MononokeScubaSampleBuilder {
pub fn new(fb: FacebookInit, scuba_table: &str) -> Self {
Self {
inner: ScubaSampleBuilder::new(fb, scuba_table),
maybe_observability_context: None,
fallback_sampled_out_to_verbose: false,
}
}
pub fn with_discard() -> Self {
Self {
inner: ScubaSampleBuilder::with_discard(),
maybe_observability_context: None,
fallback_sampled_out_to_verbose: false,
}
}
pub fn with_opt_table(fb: FacebookInit, scuba_table: Option<String>) -> Self {
match scuba_table {
None => Self::with_discard(),
Some(scuba_table) => Self::new(fb, &scuba_table),
}
}
pub fn with_observability_context(self, octx: ObservabilityContext) -> Self {
Self {
maybe_observability_context: Some(octx),
..self
}
}
fn get_logging_decision_fields(&self) -> ScubaLoggingDecisionFields {
ScubaLoggingDecisionFields {
maybe_session_id: self.get("session_uuid"),
maybe_unix_username: self.get("unix_username"),
maybe_source_hostname: self.get("source_hostname"),
}
}
pub fn should_log_with_level(&self, level: ScubaVerbosityLevel) -> bool {
match level {
ScubaVerbosityLevel::Normal => true,
ScubaVerbosityLevel::Verbose => self
.maybe_observability_context
.as_ref()
.map_or(false, |octx| {
octx.should_log_scuba_sample(level, self.get_logging_decision_fields())
}),
}
}
pub fn add<K: Into<String>, V: Into<ScubaValue>>(&mut self, key: K, value: V) -> &mut Self {
self.inner.add(key, value);
self
}
pub fn add_metadata(&mut self, metadata: &Metadata) -> &mut Self {
self.inner
.add("session_uuid", metadata.session_id().to_string());
self.inner.add(
"client_identities",
metadata
.identities()
.iter()
.map(|i| i.to_string())
.collect::<Vec<_>>(),
);
if let Some(client_hostname) = metadata.client_hostname() {
// "source_hostname" to remain compatible with historical logging
self.inner
.add("source_hostname", client_hostname.to_owned());
        } else if let Some(client_ip) = metadata.client_ip() {
            self.inner.add("client_ip", client_ip.to_string());
        }
if let Some(unix_name) = metadata.unix_name() {
// "unix_username" to remain compatible with historical logging
self.inner.add("unix_username", unix_name);
}
self.inner
.add_opt("sandcastle_alias", metadata.sandcastle_alias());
self.inner
.add_opt("sandcastle_nonce", metadata.sandcastle_nonce());
self.inner
.add_opt("clientinfo_tag", metadata.clientinfo_u64tag());
self
}
pub fn sample_for_identities(&mut self, identities: &impl MononokeIdentitySetExt) {
// Details of quicksand traffic aren't particularly interesting because all Quicksand tasks are
// doing effectively the same thing at the same time. If we need real-time debugging, we can
// always rely on updating the verbosity in real time.
if identities.is_quicksand() {
self.sampled_unless_verbose(nonzero!(100u64));
}
}
pub fn log_with_msg<S: Into<Option<String>>>(&mut self, log_tag: &str, msg: S) {
if self.fallback_sampled_out_to_verbose
&& self.should_log_with_level(ScubaVerbosityLevel::Verbose)
{
// We need to unsample before we log, so that
// `sample_rate` field is not added, as we are about
// to log everything.
self.inner.unsampled();
}
self.inner.add("log_tag", log_tag);
if let Some(mut msg) = msg.into() {
match tunables().get_max_scuba_msg_length().try_into() {
Ok(size) if size > 0 && msg.len() > size => {
msg.truncate(size);
msg.push_str(" (...)");
}
_ => {}
};
self.inner.add("msg", msg);
}
self.inner.log(); | pub fn log_with_msg_verbose<S: Into<Option<String>>>(&mut self, log_tag: &str, msg: S) {
if !self.should_log_with_level(ScubaVerbosityLevel::Verbose) {
return;
}
self.log_with_msg(log_tag, msg)
}
pub fn add_stream_stats(&mut self, stats: &StreamStats) -> &mut Self {
self.inner
.add("poll_count", stats.poll_count)
.add("poll_time_us", stats.poll_time.as_micros_unchecked())
.add("count", stats.count)
.add(
"completion_time_us",
stats.completion_time.as_micros_unchecked(),
);
self
}
pub fn add_future_stats(&mut self, stats: &FutureStats) -> &mut Self {
self.inner
.add("poll_count", stats.poll_count)
.add("poll_time_us", stats.poll_time.as_micros_unchecked())
.add(
"completion_time_us",
stats.completion_time.as_micros_unchecked(),
);
self
}
pub fn is_discard(&self) -> bool {
self.inner.is_discard()
}
pub fn sampled(&mut self, sample_rate: NonZeroU64) -> &mut Self {
self.fallback_sampled_out_to_verbose = false;
self.inner.sampled(sample_rate);
self
}
pub fn sampled_unless_verbose(&mut self, sample_rate: NonZeroU64) -> &mut Self {
self.fallback_sampled_out_to_verbose = true;
self.inner.sampled(sample_rate);
self
}
pub fn unsampled(&mut self) -> &mut Self {
self.inner.unsampled();
self
}
pub fn log(&mut self) -> bool {
self.inner.log()
}
/// Same as `log`, but sample is assumed to be verbose and is only logged
/// if verbose logging conditions are met
pub fn log_verbose(&mut self) -> bool {
if !self.should_log_with_level(ScubaVerbosityLevel::Verbose) {
// Return value of the `log` function indicates whether
// the sample passed sampling. If it's too verbose, let's
// return false
return false;
}
self.log()
}
pub fn add_common_server_data(&mut self) -> &mut Self {
self.inner.add_common_server_data();
self
}
pub fn sampling(&self) -> &Sampling {
self.inner.sampling()
}
pub fn add_mapped_common_server_data<F>(&mut self, mapper: F) -> &mut Self
where
F: Fn(ServerData) -> &'static str,
{
self.inner.add_mapped_common_server_data(mapper);
self
}
pub fn with_log_file<L: AsRef<Path>>(mut self, log_file: L) -> Result<Self, IoError> {
self.inner = self.inner.with_log_file(log_file)?;
Ok(self)
}
pub fn with_seq(mut self, key: impl Into<String>) -> Self {
self.inner = self.inner.with_seq(key);
self
}
pub fn log_with_time(&mut self, time: u64) -> bool {
self.inner.log_with_time(time)
}
pub fn entry<K: Into<String>>(&mut self, key: K) -> Entry<String, ScubaValue> {
self.inner.entry(key)
}
pub fn flush(&self, timeout: Duration) {
self.inner.flush(timeout)
}
pub fn get_sample(&self) -> &ScubaSample {
self.inner.get_sample()
}
pub fn add_opt<K: Into<String>, V: Into<ScubaValue>>(
&mut self,
key: K,
value: Option<V>,
) -> &mut Self {
self.inner.add_opt(key, value);
self
}
pub fn get<K: Into<String>>(&self, key: K) -> Option<&ScubaValue> {
self.inner.get(key)
}
} | }
/// Same as `log_with_msg`, but sample is assumed to be verbose and is only logged
/// if verbose logging conditions are met | random_line_split |
binding_on_syntax.test.ts | import { expect } from "chai";
import { Binding } from "../../src/bindings/binding";
import { BindingScopeEnum } from "../../src/constants/literal_types";
import { interfaces } from "../../src/interfaces/interfaces";
import { BindingOnSyntax } from "../../src/syntax/binding_on_syntax";
describe("BindingOnSyntax", () => {
it("Should set its own properties correctly", () => {
interface Ninja { }
const ninjaIdentifier = "Ninja";
const binding = new Binding<Ninja>(ninjaIdentifier, BindingScopeEnum.Transient);
const bindingOnSyntax = new BindingOnSyntax<Ninja>(binding);
// cast to any to be able to access private props
const _bindingOnSyntax: any = bindingOnSyntax;
expect(_bindingOnSyntax._binding.serviceIdentifier).eql(ninjaIdentifier);
});
it("Should be able to configure the activation handler of a binding", () => {
interface Ninja { }
const ninjaIdentifier = "Ninja";
const binding = new Binding<Ninja>(ninjaIdentifier, BindingScopeEnum.Transient);
const bindingOnSyntax = new BindingOnSyntax<Ninja>(binding);
bindingOnSyntax.onActivation((context: interfaces.Context, ninja: Ninja) => {
const handler = {};
return new Proxy<Ninja>(ninja, handler);
});
|
}); | expect(binding.onActivation).not.to.eql(null);
}); | random_line_split |
user-form.js | PhaxMachine.pages['user-form'] = {
render: function() {
$('#addUserEmail').on('click', function() {
var emailInputs = $('#userEmailList input[type=email]');
var nextIdx = emailInputs.length;
var newInput = emailInputs.first().clone();
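    // Rename the clone so Rails-style nested attributes treat it as a new
    // record: user[user_emails_attributes][N][email] with a matching DOM id.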
newInput.attr('id', 'user_user_emails_attributes_' + nextIdx + '_email');
newInput.attr('name', 'user[user_emails_attributes][' + nextIdx + '][email]');
newInput.attr('value', '');
newInput.val('');
// Super ugly, but it's 2 AM and this needs to go out.
var inputGroup = $('<div class="input-group"></div>')
var hiddenFieldHtml = '<input class="destroy-field" type="hidden" value="false" name="user[user_emails_attributes][' + nextIdx + '][_destroy]" id="user_user_emails_attributes_' + nextIdx + '__destroy">';
var inputGroupBtn = $('<span class="input-group-btn">' + hiddenFieldHtml + '<a class="btn btn-secondary btn-remove-email" ><i class="glyphicon glyphicon-trash"></i></a></span>');
inputGroup.append(newInput);
inputGroup.append(inputGroupBtn);
$('#userEmailList').append(inputGroup);
});
$(document).on('click', '.btn-remove-email', function() {
var inputGroup = $(this).closest('.input-group');
    inputGroup.find('input[type="email"]').val('[email protected]');
    inputGroup.find('.destroy-field').val(true);
inputGroup.hide(); | }
} | }); | random_line_split |
link-checker.js | const glob = require('glob');
const fs = require('fs');
const path = require('path');
const request = require('request');
const checkedUrls = {
'src/archive/index.php': '',
};
function | (url) {
return new Promise(function(resolve, reject) {
request(
url,
{ followRedirect: false, followAllRedirects: false, headers: { 'Cache-Control': 'no-cache' } },
            (error, response, body) => error ? reject(error) : resolve([response.statusCode, body]));
});
}
function createUrl(file) {
return 'http://localhost:8080/' + file.replace('src/', '');
}
async function isValidUrl(file, url, id) {
let isValid = false;
const currentDirectory = path.dirname(file);
let targetPath;
if (url.startsWith('#')) {
targetPath = file;
} else {
targetPath = path.join(url.startsWith('/') ? 'src' : currentDirectory, url.split('?')[0]);
}
if (targetPath.endsWith('/')) {
targetPath = path.join(targetPath, 'index.php');
}
if (checkedUrls[targetPath] !== undefined || fs.existsSync(targetPath)) {
if (id) {
if (!checkedUrls[targetPath]) {
const [statusCode, contents] = await getUrlContents(createUrl(targetPath));
if (statusCode === 200) {
checkedUrls[targetPath] = contents;
}
}
const html = checkedUrls[targetPath];
const idExists = new RegExp(`id=["']${id}["']`).test(html);
const generatedIds = [];
if (!idExists) {
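                // Fall back to the anchor ids the site generates from its
                // <h1>-<h4> headings: strip tags, drop most punctuation, turn
                // spaces/parens/dots into hyphens, and lowercase.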
const headerPattern = /<h([1-4])>(.+)<\/h\1>/g;
let matches;
while ((matches = headerPattern.exec(html))) {
const generatedId = matches[2]
.replace(/<\/?[^\>]+\>/g, '')
.replace(/[^\w\d\s\(\.\-]/g, '')
.replace(/[\s\(\.]/g, '-')
.toLowerCase();
generatedIds.push(generatedId);
}
}
isValid = idExists || generatedIds.indexOf(id) >= 0;
} else {
isValid = true;
if (checkedUrls[targetPath] === undefined) {
checkedUrls[targetPath] = '';
}
}
}
return isValid;
}
async function verifyFile(file) {
const regex = /<a.*href=['"]([^'"]+)['"]/g;
const [statusCode, contents] = await getUrlContents(createUrl(file));
if (statusCode !== 200) {
//console.log(`\u2753 Ignoring ${file} (${statusCode})`);
}
let matches;
let urlCount = 0;
let validUrls = [];
let invalidUrls = [];
let ignoredUrls = [];
while ((matches = regex.exec(contents))) {
const [urlMatch] = matches.slice(1);
const [url, id] = urlMatch.split('#');
const ignoreList = ['mailto:', 'dist/', 'http'];
if (ignoreList.some(x => url.startsWith(x)) || (id && id.startsWith('example-'))) {
ignoredUrls.push(url);
} else {
const isValid = await isValidUrl(file, url, id);
if (isValid) {
validUrls.push(urlMatch);
} else {
invalidUrls.push(urlMatch);
}
}
urlCount++;
}
if (invalidUrls.length) {
console.log(`\u274C Found errors in ${file}. Invalid URLs found linking to:\n-> ${invalidUrls.join('\n-> ')}`);
} else {
//console.log(`\u2714 Verified ${validUrls.length} links in ${file} (ignored ${ignoredUrls.length})`);
}
}
async function asyncForEach(array, callback) {
for (let index = 0; index < array.length; index++) {
await callback(array[index], index, array);
}
}
glob('src/javascript-*/*.php', {}, async (_, files) => await asyncForEach(files, verifyFile));
| getUrlContents | identifier_name |
link-checker.js | const glob = require('glob');
const fs = require('fs');
const path = require('path');
const request = require('request');
const checkedUrls = {
'src/archive/index.php': '',
};
function getUrlContents(url) {
return new Promise(function(resolve, reject) {
request(
url,
{ followRedirect: false, followAllRedirects: false, headers: { 'Cache-Control': 'no-cache' } },
            (error, response, body) => error ? reject(error) : resolve([response.statusCode, body]));
});
}
function createUrl(file) {
return 'http://localhost:8080/' + file.replace('src/', '');
}
async function isValidUrl(file, url, id) |
async function verifyFile(file) {
const regex = /<a.*href=['"]([^'"]+)['"]/g;
const [statusCode, contents] = await getUrlContents(createUrl(file));
if (statusCode !== 200) {
//console.log(`\u2753 Ignoring ${file} (${statusCode})`);
}
let matches;
let urlCount = 0;
let validUrls = [];
let invalidUrls = [];
let ignoredUrls = [];
while ((matches = regex.exec(contents))) {
const [urlMatch] = matches.slice(1);
const [url, id] = urlMatch.split('#');
const ignoreList = ['mailto:', 'dist/', 'http'];
if (ignoreList.some(x => url.startsWith(x)) || (id && id.startsWith('example-'))) {
ignoredUrls.push(url);
} else {
const isValid = await isValidUrl(file, url, id);
if (isValid) {
validUrls.push(urlMatch);
} else {
invalidUrls.push(urlMatch);
}
}
urlCount++;
}
if (invalidUrls.length) {
console.log(`\u274C Found errors in ${file}. Invalid URLs found linking to:\n-> ${invalidUrls.join('\n-> ')}`);
} else {
//console.log(`\u2714 Verified ${validUrls.length} links in ${file} (ignored ${ignoredUrls.length})`);
}
}
async function asyncForEach(array, callback) {
for (let index = 0; index < array.length; index++) {
await callback(array[index], index, array);
}
}
glob('src/javascript-*/*.php', {}, async (_, files) => await asyncForEach(files, verifyFile));
| {
let isValid = false;
const currentDirectory = path.dirname(file);
let targetPath;
if (url.startsWith('#')) {
targetPath = file;
} else {
targetPath = path.join(url.startsWith('/') ? 'src' : currentDirectory, url.split('?')[0]);
}
if (targetPath.endsWith('/')) {
targetPath = path.join(targetPath, 'index.php');
}
if (checkedUrls[targetPath] !== undefined || fs.existsSync(targetPath)) {
if (id) {
if (!checkedUrls[targetPath]) {
const [statusCode, contents] = await getUrlContents(createUrl(targetPath));
if (statusCode === 200) {
checkedUrls[targetPath] = contents;
}
}
const html = checkedUrls[targetPath];
const idExists = new RegExp(`id=["']${id}["']`).test(html);
const generatedIds = [];
if (!idExists) {
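                // Fall back to the anchor ids the site generates from its
                // <h1>-<h4> headings: strip tags, drop most punctuation, turn
                // spaces/parens/dots into hyphens, and lowercase.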
const headerPattern = /<h([1-4])>(.+)<\/h\1>/g;
let matches;
while ((matches = headerPattern.exec(html))) {
const generatedId = matches[2]
.replace(/<\/?[^\>]+\>/g, '')
.replace(/[^\w\d\s\(\.\-]/g, '')
.replace(/[\s\(\.]/g, '-')
.toLowerCase();
generatedIds.push(generatedId);
}
}
isValid = idExists || generatedIds.indexOf(id) >= 0;
} else {
isValid = true;
if (checkedUrls[targetPath] === undefined) {
checkedUrls[targetPath] = '';
}
}
}
return isValid;
} | identifier_body |
link-checker.js | const glob = require('glob');
const fs = require('fs');
const path = require('path');
const request = require('request');
const checkedUrls = {
'src/archive/index.php': '',
};
function getUrlContents(url) {
return new Promise(function(resolve, reject) {
request(
url,
{ followRedirect: false, followAllRedirects: false, headers: { 'Cache-Control': 'no-cache' } },
            (error, response, body) => error ? reject(error) : resolve([response.statusCode, body]));
});
}
function createUrl(file) {
return 'http://localhost:8080/' + file.replace('src/', '');
}
async function isValidUrl(file, url, id) {
let isValid = false;
const currentDirectory = path.dirname(file);
let targetPath;
if (url.startsWith('#')) {
targetPath = file;
} else {
targetPath = path.join(url.startsWith('/') ? 'src' : currentDirectory, url.split('?')[0]);
}
if (targetPath.endsWith('/')) {
targetPath = path.join(targetPath, 'index.php');
}
if (checkedUrls[targetPath] !== undefined || fs.existsSync(targetPath)) {
if (id) {
if (!checkedUrls[targetPath]) {
const [statusCode, contents] = await getUrlContents(createUrl(targetPath));
if (statusCode === 200) {
checkedUrls[targetPath] = contents;
}
}
const html = checkedUrls[targetPath];
const idExists = new RegExp(`id=["']${id}["']`).test(html);
const generatedIds = [];
if (!idExists) {
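                // Fall back to the anchor ids the site generates from its
                // <h1>-<h4> headings: strip tags, drop most punctuation, turn
                // spaces/parens/dots into hyphens, and lowercase.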
const headerPattern = /<h([1-4])>(.+)<\/h\1>/g;
let matches;
while ((matches = headerPattern.exec(html))) {
const generatedId = matches[2]
.replace(/<\/?[^\>]+\>/g, '')
.replace(/[^\w\d\s\(\.\-]/g, '')
.replace(/[\s\(\.]/g, '-')
.toLowerCase();
generatedIds.push(generatedId);
}
}
isValid = idExists || generatedIds.indexOf(id) >= 0;
} else {
isValid = true;
if (checkedUrls[targetPath] === undefined) {
checkedUrls[targetPath] = '';
}
}
}
return isValid;
}
async function verifyFile(file) {
const regex = /<a.*href=['"]([^'"]+)['"]/g;
const [statusCode, contents] = await getUrlContents(createUrl(file));
if (statusCode !== 200) |
let matches;
let urlCount = 0;
let validUrls = [];
let invalidUrls = [];
let ignoredUrls = [];
while ((matches = regex.exec(contents))) {
const [urlMatch] = matches.slice(1);
const [url, id] = urlMatch.split('#');
const ignoreList = ['mailto:', 'dist/', 'http'];
if (ignoreList.some(x => url.startsWith(x)) || (id && id.startsWith('example-'))) {
ignoredUrls.push(url);
} else {
const isValid = await isValidUrl(file, url, id);
if (isValid) {
validUrls.push(urlMatch);
} else {
invalidUrls.push(urlMatch);
}
}
urlCount++;
}
if (invalidUrls.length) {
console.log(`\u274C Found errors in ${file}. Invalid URLs found linking to:\n-> ${invalidUrls.join('\n-> ')}`);
} else {
//console.log(`\u2714 Verified ${validUrls.length} links in ${file} (ignored ${ignoredUrls.length})`);
}
}
async function asyncForEach(array, callback) {
for (let index = 0; index < array.length; index++) {
await callback(array[index], index, array);
}
}
glob('src/javascript-*/*.php', {}, async (_, files) => await asyncForEach(files, verifyFile));
| {
//console.log(`\u2753 Ignoring ${file} (${statusCode})`);
} | conditional_block |
link-checker.js | const glob = require('glob');
const fs = require('fs');
const path = require('path');
const request = require('request');
const checkedUrls = {
'src/archive/index.php': '',
};
function getUrlContents(url) {
return new Promise(function(resolve, reject) {
request(
url,
{ followRedirect: false, followAllRedirects: false, headers: { 'Cache-Control': 'no-cache' } },
            (error, response, body) => error ? reject(error) : resolve([response.statusCode, body]));
});
}
function createUrl(file) {
return 'http://localhost:8080/' + file.replace('src/', '');
}
async function isValidUrl(file, url, id) {
let isValid = false;
const currentDirectory = path.dirname(file);
let targetPath;
if (url.startsWith('#')) {
targetPath = file;
} else {
targetPath = path.join(url.startsWith('/') ? 'src' : currentDirectory, url.split('?')[0]);
}
if (targetPath.endsWith('/')) {
targetPath = path.join(targetPath, 'index.php');
}
if (checkedUrls[targetPath] !== undefined || fs.existsSync(targetPath)) {
if (id) {
if (!checkedUrls[targetPath]) {
const [statusCode, contents] = await getUrlContents(createUrl(targetPath));
if (statusCode === 200) {
checkedUrls[targetPath] = contents;
}
}
const html = checkedUrls[targetPath];
const idExists = new RegExp(`id=["']${id}["']`).test(html);
const generatedIds = [];
if (!idExists) {
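                // Fall back to the anchor ids the site generates from its
                // <h1>-<h4> headings: strip tags, drop most punctuation, turn
                // spaces/parens/dots into hyphens, and lowercase.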
const headerPattern = /<h([1-4])>(.+)<\/h\1>/g;
let matches;
while ((matches = headerPattern.exec(html))) {
const generatedId = matches[2]
.replace(/<\/?[^\>]+\>/g, '')
.replace(/[^\w\d\s\(\.\-]/g, '')
.replace(/[\s\(\.]/g, '-')
.toLowerCase();
generatedIds.push(generatedId);
}
}
isValid = idExists || generatedIds.indexOf(id) >= 0;
} else {
isValid = true;
if (checkedUrls[targetPath] === undefined) {
checkedUrls[targetPath] = '';
}
}
}
return isValid;
}
async function verifyFile(file) {
const regex = /<a.*href=['"]([^'"]+)['"]/g;
const [statusCode, contents] = await getUrlContents(createUrl(file));
if (statusCode !== 200) {
//console.log(`\u2753 Ignoring ${file} (${statusCode})`);
}
let matches;
let urlCount = 0;
let validUrls = [];
let invalidUrls = [];
let ignoredUrls = [];
while ((matches = regex.exec(contents))) {
const [urlMatch] = matches.slice(1);
const [url, id] = urlMatch.split('#');
const ignoreList = ['mailto:', 'dist/', 'http'];
if (ignoreList.some(x => url.startsWith(x)) || (id && id.startsWith('example-'))) {
ignoredUrls.push(url);
} else {
const isValid = await isValidUrl(file, url, id);
| if (isValid) {
validUrls.push(urlMatch);
} else {
invalidUrls.push(urlMatch);
}
}
urlCount++;
}
if (invalidUrls.length) {
console.log(`\u274C Found errors in ${file}. Invalid URLs found linking to:\n-> ${invalidUrls.join('\n-> ')}`);
} else {
//console.log(`\u2714 Verified ${validUrls.length} links in ${file} (ignored ${ignoredUrls.length})`);
}
}
async function asyncForEach(array, callback) {
for (let index = 0; index < array.length; index++) {
await callback(array[index], index, array);
}
}
glob('src/javascript-*/*.php', {}, async (_, files) => await asyncForEach(files, verifyFile)); | random_line_split |
|
checkout-yubikey.py | #!/usr/bin/python
###
#
# checkout-yubikey.py
#
# Troy Axthelm
# Advanced Research Computing Center
# University of Wyoming
# [email protected]
#
# Created: 13 June 2016
#
#
# Modified: <initials> <day> <month> <year> <change notes>
#
###
import sys
sys.path.insert(0, './lib/')
import logging
import subprocess
import idm_manage
import argparse
__version__ = '1.0'
# argparser and auto-generated help
# checkout-yubikey usage
usage = "%(prog)s <username> <yubikeyid>"
parser = argparse.ArgumentParser(prog='checkout-yubikey.py', usage=usage,
description='Checkout a yubikey to an IDM user')
| # version
parser.add_argument('--version', action='version', version="%(prog)s "+__version__)
parser.add_argument('username', nargs='+')
parser.add_argument('yubikeyid', nargs='+')
# parse command-line arguments
args = parser.parse_args()
print args
# issue ipa command
idm_manage.addyubikey(args.username[0], args.yubikeyid[0]) | parser.set_defaults()
| random_line_split |
quobyte.py | # Copyright (c) 2015 Quobyte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import fileutils
import psutil
import six
import nova.conf
from nova import exception as nova_exception
from nova.i18n import _
import nova.privsep.libvirt
from nova import utils
from nova.virt.libvirt.volume import fs
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
SOURCE_PROTOCOL = 'quobyte'
SOURCE_TYPE = 'file'
DRIVER_CACHE = 'none'
DRIVER_IO = 'native'
VALID_SYSD_STATES = ["starting", "running", "degraded"]
SYSTEMCTL_CHECK_PATH = "/run/systemd/system"
_is_systemd = None
def is_systemd():
"""Checks if the host is running systemd"""
global _is_systemd
if _is_systemd is not None:
return _is_systemd
tmp_is_systemd = False
if psutil.Process(1).name() == "systemd" or os.path.exists(
SYSTEMCTL_CHECK_PATH):
# NOTE(kaisers): exit code might be >1 in theory but in practice this
# is hard coded to 1. Due to backwards compatibility and systemd
# CODING_STYLE this is unlikely to change.
sysdout, sysderr = processutils.execute("systemctl",
"is-system-running",
check_exit_code=[0, 1])
for state in VALID_SYSD_STATES:
if state == sysdout.strip():
tmp_is_systemd = True
break
_is_systemd = tmp_is_systemd
return _is_systemd
def mount_volume(volume, mnt_base, configfile=None):
"""Wraps execute calls for mounting a Quobyte volume"""
fileutils.ensure_tree(mnt_base)
# Note(kaisers): with systemd this requires a separate CGROUP to
# prevent Nova service stop/restarts from killing the mount.
if is_systemd():
LOG.debug('Mounting volume %s at mount point %s via systemd-run',
volume, mnt_base)
nova.privsep.libvirt.systemd_run_qb_mount(volume, mnt_base,
cfg_file=configfile)
else:
LOG.debug('Mounting volume %s at mount point %s via mount.quobyte',
                  volume, mnt_base)
nova.privsep.libvirt.unprivileged_qb_mount(volume, mnt_base,
cfg_file=configfile)
LOG.info('Mounted volume: %s', volume)
def umount_volume(mnt_base):
"""Wraps execute calls for unmouting a Quobyte volume"""
try:
if is_systemd():
nova.privsep.libvirt.umount(mnt_base)
else:
nova.privsep.libvirt.unprivileged_umount(mnt_base)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
LOG.error("The Quobyte volume at %s is still in use.", mnt_base)
else:
LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"),
mnt_base)
def validate_volume(mount_path):
"""Determine if the volume is a valid Quobyte mount.
Runs a number of tests to be sure this is a (working) Quobyte mount
"""
partitions = psutil.disk_partitions(all=True)
for p in partitions:
if mount_path != p.mountpoint:
continue
if p.device.startswith("quobyte@") or p.fstype == "fuse.quobyte":
statresult = os.stat(mount_path)
# Note(kaisers): Quobyte always shows mount points with size 0
if statresult.st_size == 0:
# client looks healthy
return # we're happy here
else:
msg = (_("The mount %(mount_path)s is not a "
"valid Quobyte volume. Stale mount?")
% {'mount_path': mount_path})
raise nova_exception.StaleVolumeMount(msg, mount_path=mount_path)
else:
msg = (_("The mount %(mount_path)s is not a valid "
"Quobyte volume according to partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
msg = (_("No matching Quobyte mount entry for %(mount_path)s"
" could be found for validation in partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
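# Helper flow in the driver below (descriptive note, not from the original
# source): connect_volume() validates any existing mount, remounts via
# mount_volume() if needed, then re-validates; disconnect_volume() validates
# the mount before calling umount_volume().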
class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
"""Class implements libvirt part of volume driver for Quobyte."""
def _get_mount_point_base(self):
return CONF.libvirt.quobyte_mount_point_base
def get_config(self, connection_info, disk_info):
conf = super(LibvirtQuobyteVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
conf.source_protocol = SOURCE_PROTOCOL
conf.source_type = SOURCE_TYPE
conf.driver_cache = DRIVER_CACHE
conf.driver_io = DRIVER_IO
conf.driver_format = data.get('format', 'raw')
conf.source_path = self._get_device_path(connection_info)
return conf
@utils.synchronized('connect_qb_volume')
def connect_volume(self, connection_info, instance):
"""Connect the volume."""
if is_systemd():
LOG.debug("systemd detected.")
else:
LOG.debug("No systemd detected.")
data = connection_info['data']
quobyte_volume = self._normalize_export(data['export'])
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
mounted = True
except nova_exception.StaleVolumeMount:
mounted = False
LOG.info('Fixing previous mount %s which was not '
'unmounted correctly.', mount_path)
umount_volume(mount_path)
except nova_exception.InvalidVolume:
mounted = False
if not mounted:
mount_volume(quobyte_volume,
mount_path,
CONF.libvirt.quobyte_client_cfg)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as nex:
LOG.error("Could not mount Quobyte volume: %s", nex)
@utils.synchronized('connect_qb_volume')
def | (self, connection_info, instance):
"""Disconnect the volume."""
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as exc:
LOG.warning("Could not disconnect Quobyte volume mount: %s", exc)
else:
umount_volume(mount_path)
def _normalize_export(self, export):
protocol = SOURCE_PROTOCOL + "://"
if export.startswith(protocol):
export = export[len(protocol):]
return export
| disconnect_volume | identifier_name |
quobyte.py | # Copyright (c) 2015 Quobyte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import fileutils
import psutil
import six
import nova.conf
from nova import exception as nova_exception
from nova.i18n import _
import nova.privsep.libvirt
from nova import utils
from nova.virt.libvirt.volume import fs
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
SOURCE_PROTOCOL = 'quobyte'
SOURCE_TYPE = 'file'
DRIVER_CACHE = 'none'
DRIVER_IO = 'native'
VALID_SYSD_STATES = ["starting", "running", "degraded"] | _is_systemd = None
def is_systemd():
"""Checks if the host is running systemd"""
global _is_systemd
if _is_systemd is not None:
return _is_systemd
tmp_is_systemd = False
if psutil.Process(1).name() == "systemd" or os.path.exists(
SYSTEMCTL_CHECK_PATH):
# NOTE(kaisers): exit code might be >1 in theory but in practice this
# is hard coded to 1. Due to backwards compatibility and systemd
# CODING_STYLE this is unlikely to change.
sysdout, sysderr = processutils.execute("systemctl",
"is-system-running",
check_exit_code=[0, 1])
for state in VALID_SYSD_STATES:
if state == sysdout.strip():
tmp_is_systemd = True
break
_is_systemd = tmp_is_systemd
return _is_systemd
def mount_volume(volume, mnt_base, configfile=None):
"""Wraps execute calls for mounting a Quobyte volume"""
fileutils.ensure_tree(mnt_base)
# Note(kaisers): with systemd this requires a separate CGROUP to
# prevent Nova service stop/restarts from killing the mount.
if is_systemd():
LOG.debug('Mounting volume %s at mount point %s via systemd-run',
volume, mnt_base)
nova.privsep.libvirt.systemd_run_qb_mount(volume, mnt_base,
cfg_file=configfile)
else:
LOG.debug('Mounting volume %s at mount point %s via mount.quobyte',
                  volume, mnt_base)
nova.privsep.libvirt.unprivileged_qb_mount(volume, mnt_base,
cfg_file=configfile)
LOG.info('Mounted volume: %s', volume)
def umount_volume(mnt_base):
"""Wraps execute calls for unmouting a Quobyte volume"""
try:
if is_systemd():
nova.privsep.libvirt.umount(mnt_base)
else:
nova.privsep.libvirt.unprivileged_umount(mnt_base)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
LOG.error("The Quobyte volume at %s is still in use.", mnt_base)
else:
LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"),
mnt_base)
def validate_volume(mount_path):
"""Determine if the volume is a valid Quobyte mount.
Runs a number of tests to be sure this is a (working) Quobyte mount
"""
partitions = psutil.disk_partitions(all=True)
for p in partitions:
if mount_path != p.mountpoint:
continue
if p.device.startswith("quobyte@") or p.fstype == "fuse.quobyte":
statresult = os.stat(mount_path)
# Note(kaisers): Quobyte always shows mount points with size 0
if statresult.st_size == 0:
# client looks healthy
return # we're happy here
else:
msg = (_("The mount %(mount_path)s is not a "
"valid Quobyte volume. Stale mount?")
% {'mount_path': mount_path})
raise nova_exception.StaleVolumeMount(msg, mount_path=mount_path)
else:
msg = (_("The mount %(mount_path)s is not a valid "
"Quobyte volume according to partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
msg = (_("No matching Quobyte mount entry for %(mount_path)s"
" could be found for validation in partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
"""Class implements libvirt part of volume driver for Quobyte."""
def _get_mount_point_base(self):
return CONF.libvirt.quobyte_mount_point_base
def get_config(self, connection_info, disk_info):
conf = super(LibvirtQuobyteVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
conf.source_protocol = SOURCE_PROTOCOL
conf.source_type = SOURCE_TYPE
conf.driver_cache = DRIVER_CACHE
conf.driver_io = DRIVER_IO
conf.driver_format = data.get('format', 'raw')
conf.source_path = self._get_device_path(connection_info)
return conf
@utils.synchronized('connect_qb_volume')
def connect_volume(self, connection_info, instance):
"""Connect the volume."""
if is_systemd():
LOG.debug("systemd detected.")
else:
LOG.debug("No systemd detected.")
data = connection_info['data']
quobyte_volume = self._normalize_export(data['export'])
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
mounted = True
except nova_exception.StaleVolumeMount:
mounted = False
LOG.info('Fixing previous mount %s which was not '
'unmounted correctly.', mount_path)
umount_volume(mount_path)
except nova_exception.InvalidVolume:
mounted = False
if not mounted:
mount_volume(quobyte_volume,
mount_path,
CONF.libvirt.quobyte_client_cfg)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as nex:
LOG.error("Could not mount Quobyte volume: %s", nex)
@utils.synchronized('connect_qb_volume')
def disconnect_volume(self, connection_info, instance):
"""Disconnect the volume."""
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as exc:
LOG.warning("Could not disconnect Quobyte volume mount: %s", exc)
else:
umount_volume(mount_path)
def _normalize_export(self, export):
protocol = SOURCE_PROTOCOL + "://"
if export.startswith(protocol):
export = export[len(protocol):]
        return export
capitalize-pipe.ts
/// <reference path="../../../typings/_custom.d.ts" />
import {Pipe, PipeFactory} from 'angular2/angular2';
// Check if the value is supported for the pipe
export function isString(txt): boolean {
return typeof txt === 'string';
}
// Simple example of a Pipe
export class CapitalizePipe implements Pipe {
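  // Matches each word: one or more alphanumerics followed by any non-space, non-hyphen characters.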
regexp: RegExp = /([^\W_]+[^\s-]*) */g;
supports(txt): boolean {
return isString(txt);
}
transform(value: string, args?: List<any>): any {
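    // Without args only the first word is capitalized; with any args every matched word is.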
return (!value) ? '' :
(!args) ?
CapitalizePipe.capitalizeWord(value) :
value.replace(this.regexp, CapitalizePipe.capitalizeWord);
}
static capitalizeWord(txt: string): string {
return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();
  }
  onDestroy(): void {
// not needed since this is stateless
}
}
// We create a factory since we create an instance for each binding for stateful pipes
export class CapitalizeFactory implements PipeFactory {
supports(txt): boolean {
return isString(txt);
}
create(cdRef): Pipe {
return new CapitalizePipe();
}
}
// Since templates in angular are async we are passing the value to
// NullPipeFactory if the value is not supported
export var capitalize = [ new CapitalizeFactory() ];
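// Hypothetical template usage, assuming the pipe is registered under the name "capitalize":
//   {{ title | capitalize }}        e.g. "hello world" -> "Hello world"
//   {{ title | capitalize:true }}   e.g. "hello world" -> "Hello World"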
prepare.js
/**
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
/* jshint node:true */
var platform_modules = require('./platforms'),
path = require('path'),
config_changes = require('./util/config-changes'),
xml_helpers = require('../util/xml-helpers'),
wp7 = require('./platforms/wp7'),
wp8 = require('./platforms/wp8'),
windows8 = require('./platforms/windows8'),
    common = require('./platforms/common'),
fs = require('fs'),
shell = require('shelljs'),
util = require('util'),
events = require('./events'),
plugman = require('./plugman'),
et = require('elementtree');
// Called on --prepare.
// Sets up each plugin's Javascript code to be loaded properly.
// Expects a path to the project (platforms/android in CLI, . in plugman-only),
// a path to where the plugins are downloaded, the www dir, and the platform ('android', 'ios', etc.).
module.exports = function handlePrepare(project_dir, platform, plugins_dir, www_dir) {
// Process:
// - Do config munging by calling into config-changes module
// - List all plugins in plugins_dir
// - Load and parse their plugin.xml files.
// - Skip those without support for this platform. (No <platform> tags means JS-only!)
// - Build a list of all their js-modules, including platform-specific js-modules.
// - For each js-module (general first, then platform) build up an object storing the path and any clobbers, merges and runs for it.
// - Write this object into www/cordova_plugins.json.
// - Cordova.js contains code to load them at runtime from that file.
events.emit('verbose', 'Preparing ' + platform + ' project');
var platform_json = config_changes.get_platform_json(plugins_dir, platform);
var wwwDir = www_dir || platform_modules[platform].www_dir(project_dir);
// Check if there are any plugins queued for uninstallation, and if so, remove any of their plugin web assets loaded in
// via <js-module> elements
var plugins_to_uninstall = platform_json.prepare_queue.uninstalled;
if (plugins_to_uninstall && plugins_to_uninstall.length) {
var plugins_www = path.join(wwwDir, 'plugins');
if (fs.existsSync(plugins_www)) {
plugins_to_uninstall.forEach(function(plug) {
var id = plug.id;
var plugin_modules = path.join(plugins_www, id);
if (fs.existsSync(plugin_modules)) {
events.emit('verbose', 'Removing plugins directory from www "'+plugin_modules+'"');
shell.rm('-rf', plugin_modules);
}
});
}
}
events.emit('verbose', 'Processing configuration changes for plugins.');
config_changes.process(plugins_dir, project_dir, platform);
// for windows phone platform we need to add all www resources to the .csproj file
// first we need to remove them all to prevent duplicates
var wp_csproj;
if(platform == 'wp7' || platform == 'wp8') {
        wp_csproj = (platform == 'wp7' ? wp7.parseProjectFile(project_dir) : wp8.parseProjectFile(project_dir));
var item_groups = wp_csproj.xml.findall('ItemGroup');
for (var i = 0, l = item_groups.length; i < l; i++) {
var group = item_groups[i];
var files = group.findall('Content');
for (var j = 0, k = files.length; j < k; j++) {
var file = files[j];
if (file.attrib.Include.substr(0,11) == "www\\plugins" || file.attrib.Include == "www\\cordova_plugins.js") {
// remove file reference
group.remove(0, file);
// remove ItemGroup if empty
var new_group = group.findall('Content');
if(new_group.length < 1) {
wp_csproj.xml.getroot().remove(0, group);
}
}
}
}
}
else if(platform == "windows8") {
wp_csproj = windows8.parseProjectFile(project_dir);
var item_groups = wp_csproj.xml.findall('ItemGroup');
for (var i = 0, l = item_groups.length; i < l; i++) {
var group = item_groups[i];
var files = group.findall('Content');
for (var j = 0, k = files.length; j < k; j++) {
var file = files[j];
if (file.attrib.Include.substr(0,11) == "www\\plugins" || file.attrib.Include == "www\\cordova_plugins.js") {
// remove file reference
group.remove(0, file);
// remove ItemGroup if empty
var new_group = group.findall('Content');
if(new_group.length < 1) {
wp_csproj.xml.getroot().remove(0, group);
}
}
}
}
}
platform_json = config_changes.get_platform_json(plugins_dir, platform);
// This array holds all the metadata for each module and ends up in cordova_plugins.json
var plugins = Object.keys(platform_json.installed_plugins).concat(Object.keys(platform_json.dependent_plugins));
var moduleObjects = [];
var pluginMetadata = {};
events.emit('verbose', 'Iterating over installed plugins:', plugins);
plugins && plugins.forEach(function(plugin) {
var pluginDir = path.join(plugins_dir, plugin),
pluginXML = path.join(pluginDir, 'plugin.xml');
if (!fs.existsSync(pluginXML)) {
plugman.emit('warn', 'Missing file: ' + pluginXML);
return;
}
var xml = xml_helpers.parseElementtreeSync(pluginXML);
var plugin_id = xml.getroot().attrib.id;
// pluginMetadata is a mapping from plugin IDs to versions.
pluginMetadata[plugin_id] = xml.getroot().attrib.version;
// add the plugins dir to the platform's www.
var platformPluginsDir = path.join(wwwDir, 'plugins');
// XXX this should not be here if there are no js-module. It leaves an empty plugins/ directory
        shell.mkdir('-p', platformPluginsDir);
        var jsModules = xml.findall('./js-module');
        var assets = xml.findall('asset');
        var platformTag = xml.find(util.format('./platform[@name="%s"]', platform));
if (platformTag) {
assets = assets.concat(platformTag.findall('./asset'));
jsModules = jsModules.concat(platformTag.findall('./js-module'));
}
// Copy www assets described in <asset> tags.
assets = assets || [];
assets.forEach(function(asset) {
common.asset.install(asset, pluginDir, wwwDir);
});
jsModules.forEach(function(module) {
// Copy the plugin's files into the www directory.
// NB: We can't always use path.* functions here, because they will use platform slashes.
// But the path in the plugin.xml and in the cordova_plugins.js should be always forward slashes.
var pathParts = module.attrib.src.split('/');
var fsDirname = path.join.apply(path, pathParts.slice(0, -1));
var fsDir = path.join(platformPluginsDir, plugin_id, fsDirname);
shell.mkdir('-p', fsDir);
// Read in the file, prepend the cordova.define, and write it back out.
var moduleName = plugin_id + '.';
if (module.attrib.name) {
moduleName += module.attrib.name;
} else {
var result = module.attrib.src.match(/([^\/]+)\.js/);
moduleName += result[1];
}
var fsPath = path.join.apply(path, pathParts);
var scriptContent = fs.readFileSync(path.join(pluginDir, fsPath), 'utf-8');
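            // Wrap the module body in cordova.define so cordova.js can resolve it by ID at runtime.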
scriptContent = 'cordova.define("' + moduleName + '", function(require, exports, module) { ' + scriptContent + '\n});\n';
fs.writeFileSync(path.join(platformPluginsDir, plugin_id, fsPath), scriptContent, 'utf-8');
if(platform == 'wp7' || platform == 'wp8' || platform == "windows8") {
wp_csproj.addSourceFile(path.join('www', 'plugins', plugin_id, fsPath));
}
// Prepare the object for cordova_plugins.json.
var obj = {
file: ['plugins', plugin_id, module.attrib.src].join('/'),
id: moduleName
};
// Loop over the children of the js-module tag, collecting clobbers, merges and runs.
module.getchildren().forEach(function(child) {
if (child.tag.toLowerCase() == 'clobbers') {
if (!obj.clobbers) {
obj.clobbers = [];
}
obj.clobbers.push(child.attrib.target);
} else if (child.tag.toLowerCase() == 'merges') {
if (!obj.merges) {
obj.merges = [];
}
obj.merges.push(child.attrib.target);
} else if (child.tag.toLowerCase() == 'runs') {
obj.runs = true;
}
});
// Add it to the list of module objects bound for cordova_plugins.json
moduleObjects.push(obj);
});
});
// Write out moduleObjects as JSON wrapped in a cordova module to cordova_plugins.js
var final_contents = "cordova.define('cordova/plugin_list', function(require, exports, module) {\n";
final_contents += 'module.exports = ' + JSON.stringify(moduleObjects,null,' ') + ';\n';
final_contents += 'module.exports.metadata = \n';
final_contents += '// TOP OF METADATA\n';
final_contents += JSON.stringify(pluginMetadata, null, ' ') + '\n';
final_contents += '// BOTTOM OF METADATA\n';
final_contents += '});'; // Close cordova.define.
events.emit('verbose', 'Writing out cordova_plugins.js...');
fs.writeFileSync(path.join(wwwDir, 'cordova_plugins.js'), final_contents, 'utf-8');
if(platform == 'wp7' || platform == 'wp8' || platform == "windows8") {
wp_csproj.addSourceFile(path.join('www', 'cordova_plugins.js'));
wp_csproj.write();
}
};
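// Illustrative call (paths are examples, not taken from this file):
//   require('./prepare')('platforms/android', 'android', 'plugins');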
function.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::rc::Rc;
use std::cell::RefCell;
use value::*;
use ast;
use environment::Environment;
use runtime::RuntimeError;
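/// Arity of a callable: the fixed parameter count and whether extra (variadic) arguments are accepted.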
#[derive(Clone, Debug)]
pub struct CallSign {
pub num_params: usize,
pub variadic: bool,
}
#[derive(Clone, Debug)]
pub enum Function {
NativeVoid(CallSign, fn(Vec<Value>) -> Result<(), RuntimeError>),
NativeReturning(CallSign, fn(Vec<Value>) -> Result<Value, RuntimeError>),
User {
call_sign: CallSign,
param_names: Vec<String>,
body: Box<ast::StmtNode>,
env: Rc<RefCell<Environment>>,
},
}
impl Function {
pub fn get_call_sign(&self) -> CallSign {
match *self {
Function::NativeVoid(ref call_sign, _) |
Function::NativeReturning(ref call_sign, _) |
Function::User { ref call_sign, .. } => call_sign.clone(),
}
}
}
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
pub fn native_println(args: Vec<Value>) -> Result<(), RuntimeError> {
if args.is_empty() {
return Ok(());
}
if args.len() == 1 {
println!("{}", args[0]);
} else {
print!("{}", args[0]);
for arg in args.iter().skip(1) {
print!(" {}", arg);
}
println!("");
}
Ok(())
}
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
pub fn native_assert(args: Vec<Value>) -> Result<(), RuntimeError> {
let val = &args[0];
if !val.is_truthy() {
Err(RuntimeError::GeneralRuntimeError(
format!("assert: assertion failed for value {}", val),
))
} else {
Ok(())
}
}
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
pub fn native_assert_eq(args: Vec<Value>) -> Result<(), RuntimeError> {
let (val1, val2) = (&args[0], &args[1]);
if val1 != val2 {
Err(RuntimeError::GeneralRuntimeError(
format!("assert_eq: {} != {}", val1, val2),
))
} else {
Ok(())
}
}
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
pub fn native_len(args: Vec<Value>) -> Result<Value, RuntimeError> {
let val = &args[0];
match *val {
Value::Tuple(ref v) => Ok(Value::Number(Number::Integer(v.len() as i64))),
ref non_tuple_val => Err(RuntimeError::GeneralRuntimeError(
format!("cannot get len of {:?}", non_tuple_val.get_type()),
)),
}
}
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
pub fn native_run_http_server(args: Vec<Value>) -> Result<(), RuntimeError> {
use hyper::server::{Request, Response, Server};
use hyper::header::ContentType;
use hyper::uri::RequestUri;
use std::sync::mpsc::channel;
use std::sync::Mutex;
use std::thread;
use std::sync::PoisonError;
use ast_walk_interpreter::call_func;
let handler_val = &args[0];
let handler_func = match *handler_val {
Value::Function(ref f) => f,
_ => {
return Err(RuntimeError::GeneralRuntimeError(
"http_server: handler is not a Function".to_owned(),
))
}
};
let maybe_hyper_server = Server::http("0.0.0.0:8000");
if let Err(e) = maybe_hyper_server {
return Err(RuntimeError::GeneralRuntimeError(
format!("http_server: {}", e),
));
}
let server = maybe_hyper_server.unwrap();
// channel from server to interpreter
let (sender, receiver) = channel();
let sender_mutex = Mutex::new(sender);
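    // hyper handles requests on its own threads; the Mutex lets them share the channel Sender safely.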
thread::spawn(|| {
println!("http_server: listening on 0.0.0.0:8000 on a new thread");
let handle_result = server.handle(move |req: Request, mut res: Response| {
let sender = match sender_mutex.lock() {
Ok(sender) => sender,
Err(PoisonError { .. }) => panic!("http_server: threading error (lock poisoned)"),
};
if let RequestUri::AbsolutePath(path) = req.uri {
// channel from interpreter to server
let (rev_sender, rev_receiver) = channel();
if sender.send((path, rev_sender)).is_err() {
panic!("http_server: threading error (could not send on reverse channel)");
}
let response_string: String = match rev_receiver.recv() {
Ok(response_string) => response_string,
Err(_) => {
// assume some clean disconnect
return;
}
};
res.headers_mut().set(ContentType::html());
if res.send(response_string.as_bytes()).is_err() {
panic!("http_server: could not send response");
}
} else {
panic!("http_server: unknown kind of request");
}
});
if handle_result.is_err() {
panic!("http_server: could not handle requests");
}
});
loop {
match receiver.recv() {
Err(_) => {
// assume some clean disconnect
break;
}
Ok(msg) => {
let (path, sender) = msg;
let possible_response_value = call_func(handler_func, &[Value::String(path)])?;
let response_value = match possible_response_value {
None => {
return Err(RuntimeError::GeneralRuntimeError(
"http_server: handler \
function did not return a \
value"
.to_owned(),
));
}
Some(val) => val,
};
                if sender.send(response_value.to_string()).is_err() {
                    return Err(RuntimeError::GeneralRuntimeError(
                        "http_server: threading error \
                         (could not send on reverse \
                         channel)"
                            .to_owned(),
                    ));
                }
            }
        }
    }
    Ok(())
}
jquery.juicy.buttonselect.js
/*
* jUIcy Button Select
*
* Depends:
* jquery.ui.core.js
* jquery.ui.widget.js
* jquery.ui.button.js
*/
(function( $ ) {
$.widget( "juicy.buttonselect", {
options: {
name: '',
items: {},
wrapTemplate: '',
click: null
},
_create: function()
{
this.element.addClass( 'juicy-buttonselect' );
this._appendInputs();
},
_setOption: function( key, value )
{
$.Widget.prototype._setOption.apply( this, arguments );
switch( key ) {
case 'name':
case 'items':
case 'wrapTemplate':
this.refresh();
break;
}
},
destroy: function()
{
this.element
.removeClass( 'juicy-buttonselect juicy-buttonselect-single' )
.empty();
$.Widget.prototype.destroy.call( this );
},
/**
* settings = {
* type: radio|checkbox
* id: id attribute
* name: name attribute
* value: value attribute
* labelText: label text
* wrap: element to wrap around input
* checked: boolean
* }
*/
_newInput: function( target, settings )
{
var realTarget,
input =
$('<input />')
.attr( 'type', settings.type )
.attr( 'id', settings.id )
.attr( 'name', settings.name )
.attr( 'value', settings.value ),
label =
$('<label></label>')
.attr( 'for', settings.id )
.html( settings.labelText );
if ( settings.checked ) {
input.attr( 'checked', 'checked' );
}
if ( settings.wrap.length ) {
realTarget = $( settings.wrap ).appendTo( target );
} else {
realTarget = target;
}
realTarget.append( input, label );
return input;
},
_appendInputs: function()
{
var self = this,
o = this.options,
checked = true;
this.element.addClass( 'juicy-buttonselect-single' );
$.each( o.items, function( key, val ){
self._newInput(
self.element,
{
type: 'radio',
id: o.name + '-' + key,
name: o.name,
value: key,
labelText: val,
wrap: o.wrapTemplate,
checked: checked
}
)
.button()
.click( function(){
self._updButtons();
o.click();
});
checked = false;
});
self._updButtons();
},
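    // Icon reflects checked state: filled bullet when selected, empty radio circle otherwise.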
_getButtonIcon: function( input )
{
if ( input.is(':checked') ) {
return 'ui-icon-bullet';
} else {
return 'ui-icon-radio-off';
}
},
_updButtons: function()
{
var self = this;
$( 'input', this.element )
.each( function() {
var input = $(this),
icon = self._getButtonIcon( input );
input
.button( 'refresh' )
.button( 'option', 'icons', {primary: icon} );
});
},
refresh: function()
{
this.element.empty();
this._appendInputs();
},
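    // jQuery-UI-style accessor: getter with no arguments, setter when a value is passed.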
selectedValue: function( newValue )
{
if ( arguments.length ) {
$( 'input', this.element )
.each( function(){
$(this).removeAttr( 'checked' );
if ( $(this).val() == newValue ) {
$(this).attr( 'checked', 'checked' );
}
});
this._updButtons();
return null;
} else {
return $( 'input:checked', this.element ).val();
}
}
});
$.widget( "juicy.buttonselectmulti", $.juicy.buttonselect, {
destroy: function()
{
this.element.removeClass( 'juicy-buttonselectmulti' );
        $.juicy.buttonselect.prototype.destroy.call( this );
    },
_appendInputs: function()
{
var self = this,
o = this.options;
this.element.addClass( 'juicy-buttonselectmulti' );
$.each( o.items, function( key, val ){
self._newInput(
self.element,
{
type: 'checkbox',
id: o.name + '-' + key,
name: o.name,
value: key,
labelText: val,
wrap: o.wrapTemplate,
checked: true
}
)
.button()
.click( function() {
self._updButtons();
o.click();
});
});
self._updButtons();
},
_getButtonIcon: function( input )
{
if ( input.is(':checked') ) {
return 'ui-icon-circle-check';
} else {
return 'ui-icon-circle-plus';
}
},
selectedValue: function( newValue )
{
if ( arguments.length ) {
$( 'input', this.element )
.each( function(){
$(this).removeAttr( 'checked' );
if ( $.inArray( $(this).val(), newValue ) != -1 ) {
$(this).attr( 'checked', 'checked' );
}
});
this._updButtons();
return null;
} else {
var value = [];
$( 'input:checked', this.element )
.each( function(){
value.push( $(this).val() );
});
return value;
}
}
});
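// Illustrative setup (element id and option values are examples only):
//   $('#choices').buttonselect({
//       name: 'choice',
//       items: { a: 'Alpha', b: 'Beta' },
//       wrapTemplate: '<span></span>',
//       click: function() { /* selection changed */ }
//   });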
}( jQuery ) );
jquery.juicy.buttonselect.js | /*
* jUIcy Button Select
*
* Depends:
* jquery.ui.core.js
* jquery.ui.widget.js
* jquery.ui.button.js
*/
(function( $ ) {
$.widget( "juicy.buttonselect", {
options: {
name: '',
items: {},
wrapTemplate: '',
click: null
},
_create: function()
{
this.element.addClass( 'juicy-buttonselect' );
this._appendInputs();
},
_setOption: function( key, value )
{
$.Widget.prototype._setOption.apply( this, arguments );
switch( key ) {
case 'name':
case 'items':
case 'wrapTemplate':
this.refresh();
break;
}
},
destroy: function()
{
this.element
.removeClass( 'juicy-buttonselect juicy-buttonselect-single' )
.empty();
$.Widget.prototype.destroy.call( this );
},
/**
* settings = {
* type: radio|checkbox
* id: id attribute
* name: name attribute
* value: value attribute
* labelText: label text
* wrap: element to wrap around input
* checked: boolean
* }
*/
_newInput: function( target, settings )
{
var realTarget,
input =
$('<input />')
.attr( 'type', settings.type )
.attr( 'id', settings.id )
.attr( 'name', settings.name )
.attr( 'value', settings.value ),
label =
$('<label></label>')
.attr( 'for', settings.id )
.html( settings.labelText );
if ( settings.checked ) {
input.attr( 'checked', 'checked' );
}
if ( settings.wrap.length ) {
realTarget = $( settings.wrap ).appendTo( target );
} else |
realTarget.append( input, label );
return input;
},
_appendInputs: function()
{
var self = this,
o = this.options,
checked = true;
this.element.addClass( 'juicy-buttonselect-single' );
$.each( o.items, function( key, val ){
self._newInput(
self.element,
{
type: 'radio',
id: o.name + '-' + key,
name: o.name,
value: key,
labelText: val,
wrap: o.wrapTemplate,
checked: checked
}
)
.button()
.click( function(){
self._updButtons();
o.click();
});
checked = false;
});
self._updButtons();
},
_getButtonIcon: function( input )
{
if ( input.is(':checked') ) {
return 'ui-icon-bullet';
} else {
return 'ui-icon-radio-off';
}
},
_updButtons: function()
{
var self = this;
$( 'input', this.element )
.each( function() {
var input = $(this),
icon = self._getButtonIcon( input );
input
.button( 'refresh' )
.button( 'option', 'icons', {primary: icon} );
});
},
refresh: function()
{
this.element.empty();
this._appendInputs();
},
selectedValue: function( newValue )
{
if ( arguments.length ) {
$( 'input', this.element )
.each( function(){
$(this).removeAttr( 'checked' );
if ( $(this).val() == newValue ) {
$(this).attr( 'checked', 'checked' );
}
});
this._updButtons();
return null;
} else {
return $( 'input:checked', this.element ).val();
}
}
});
$.widget( "juicy.buttonselectmulti", $.juicy.buttonselect, {
destroy: function()
{
this.element.removeClass( 'juicy-buttonselectmulti' );
$.juicy.buttonselect.destroy.call( this );
},
_appendInputs: function()
{
var self = this,
o = this.options;
this.element.addClass( 'juicy-buttonselectmulti' );
$.each( o.items, function( key, val ){
self._newInput(
self.element,
{
type: 'checkbox',
id: o.name + '-' + key,
name: o.name,
value: key,
labelText: val,
wrap: o.wrapTemplate,
checked: true
}
)
.button()
.click( function() {
self._updButtons();
o.click();
});
});
self._updButtons();
},
_getButtonIcon: function( input )
{
if ( input.is(':checked') ) {
return 'ui-icon-circle-check';
} else {
return 'ui-icon-circle-plus';
}
},
selectedValue: function( newValue )
{
if ( arguments.length ) {
$( 'input', this.element )
.each( function(){
$(this).removeAttr( 'checked' );
if ( $.inArray( $(this).val(), newValue ) != -1 ) {
$(this).attr( 'checked', 'checked' );
}
});
this._updButtons();
return null;
} else {
var value = [];
$( 'input:checked', this.element )
.each( function(){
value.push( $(this).val() );
});
return value;
}
}
});
}( jQuery ) ); | {
realTarget = target;
} | conditional_block |
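// --- Hedged sketch of the buttonselectmulti variant defined above: it renders
// checkboxes (all checked by default) and selectedValue() returns an array.
// The '#toppings' element and its items are hypothetical.
$('#toppings').buttonselectmulti({
    name: 'topping',
    items: { ch: 'Cheese', mu: 'Mushroom', ol: 'Olive' },
    wrapTemplate: '',
    click: function () {}
});
$('#toppings').buttonselectmulti('selectedValue', ['ch', 'ol']); // check a subset
var picked = $('#toppings').buttonselectmulti('selectedValue');  // -> ['ch', 'ol']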
tests.js | script('../node_modules/domready/ready.js', function () {
domready(function() {
sink('Basic', function(test, ok, before, after) {
test('should call from chained ready calls', 4, function() {
script.ready('jquery', function() {
ok(true, 'loaded from ready callback')
})
script.ready('jquery', function() {
ok(true, 'called jquery callback again')
})
.ready('jquery', function() {
ok(true, 'called ready on a chain')
})
script('../vendor/jquery.js', 'jquery', function() {
ok(true, 'loaded from base callback')
})
})
test('multiple files can be loaded at once', 1, function() {
script(['../demos/js/foo.js', '../demos/js/bar.js'], function() {
ok(true, 'foo and bar have been loaded')
})
})
test('ready should wait for multiple files by name', 1, function() {
script(['../demos/js/baz.js', '../demos/js/thunk.js'], 'bundle').ready('bundle', function() {
ok(true, 'batch has been loaded')
})
})
test('ready should wait for several batches by name', 1, function() {
script('../vendor/yui-utilities.js', 'yui') | })
test('ready should not call a duplicate callback', 1, function() {
script.ready(['yui', 'moomoo'], function() {
console.log('TWICE')
ok(true, 'found yui and moomoo again')
})
})
test('ready should not call a callback a third time', 1, function() {
script.ready(['yui', 'moomoo'], function() {
console.log('THREE')
ok(true, 'found yui and moomoo again')
})
})
test('should load a single file without extra arguments', 1, function () {
var err = false
try {
script('../vendor/yui-utilities.js')
} catch (ex) {
err = true
console.log('wtf ex', ex)
} finally {
ok(!err, 'no error')
}
})
test('should callback a duplicate file without loading the file', 1, function () {
script('../vendor/yui-utilities.js', function () {
ok(true, 'loaded yui twice. nice')
})
})
test('onerror', 1, function () {
script('waaaaaaaaaaaa', function () {
ok(true, 'no waaaa')
})
})
test('setting script path', 3, function () {
script.path('../vendor/')
script(['patha', 'pathb', 'http://ded.github.com/morpheus/morpheus.js'], function () {
ok(patha == true, 'loaded patha.js')
ok(pathb == true, 'loaded pathb.js')
ok(typeof morpheus !== 'undefined', 'loaded morpheus.js from http')
})
})
test('synchronous ordered loading', 2, function () {

script.order(['order-a', 'order-b', 'order-c'], 'ordered-id', function () {
ok(true, 'loaded each file in order')
console.log('loaded each file in order')
})
script.ready('ordered-id', function () {
console.log('readiness by id')
ok(ordera && orderb && orderc, 'done listen for readiness by id')
})
})
})
start()
})
}) | script('../vendor/mootools.js', 'moomoo')
script.ready(['yui', 'moomoo'], function() {
console.log('ONCE')
ok(true, 'multiple batch has been loaded')
}) | random_line_split |
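// --- Hedged quick reference for the loader API the tests above exercise
// ($script.js-style). Paths are illustrative; named bundles gate ready()
// callbacks, and order() enforces sequential loading and execution.
script.path('../vendor/');                    // base path for bare names
script(['patha', 'pathb'], 'bundle');         // load a batch under a label
script.ready('bundle', function () { /* both loaded */ });
script.order(['a', 'b', 'c'], 'abc');         // strict load order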
maclearning.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Nicira Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.api.v2 import attributes
MAC_LEARNING = 'mac_learning_enabled'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
MAC_LEARNING: {'allow_post': True, 'allow_put': True,
'convert_to': attributes.convert_to_boolean,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
}
}
class Maclearning(object):
"""Extension class supporting port mac learning."""
@classmethod
def get_name(cls):
return "MAC Learning"
@classmethod
def get_alias(cls):
return "mac-learning"
@classmethod
def get_description(cls):
return "Provides mac learning capabilities"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/maclearning/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-05-1T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
return []
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
| return {} | conditional_block |
|
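# --- Hedged sketch of how an extension manager might consume the class above.
# Only the call shape of get_extended_resources() comes from the source; the
# instance and assertions below are illustrative.
ext = Maclearning()
attrs = ext.get_extended_resources("2.0")
assert attrs["ports"]["mac_learning_enabled"]["allow_post"] is True
assert ext.get_extended_resources("1.1") == {}  # any other version -> {}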
maclearning.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Nicira Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.api.v2 import attributes
MAC_LEARNING = 'mac_learning_enabled'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
MAC_LEARNING: {'allow_post': True, 'allow_put': True,
'convert_to': attributes.convert_to_boolean,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
}
}
class Maclearning(object):
"""Extension class supporting port mac learning."""
@classmethod
def get_name(cls):
return "MAC Learning"
@classmethod
def get_alias(cls):
|
@classmethod
def get_description(cls):
return "Provides mac learning capabilities"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/maclearning/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-05-1T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
return []
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| return "mac-learning" | identifier_body |
maclearning.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Nicira Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.api.v2 import attributes
MAC_LEARNING = 'mac_learning_enabled'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
MAC_LEARNING: {'allow_post': True, 'allow_put': True,
'convert_to': attributes.convert_to_boolean,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
}
}
class | (object):
"""Extension class supporting port mac learning."""
@classmethod
def get_name(cls):
return "MAC Learning"
@classmethod
def get_alias(cls):
return "mac-learning"
@classmethod
def get_description(cls):
return "Provides mac learning capabilities"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/maclearning/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-05-1T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
return []
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| Maclearning | identifier_name |
maclearning.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Nicira Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0 | #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.api.v2 import attributes
MAC_LEARNING = 'mac_learning_enabled'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
MAC_LEARNING: {'allow_post': True, 'allow_put': True,
'convert_to': attributes.convert_to_boolean,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True},
}
}
class Maclearning(object):
"""Extension class supporting port mac learning."""
@classmethod
def get_name(cls):
return "MAC Learning"
@classmethod
def get_alias(cls):
return "mac-learning"
@classmethod
def get_description(cls):
return "Provides mac learning capabilities"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/maclearning/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-05-1T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
return []
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {} | random_line_split |
|
linkedlist.rs | #![feature(associated_type_defaults)]
#![warn(clippy::linkedlist)]
#![allow(dead_code, clippy::needless_pass_by_value)]
extern crate alloc;
use alloc::collections::linked_list::LinkedList;
const C: LinkedList<i32> = LinkedList::new();
static S: LinkedList<i32> = LinkedList::new();
trait Foo {
type Baz = LinkedList<u8>;
fn foo(_: LinkedList<u8>);
const BAR: Option<LinkedList<u8>>;
}
// Ok, we don't want to warn for implementations; see issue #605.
impl Foo for LinkedList<u8> {
fn foo(_: LinkedList<u8>) {}
const BAR: Option<LinkedList<u8>> = None;
}
struct Bar;
impl Bar {
fn foo(_: LinkedList<u8>) {} |
pub fn test(my_favourite_linked_list: LinkedList<u8>) {
println!("{:?}", my_favourite_linked_list)
}
pub fn test_ret() -> Option<LinkedList<u8>> {
unimplemented!();
}
pub fn test_local_not_linted() {
let _: LinkedList<u8>;
}
fn main() {
test(LinkedList::new());
test_local_not_linted();
}
|
} | identifier_body |
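// --- Hedged note on the fixture above: clippy::linkedlist fires on
// LinkedList in signatures, consts, statics, and trait defaults, but not on
// trait impls (issue #605) or plain local bindings. The usual fix is a Vec:
pub fn test_vec(my_favourite_vec: Vec<u8>) {
    println!("{:?}", my_favourite_vec)
}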
linkedlist.rs | #![feature(associated_type_defaults)]
#![warn(clippy::linkedlist)]
#![allow(dead_code, clippy::needless_pass_by_value)]
extern crate alloc;
use alloc::collections::linked_list::LinkedList;
const C: LinkedList<i32> = LinkedList::new();
static S: LinkedList<i32> = LinkedList::new();
trait Foo {
type Baz = LinkedList<u8>;
fn foo(_: LinkedList<u8>);
const BAR: Option<LinkedList<u8>>;
}
// Ok, we don't want to warn for implementations; see issue #605.
impl Foo for LinkedList<u8> {
fn foo(_: LinkedList<u8>) {}
const BAR: Option<LinkedList<u8>> = None;
}
struct Bar;
impl Bar {
fn foo(_: LinkedList<u8>) {}
}
pub fn test(my_favourite_linked_list: LinkedList<u8>) {
println!("{:?}", my_favourite_linked_list)
}
pub fn test_ret() -> Option<LinkedList<u8>> {
unimplemented!();
}
pub fn test_local_not_linted() { | test_local_not_linted();
} | let _: LinkedList<u8>;
}
fn main() {
test(LinkedList::new()); | random_line_split |
linkedlist.rs | #![feature(associated_type_defaults)]
#![warn(clippy::linkedlist)]
#![allow(dead_code, clippy::needless_pass_by_value)]
extern crate alloc;
use alloc::collections::linked_list::LinkedList;
const C: LinkedList<i32> = LinkedList::new();
static S: LinkedList<i32> = LinkedList::new();
trait Foo {
type Baz = LinkedList<u8>;
fn foo(_: LinkedList<u8>);
const BAR: Option<LinkedList<u8>>;
}
// Ok, we don't want to warn for implementations; see issue #605.
impl Foo for LinkedList<u8> {
fn foo(_: LinkedList<u8>) {}
const BAR: Option<LinkedList<u8>> = None;
}
struct Bar;
impl Bar {
fn foo(_: LinkedList<u8>) {}
}
pub fn test(my_favourite_linked_list: LinkedList<u8>) {
println!("{:?}", my_favourite_linked_list)
}
pub fn test_ret() -> Option<LinkedList<u8>> {
unimplemented!();
}
pub fn te | {
let _: LinkedList<u8>;
}
fn main() {
test(LinkedList::new());
test_local_not_linted();
}
| st_local_not_linted() | identifier_name |
check_static_recursion.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This compiler pass detects static items that refer to themselves
// recursively.
use ast_map;
use session::Session;
use middle::def::{DefStatic, DefConst, DefAssociatedConst, DefMap};
use syntax::{ast, ast_util};
use syntax::codemap::Span;
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'ast: 'a> {
sess: &'a Session,
def_map: &'a DefMap,
ast_map: &'a ast_map::Map<'ast>
}
impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
match it.node {
ast::ItemStatic(_, _, ref expr) |
ast::ItemConst(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &it.span);
recursion_visitor.visit_item(it);
visit::walk_expr(self, &*expr)
},
_ => visit::walk_item(self, it)
}
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
match ti.node {
ast::ConstTraitItem(_, ref default) => {
if let Some(ref expr) = *default {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ti.span);
recursion_visitor.visit_trait_item(ti);
visit::walk_expr(self, &*expr)
}
}
_ => visit::walk_trait_item(self, ti)
}
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
match ii.node {
ast::ConstImplItem(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ii.span);
recursion_visitor.visit_impl_item(ii);
visit::walk_expr(self, &*expr)
}
_ => visit::walk_impl_item(self, ii)
}
}
}
pub fn check_crate<'ast>(sess: &Session,
krate: &ast::Crate,
def_map: &DefMap,
ast_map: &ast_map::Map<'ast>) {
let mut visitor = CheckCrateVisitor {
sess: sess,
def_map: def_map,
ast_map: ast_map
};
visit::walk_crate(&mut visitor, krate);
sess.abort_if_errors();
}
struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
root_span: &'a Span,
sess: &'a Session,
ast_map: &'a ast_map::Map<'ast>,
def_map: &'a DefMap,
idstack: Vec<ast::NodeId>
}
impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> {
fn new(v: &CheckCrateVisitor<'a, 'ast>, span: &'a Span)
-> CheckItemRecursionVisitor<'a, 'ast> {
CheckItemRecursionVisitor {
root_span: span,
sess: v.sess,
ast_map: v.ast_map,
def_map: v.def_map,
idstack: Vec::new()
}
}
fn with_item_id_pushed<F>(&mut self, id: ast::NodeId, f: F)
where F: Fn(&mut Self) |
}
impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
self.with_item_id_pushed(it.id, |v| visit::walk_item(v, it));
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.with_item_id_pushed(ti.id, |v| visit::walk_trait_item(v, ti));
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.with_item_id_pushed(ii.id, |v| visit::walk_impl_item(v, ii));
}
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
ast::ExprPath(..) => {
match self.def_map.borrow().get(&e.id).map(|d| d.base_def) {
Some(DefStatic(def_id, _)) |
Some(DefAssociatedConst(def_id, _)) |
Some(DefConst(def_id)) if
ast_util::is_local(def_id) => {
match self.ast_map.get(def_id.node) {
ast_map::NodeItem(item) =>
self.visit_item(item),
ast_map::NodeTraitItem(item) =>
self.visit_trait_item(item),
ast_map::NodeImplItem(item) =>
self.visit_impl_item(item),
ast_map::NodeForeignItem(_) => {},
_ => {
span_err!(self.sess, e.span, E0266,
"expected item, found {}",
self.ast_map.node_to_string(def_id.node));
return;
},
}
}
_ => ()
}
},
_ => ()
}
visit::walk_expr(self, e);
}
}
| {
if self.idstack.iter().any(|x| x == &(id)) {
span_err!(self.sess, *self.root_span, E0265, "recursive constant");
return;
}
self.idstack.push(id);
f(self);
self.idstack.pop();
} | identifier_body |
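// --- Hedged illustration of what the pass above rejects: constants or
// statics whose initializers reach themselves through the def map trigger
// E0265 "recursive constant" via the idstack cycle check.
const X: u32 = Y; // X -> Y
const Y: u32 = X; // Y -> X: the visitor revisits X's NodeId and reports E0265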
check_static_recursion.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This compiler pass detects static items that refer to themselves
// recursively.
use ast_map;
use session::Session;
use middle::def::{DefStatic, DefConst, DefAssociatedConst, DefMap};
use syntax::{ast, ast_util};
use syntax::codemap::Span;
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'ast: 'a> {
sess: &'a Session,
def_map: &'a DefMap,
ast_map: &'a ast_map::Map<'ast>
}
impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
match it.node {
ast::ItemStatic(_, _, ref expr) |
ast::ItemConst(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &it.span);
recursion_visitor.visit_item(it);
visit::walk_expr(self, &*expr)
},
_ => visit::walk_item(self, it)
}
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
match ti.node {
ast::ConstTraitItem(_, ref default) => {
if let Some(ref expr) = *default {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ti.span);
recursion_visitor.visit_trait_item(ti);
visit::walk_expr(self, &*expr)
}
}
_ => visit::walk_trait_item(self, ti)
}
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
match ii.node {
ast::ConstImplItem(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ii.span);
recursion_visitor.visit_impl_item(ii);
visit::walk_expr(self, &*expr)
}
_ => visit::walk_impl_item(self, ii)
}
}
}
pub fn check_crate<'ast>(sess: &Session,
krate: &ast::Crate,
def_map: &DefMap,
ast_map: &ast_map::Map<'ast>) {
let mut visitor = CheckCrateVisitor {
sess: sess,
def_map: def_map,
ast_map: ast_map
};
visit::walk_crate(&mut visitor, krate);
sess.abort_if_errors();
}
struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
root_span: &'a Span,
sess: &'a Session,
ast_map: &'a ast_map::Map<'ast>,
def_map: &'a DefMap,
idstack: Vec<ast::NodeId>
}
impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> {
fn new(v: &CheckCrateVisitor<'a, 'ast>, span: &'a Span)
-> CheckItemRecursionVisitor<'a, 'ast> {
CheckItemRecursionVisitor {
root_span: span,
sess: v.sess,
ast_map: v.ast_map,
def_map: v.def_map,
idstack: Vec::new()
}
}
fn with_item_id_pushed<F>(&mut self, id: ast::NodeId, f: F)
where F: Fn(&mut Self) {
if self.idstack.iter().any(|x| x == &(id)) {
span_err!(self.sess, *self.root_span, E0265, "recursive constant");
return;
}
self.idstack.push(id);
f(self);
self.idstack.pop();
}
}
impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> { | fn visit_item(&mut self, it: &ast::Item) {
self.with_item_id_pushed(it.id, |v| visit::walk_item(v, it));
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.with_item_id_pushed(ti.id, |v| visit::walk_trait_item(v, ti));
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.with_item_id_pushed(ii.id, |v| visit::walk_impl_item(v, ii));
}
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
ast::ExprPath(..) => {
match self.def_map.borrow().get(&e.id).map(|d| d.base_def) {
Some(DefStatic(def_id, _)) |
Some(DefAssociatedConst(def_id, _)) |
Some(DefConst(def_id)) if
ast_util::is_local(def_id) => {
match self.ast_map.get(def_id.node) {
ast_map::NodeItem(item) =>
self.visit_item(item),
ast_map::NodeTraitItem(item) =>
self.visit_trait_item(item),
ast_map::NodeImplItem(item) =>
self.visit_impl_item(item),
ast_map::NodeForeignItem(_) => {},
_ => {
span_err!(self.sess, e.span, E0266,
"expected item, found {}",
self.ast_map.node_to_string(def_id.node));
return;
},
}
}
_ => ()
}
},
_ => ()
}
visit::walk_expr(self, e);
}
} | random_line_split |
|
check_static_recursion.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This compiler pass detects static items that refer to themselves
// recursively.
use ast_map;
use session::Session;
use middle::def::{DefStatic, DefConst, DefAssociatedConst, DefMap};
use syntax::{ast, ast_util};
use syntax::codemap::Span;
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'ast: 'a> {
sess: &'a Session,
def_map: &'a DefMap,
ast_map: &'a ast_map::Map<'ast>
}
impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
match it.node {
ast::ItemStatic(_, _, ref expr) |
ast::ItemConst(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &it.span);
recursion_visitor.visit_item(it);
visit::walk_expr(self, &*expr)
},
_ => visit::walk_item(self, it)
}
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
match ti.node {
ast::ConstTraitItem(_, ref default) => {
if let Some(ref expr) = *default {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ti.span);
recursion_visitor.visit_trait_item(ti);
visit::walk_expr(self, &*expr)
}
}
_ => visit::walk_trait_item(self, ti)
}
}
fn | (&mut self, ii: &ast::ImplItem) {
match ii.node {
ast::ConstImplItem(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ii.span);
recursion_visitor.visit_impl_item(ii);
visit::walk_expr(self, &*expr)
}
_ => visit::walk_impl_item(self, ii)
}
}
}
pub fn check_crate<'ast>(sess: &Session,
krate: &ast::Crate,
def_map: &DefMap,
ast_map: &ast_map::Map<'ast>) {
let mut visitor = CheckCrateVisitor {
sess: sess,
def_map: def_map,
ast_map: ast_map
};
visit::walk_crate(&mut visitor, krate);
sess.abort_if_errors();
}
struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
root_span: &'a Span,
sess: &'a Session,
ast_map: &'a ast_map::Map<'ast>,
def_map: &'a DefMap,
idstack: Vec<ast::NodeId>
}
impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> {
fn new(v: &CheckCrateVisitor<'a, 'ast>, span: &'a Span)
-> CheckItemRecursionVisitor<'a, 'ast> {
CheckItemRecursionVisitor {
root_span: span,
sess: v.sess,
ast_map: v.ast_map,
def_map: v.def_map,
idstack: Vec::new()
}
}
fn with_item_id_pushed<F>(&mut self, id: ast::NodeId, f: F)
where F: Fn(&mut Self) {
if self.idstack.iter().any(|x| x == &(id)) {
span_err!(self.sess, *self.root_span, E0265, "recursive constant");
return;
}
self.idstack.push(id);
f(self);
self.idstack.pop();
}
}
impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
self.with_item_id_pushed(it.id, |v| visit::walk_item(v, it));
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.with_item_id_pushed(ti.id, |v| visit::walk_trait_item(v, ti));
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.with_item_id_pushed(ii.id, |v| visit::walk_impl_item(v, ii));
}
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
ast::ExprPath(..) => {
match self.def_map.borrow().get(&e.id).map(|d| d.base_def) {
Some(DefStatic(def_id, _)) |
Some(DefAssociatedConst(def_id, _)) |
Some(DefConst(def_id)) if
ast_util::is_local(def_id) => {
match self.ast_map.get(def_id.node) {
ast_map::NodeItem(item) =>
self.visit_item(item),
ast_map::NodeTraitItem(item) =>
self.visit_trait_item(item),
ast_map::NodeImplItem(item) =>
self.visit_impl_item(item),
ast_map::NodeForeignItem(_) => {},
_ => {
span_err!(self.sess, e.span, E0266,
"expected item, found {}",
self.ast_map.node_to_string(def_id.node));
return;
},
}
}
_ => ()
}
},
_ => ()
}
visit::walk_expr(self, e);
}
}
| visit_impl_item | identifier_name |
directive.ts | import * as host from '../utils/host';
import * as injector from '../utils/injector';
import * as input from '../utils/input';
import * as output from '../utils/output';
function parseSelector(selector: string) {
const regex = [
// {key: 'E', value: /^([a-zA-Z])$/},
{key: 'A', value: /^\[([a-zA-Z]+)\]$/},
{key: 'C', value: /^\.([a-zA-Z]+)$/}
];
for (let i = 0; i < regex.length; i++) {
const result = selector.match(regex[i].value); |
if (result !== null) {
return {restrict: regex[i].key, name: result[1]};
}
};
throw new Error(`Selector ${selector} could not be parsed`);
}
export function bootstrap(ngModule, target) {
const annotations = target.__annotations__;
const directive = annotations.directive;
const selector = parseSelector(directive.selector);
const hostBindings = host.parse(directive.host || {});
// Inject the services
injector.inject(ngModule, target);
ngModule
.controller(target.name, target)
.directive(selector.name, ['$rootScope', ($rootScope) => {
const declaration: any = {
restrict: selector.restrict,
scope: {},
bindToController: {},
controller: target.name,
controllerAs: '$ctrl',
link: (scope, el) => host.bind(scope, el, hostBindings)
};
// Bind inputs and outputs
input.bind(target, declaration);
output.bind($rootScope, target, declaration);
return declaration;
}]);
} | random_line_split |
|
directive.ts | import * as host from '../utils/host';
import * as injector from '../utils/injector';
import * as input from '../utils/input';
import * as output from '../utils/output';
function parseSelector(selector: string) {
const regex = [
// {key: 'E', value: /^([a-zA-Z])$/},
{key: 'A', value: /^\[([a-zA-Z]+)\]$/},
{key: 'C', value: /^\.([a-zA-Z]+)$/}
];
for (let i = 0; i < regex.length; i++) {
const result = selector.match(regex[i].value);
if (result !== null) {
return {restrict: regex[i].key, name: result[1]};
}
};
throw new Error(`Selector ${selector} could not be parsed`);
}
export function bootstrap(ngModule, target) | {
const annotations = target.__annotations__;
const directive = annotations.directive;
const selector = parseSelector(directive.selector);
const hostBindings = host.parse(directive.host || {});
// Inject the services
injector.inject(ngModule, target);
ngModule
.controller(target.name, target)
.directive(selector.name, ['$rootScope', ($rootScope) => {
const declaration: any = {
restrict: selector.restrict,
scope: {},
bindToController: {},
controller: target.name,
controllerAs: '$ctrl',
link: (scope, el) => host.bind(scope, el, hostBindings)
};
// Bind inputs and outputs
input.bind(target, declaration);
output.bind($rootScope, target, declaration);
return declaration;
}]);
} | identifier_body |
|
directive.ts | import * as host from '../utils/host';
import * as injector from '../utils/injector';
import * as input from '../utils/input';
import * as output from '../utils/output';
function parseSelector(selector: string) {
const regex = [
// {key: 'E', value: /^([a-zA-Z])$/},
{key: 'A', value: /^\[([a-zA-Z]+)\]$/},
{key: 'C', value: /^\.([a-zA-Z]+)$/}
];
for (let i = 0; i < regex.length; i++) {
const result = selector.match(regex[i].value);
if (result !== null) |
};
throw new Error(`Selector ${selector} could not be parsed`);
}
export function bootstrap(ngModule, target) {
const annotations = target.__annotations__;
const directive = annotations.directive;
const selector = parseSelector(directive.selector);
const hostBindings = host.parse(directive.host || {});
// Inject the services
injector.inject(ngModule, target);
ngModule
.controller(target.name, target)
.directive(selector.name, ['$rootScope', ($rootScope) => {
const declaration: any = {
restrict: selector.restrict,
scope: {},
bindToController: {},
controller: target.name,
controllerAs: '$ctrl',
link: (scope, el) => host.bind(scope, el, hostBindings)
};
// Bind inputs and outputs
input.bind(target, declaration);
output.bind($rootScope, target, declaration);
return declaration;
}]);
}
| {
return {restrict: regex[i].key, name: result[1]};
} | conditional_block |
directive.ts | import * as host from '../utils/host';
import * as injector from '../utils/injector';
import * as input from '../utils/input';
import * as output from '../utils/output';
function parseSelector(selector: string) {
const regex = [
// {key: 'E', value: /^([a-zA-Z])$/},
{key: 'A', value: /^\[([a-zA-Z]+)\]$/},
{key: 'C', value: /^\.([a-zA-Z]+)$/}
];
for (let i = 0; i < regex.length; i++) {
const result = selector.match(regex[i].value);
if (result !== null) {
return {restrict: regex[i].key, name: result[1]};
}
};
throw new Error(`Selector ${selector} could not be parsed`);
}
export function | (ngModule, target) {
const annotations = target.__annotations__;
const directive = annotations.directive;
const selector = parseSelector(directive.selector);
const hostBindings = host.parse(directive.host || {});
// Inject the services
injector.inject(ngModule, target);
ngModule
.controller(target.name, target)
.directive(selector.name, ['$rootScope', ($rootScope) => {
const declaration: any = {
restrict: selector.restrict,
scope: {},
bindToController: {},
controller: target.name,
controllerAs: '$ctrl',
link: (scope, el) => host.bind(scope, el, hostBindings)
};
// Bind inputs and outputs
input.bind(target, declaration);
output.bind($rootScope, target, declaration);
return declaration;
}]);
}
| bootstrap | identifier_name |
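// --- Hedged sketch of wiring the bootstrap helper above into an Angular 1.x
// module. The decorator that fills __annotations__ lives elsewhere; the module
// name, selector, and class below are hypothetical.
import * as angular from 'angular';
import { bootstrap } from './directive';

const app = angular.module('demo', []);

class HighlightDirective {
    static __annotations__ = {
        directive: { selector: '[highlight]', host: { '(click)': 'onClick()' } }
    };
    onClick() { /* ... */ }
}

bootstrap(app, HighlightDirective); // registers controller + 'highlight' directive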
associated-types-ref-in-struct-literal.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test associated type references in a struct literal. Issue #20535.
pub trait Foo {
type Bar;
fn dummy(&self) { }
}
impl Foo for isize {
type Bar = isize;
}
struct Thing<F: Foo> {
a: F,
b: F::Bar,
}
fn | () {
let thing = Thing{a: 1, b: 2};
assert_eq!(thing.a + 1, thing.b);
}
| main | identifier_name |
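// --- Hedged elaboration of the pattern tested above: in Thing { a: 1, b: 2 }
// the field type F::Bar resolves through the Foo impl, so F = isize gives
// b: isize. The fully qualified form makes that explicit:
fn qualified_demo() {
    let b: <isize as Foo>::Bar = 2; // same type the struct literal infers
    let t = Thing { a: 1, b: b };
    assert_eq!(t.a + 1, t.b);
}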
associated-types-ref-in-struct-literal.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | // except according to those terms.
// Test associated type references in a struct literal. Issue #20535.
pub trait Foo {
type Bar;
fn dummy(&self) { }
}
impl Foo for isize {
type Bar = isize;
}
struct Thing<F: Foo> {
a: F,
b: F::Bar,
}
fn main() {
let thing = Thing{a: 1, b: 2};
assert_eq!(thing.a + 1, thing.b);
} | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | random_line_split |
associated-types-ref-in-struct-literal.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test associated type references in a struct literal. Issue #20535.
pub trait Foo {
type Bar;
fn dummy(&self) { }
}
impl Foo for isize {
type Bar = isize;
}
struct Thing<F: Foo> {
a: F,
b: F::Bar,
}
fn main() | {
let thing = Thing{a: 1, b: 2};
assert_eq!(thing.a + 1, thing.b);
} | identifier_body |
|
0001_initial.py | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('layers', '0002_initial_step2'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Upload',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('import_id', models.BigIntegerField(null=True)),
('state', models.CharField(max_length=16)),
('date', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'date')),
('upload_dir', models.CharField(max_length=100, null=True)),
('name', models.CharField(max_length=64, null=True)),
('complete', models.BooleanField(default=False)), | ('metadata', models.TextField(null=True)),
('mosaic_time_regex', models.CharField(max_length=128, null=True)),
('mosaic_time_value', models.CharField(max_length=128, null=True)),
('mosaic_elev_regex', models.CharField(max_length=128, null=True)),
('mosaic_elev_value', models.CharField(max_length=128, null=True)),
('layer', models.ForeignKey(to='layers.Layer', null=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['-date'],
},
),
migrations.CreateModel(
name='UploadFile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('file', models.FileField(upload_to=b'uploads')),
('slug', models.SlugField(blank=True)),
('upload', models.ForeignKey(blank=True, to='upload.Upload', null=True)),
],
),
] | ('session', models.TextField(null=True)), | random_line_split |
0001_initial.py | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.conf import settings
class Migration(migrations.Migration):
| dependencies = [
('layers', '0002_initial_step2'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Upload',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('import_id', models.BigIntegerField(null=True)),
('state', models.CharField(max_length=16)),
('date', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'date')),
('upload_dir', models.CharField(max_length=100, null=True)),
('name', models.CharField(max_length=64, null=True)),
('complete', models.BooleanField(default=False)),
('session', models.TextField(null=True)),
('metadata', models.TextField(null=True)),
('mosaic_time_regex', models.CharField(max_length=128, null=True)),
('mosaic_time_value', models.CharField(max_length=128, null=True)),
('mosaic_elev_regex', models.CharField(max_length=128, null=True)),
('mosaic_elev_value', models.CharField(max_length=128, null=True)),
('layer', models.ForeignKey(to='layers.Layer', null=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['-date'],
},
),
migrations.CreateModel(
name='UploadFile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('file', models.FileField(upload_to=b'uploads')),
('slug', models.SlugField(blank=True)),
('upload', models.ForeignKey(blank=True, to='upload.Upload', null=True)),
],
),
] | identifier_body |
|
0001_initial.py | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.conf import settings
class | (migrations.Migration):
dependencies = [
('layers', '0002_initial_step2'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Upload',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('import_id', models.BigIntegerField(null=True)),
('state', models.CharField(max_length=16)),
('date', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'date')),
('upload_dir', models.CharField(max_length=100, null=True)),
('name', models.CharField(max_length=64, null=True)),
('complete', models.BooleanField(default=False)),
('session', models.TextField(null=True)),
('metadata', models.TextField(null=True)),
('mosaic_time_regex', models.CharField(max_length=128, null=True)),
('mosaic_time_value', models.CharField(max_length=128, null=True)),
('mosaic_elev_regex', models.CharField(max_length=128, null=True)),
('mosaic_elev_value', models.CharField(max_length=128, null=True)),
('layer', models.ForeignKey(to='layers.Layer', null=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['-date'],
},
),
migrations.CreateModel(
name='UploadFile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('file', models.FileField(upload_to=b'uploads')),
('slug', models.SlugField(blank=True)),
('upload', models.ForeignKey(blank=True, to='upload.Upload', null=True)),
],
),
]
| Migration | identifier_name |
app.py | # -*- coding: utf-8 -*-
from flask import (Flask, request, session, render_template, url_for, redirect,
jsonify)
app = Flask(__name__)
app.debug = True
app.secret_key = 'dummy secret key'
from flaskext.mitten import Mitten
mitten = Mitten(app) # apply Mitten
@app.route('/')
def | ():
if session.get('logged_in'):
return redirect(url_for('home'))
return render_template('index.html')
@app.route('/home/')
def home():
if not session.get('logged_in'):
return redirect(url_for('index'))
return render_template('home.html')
# A POST request is protected from csrf automatically
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html')
else:
username = request.form.get('username')
password = request.form.get('password')
session.regenerate() # avoid session fixation
session['username'] = username
session['logged_in'] = True
return redirect(url_for('home'))
@app.route('/logout/')
def logout():
session.destroy()
return redirect(url_for('home'))
@mitten.csrf_exempt # excluded from csrf protection
@app.route('/public_api/', methods=['POST'])
def public_api():
return "POST was received successfully.", 200
@mitten.json
@app.route('/json_api/')
def json_api():
return jsonify(result='success')
@app.errorhandler(500)
def exception_handler(error):
return render_template('error.html')
if __name__ == '__main__':
app.run(host='localhost', port=8080)
| index | identifier_name |
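# --- Hedged sketch: exercising the app above with Flask's test client. With
# Mitten applied, non-exempt POSTs need a CSRF token, while /public_api/ is
# exempt; the assertions are illustrative and assume templates are present.
client = app.test_client()
assert client.get('/').status_code == 200               # renders index.html
assert client.post('/public_api/').status_code == 200   # csrf_exempt route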
app.py | # -*- coding: utf-8 -*-
from flask import (Flask, request, session, render_template, url_for, redirect,
jsonify)
app = Flask(__name__)
app.debug = True
app.secret_key = 'dummy secret key'
from flaskext.mitten import Mitten
mitten = Mitten(app) # apply Mitten
@app.route('/')
def index():
if session.get('logged_in'):
return redirect(url_for('home'))
return render_template('index.html')
@app.route('/home/')
def home():
if not session.get('logged_in'):
return redirect(url_for('index'))
return render_template('home.html')
# A POST request is protected from csrf automatically
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html')
else:
username = request.form.get('username')
password = request.form.get('password')
session.regenerate() # avoid session fixation
session['username'] = username
session['logged_in'] = True
return redirect(url_for('home'))
@app.route('/logout/')
def logout():
session.destroy()
return redirect(url_for('home'))
@mitten.csrf_exempt # excluded from csrf protection
@app.route('/public_api/', methods=['POST'])
def public_api():
return "POST was received successfully.", 200
@mitten.json
@app.route('/json_api/')
def json_api():
return jsonify(result='success')
@app.errorhandler(500)
def exception_handler(error):
return render_template('error.html')
if __name__ == '__main__':
| app.run(host='localhost', port=8080) | conditional_block |
|
app.py | # -*- coding: utf-8 -*-
from flask import (Flask, request, session, render_template, url_for, redirect,
jsonify)
app = Flask(__name__)
app.debug = True
app.secret_key = 'dummy secret key'
from flaskext.mitten import Mitten
mitten = Mitten(app) # apply Mitten
@app.route('/')
def index():
|
@app.route('/home/')
def home():
if not session.get('logged_in'):
return redirect(url_for('index'))
return render_template('home.html')
# A POST request is protected from csrf automatically
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html')
else:
username = request.form.get('username')
password = request.form.get('password')
session.regenerate() # avoid session fixation
session['username'] = username
session['logged_in'] = True
return redirect(url_for('home'))
@app.route('/logout/')
def logout():
session.destroy()
return redirect(url_for('home'))
@mitten.csrf_exempt # excluded from csrf protection
@app.route('/public_api/', methods=['POST'])
def public_api():
return "POST was received successfully.", 200
@mitten.json
@app.route('/json_api/')
def json_api():
return jsonify(result='success')
@app.errorhandler(500)
def exception_handler(error):
return render_template('error.html')
if __name__ == '__main__':
app.run(host='localhost', port=8080)
| if session.get('logged_in'):
return redirect(url_for('home'))
return render_template('index.html') | identifier_body |
app.py | # -*- coding: utf-8 -*-
from flask import (Flask, request, session, render_template, url_for, redirect,
jsonify)
app = Flask(__name__)
app.debug = True
app.secret_key = 'dummy secret key'
from flaskext.mitten import Mitten
mitten = Mitten(app) # apply Mitten
@app.route('/')
def index():
if session.get('logged_in'):
return redirect(url_for('home'))
return render_template('index.html')
@app.route('/home/')
def home():
if not session.get('logged_in'):
return redirect(url_for('index'))
return render_template('home.html')
# A POST request is protected from csrf automatically
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html')
else:
username = request.form.get('username')
password = request.form.get('password')
session.regenerate() # avoid session fixation
session['username'] = username
| return redirect(url_for('home'))
@app.route('/logout/')
def logout():
session.destroy()
return redirect(url_for('home'))
@mitten.csrf_exempt # excluded from csrf protection
@app.route('/public_api/', methods=['POST'])
def public_api():
return "POST was received successfully.", 200
@mitten.json
@app.route('/json_api/')
def json_api():
return jsonify(result='success')
@app.errorhandler(500)
def exception_handler(error):
return render_template('error.html')
if __name__ == '__main__':
app.run(host='localhost', port=8080) | session['logged_in'] = True
| random_line_split |
amazon-settings.ts | import { Component } from '@angular/core';
import * as _ from 'lodash';
// Providers
import { ConfigProvider } from '../../../../providers/config/config';
import { HomeIntegrationsProvider } from '../../../../providers/home-integrations/home-integrations';
@Component({
selector: 'page-amazon-settings',
templateUrl: 'amazon-settings.html',
})
export class | {
private serviceName: string = 'amazon';
public showInHome: any;
public service: any;
constructor(
private configProvider: ConfigProvider,
private homeIntegrationsProvider: HomeIntegrationsProvider
) {
this.service = _.filter(this.homeIntegrationsProvider.get(), { name: this.serviceName });
this.showInHome = !!this.service[0].show;
}
public showInHomeSwitch(): void {
let opts = {
showIntegration: { [this.serviceName] : this.showInHome }
};
this.homeIntegrationsProvider.updateConfig(this.serviceName, this.showInHome);
this.configProvider.set(opts);
}
}
| AmazonSettingsPage | identifier_name |
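<!-- Hedged sketch of the template side (amazon-settings.html) these bindings
     assume: a toggle bound to showInHome that calls showInHomeSwitch() on
     change. The markup is illustrative, not the actual template. -->
<ion-toggle [(ngModel)]="showInHome" (ionChange)="showInHomeSwitch()"></ion-toggle>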
amazon-settings.ts | import { Component } from '@angular/core';
import * as _ from 'lodash';
// Providers
import { ConfigProvider } from '../../../../providers/config/config';
import { HomeIntegrationsProvider } from '../../../../providers/home-integrations/home-integrations';
@Component({
selector: 'page-amazon-settings',
templateUrl: 'amazon-settings.html',
})
export class AmazonSettingsPage {
private serviceName: string = 'amazon';
public showInHome: any;
public service: any;
constructor(
private configProvider: ConfigProvider,
private homeIntegrationsProvider: HomeIntegrationsProvider
) {
this.service = _.filter(this.homeIntegrationsProvider.get(), { name: this.serviceName });
this.showInHome = !!this.service[0].show; | };
this.homeIntegrationsProvider.updateConfig(this.serviceName, this.showInHome);
this.configProvider.set(opts);
}
} | }
public showInHomeSwitch(): void {
let opts = {
showIntegration: { [this.serviceName] : this.showInHome } | random_line_split |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
#![feature(box_syntax, int_uint)]
#![allow(unstable)]
#[macro_use] extern crate bitflags;
#[cfg(target_os="macos")]
extern crate cgl;
extern crate compositing;
extern crate geom;
extern crate gleam;
extern crate glutin;
extern crate layers;
extern crate libc;
extern crate msg;
extern crate time;
extern crate util;
extern crate egl;
use compositing::windowing::WindowEvent;
use geom::scale_factor::ScaleFactor;
use std::rc::Rc;
use window::Window;
use util::opts;
pub mod window;
pub trait NestedEventLoopListener {
fn handle_event_from_nested_event_loop(&mut self, event: WindowEvent) -> bool;
}
pub fn create_window() -> Rc<Window> {
// Read command-line options.
let opts = opts::get();
let foreground = opts.output_file.is_none();
let scale_factor = opts.device_pixels_per_px.unwrap_or(ScaleFactor(1.0));
let size = opts.initial_window_size.as_f32() * scale_factor;
// Open a window.
Window::new(foreground, size.as_uint().cast().unwrap())
} |
//! A simple application that uses glutin to open a window for Servo to display in. | random_line_split |
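// --- Hedged sketch of how a shell might drive the helper above; only
// create_window() comes from this file, the event pump is elided.
fn run_shell() {
    let window = create_window(); // Rc<Window>, sized from command-line opts
    // ... hand `window` to the compositor and pump glutin events ...
}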
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A simple application that uses glutin to open a window for Servo to display in.
#![feature(box_syntax, int_uint)]
#![allow(unstable)]
#[macro_use] extern crate bitflags;
#[cfg(target_os="macos")]
extern crate cgl;
extern crate compositing;
extern crate geom;
extern crate gleam;
extern crate glutin;
extern crate layers;
extern crate libc;
extern crate msg;
extern crate time;
extern crate util;
extern crate egl;
use compositing::windowing::WindowEvent;
use geom::scale_factor::ScaleFactor;
use std::rc::Rc;
use window::Window;
use util::opts;
pub mod window;
pub trait NestedEventLoopListener {
fn handle_event_from_nested_event_loop(&mut self, event: WindowEvent) -> bool;
}
pub fn | () -> Rc<Window> {
// Read command-line options.
let opts = opts::get();
let foreground = opts.output_file.is_none();
let scale_factor = opts.device_pixels_per_px.unwrap_or(ScaleFactor(1.0));
let size = opts.initial_window_size.as_f32() * scale_factor;
// Open a window.
Window::new(foreground, size.as_uint().cast().unwrap())
}
| create_window | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A simple application that uses glutin to open a window for Servo to display in.
#![feature(box_syntax, int_uint)]
#![allow(unstable)]
#[macro_use] extern crate bitflags;
#[cfg(target_os="macos")]
extern crate cgl;
extern crate compositing;
extern crate geom;
extern crate gleam;
extern crate glutin;
extern crate layers;
extern crate libc;
extern crate msg;
extern crate time;
extern crate util;
extern crate egl;
use compositing::windowing::WindowEvent;
use geom::scale_factor::ScaleFactor;
use std::rc::Rc;
use window::Window;
use util::opts;
pub mod window;
pub trait NestedEventLoopListener {
fn handle_event_from_nested_event_loop(&mut self, event: WindowEvent) -> bool;
}
pub fn create_window() -> Rc<Window> | {
// Read command-line options.
let opts = opts::get();
let foreground = opts.output_file.is_none();
let scale_factor = opts.device_pixels_per_px.unwrap_or(ScaleFactor(1.0));
let size = opts.initial_window_size.as_f32() * scale_factor;
// Open a window.
Window::new(foreground, size.as_uint().cast().unwrap())
} | identifier_body |
|
testifacecache.py | #!/usr/bin/env python
from basetest import BaseTest
import sys, tempfile, os, time
import unittest
import data
sys.path.insert(0, '..')
from zeroinstall.injector import model, gpg, trust | from zeroinstall.injector.iface_cache import PendingFeed
from zeroinstall.support import basedir
class TestIfaceCache(BaseTest):
def testList(self):
iface_cache = self.config.iface_cache
self.assertEquals([], iface_cache.list_all_interfaces())
iface_dir = basedir.save_cache_path(config_site, 'interfaces')
file(os.path.join(iface_dir, 'http%3a%2f%2ffoo'), 'w').close()
self.assertEquals(['http://foo'],
iface_cache.list_all_interfaces())
# TODO: test overrides
def testCheckSigned(self):
iface_cache = self.config.iface_cache
trust.trust_db.trust_key(
'92429807C9853C0744A68B9AAE07828059A53CC1')
feed_url = 'http://foo'
src = tempfile.TemporaryFile()
# Unsigned
src.write("hello")
src.flush()
src.seek(0)
try:
PendingFeed(feed_url, src)
assert 0
except model.SafeException:
pass
stream = tempfile.TemporaryFile()
stream.write(data.thomas_key)
stream.seek(0)
gpg.import_key(stream)
# Signed
src.seek(0)
src.write(data.foo_signed_xml)
src.flush()
src.seek(0)
pending = PendingFeed(feed_url, src)
assert iface_cache.update_feed_if_trusted(feed_url, pending.sigs, pending.new_xml)
self.assertEquals(['http://foo'],
iface_cache.list_all_interfaces())
feed = iface_cache.get_feed(feed_url)
self.assertEquals(1154850229, feed.last_modified)
def testXMLupdate(self):
iface_cache = self.config.iface_cache
trust.trust_db.trust_key(
'92429807C9853C0744A68B9AAE07828059A53CC1')
stream = tempfile.TemporaryFile()
stream.write(data.thomas_key)
stream.seek(0)
gpg.import_key(stream)
iface = iface_cache.get_interface('http://foo')
src = tempfile.TemporaryFile()
src.write(data.foo_signed_xml)
src.seek(0)
pending = PendingFeed(iface.uri, src)
assert iface_cache.update_feed_if_trusted(iface.uri, pending.sigs, pending.new_xml)
iface_cache.__init__()
feed = iface_cache.get_feed('http://foo')
assert feed.last_modified == 1154850229
# mtimes are unreliable because copying often changes them -
# check that we extract the time from the signature when upgrading
upstream_dir = basedir.save_cache_path(config_site, 'interfaces')
cached = os.path.join(upstream_dir, model.escape(feed.url))
os.utime(cached, None)
iface_cache.__init__()
feed = iface_cache.get_feed('http://foo')
assert feed.last_modified > 1154850229
src = tempfile.TemporaryFile()
src.write(data.new_foo_signed_xml)
src.seek(0)
pending = PendingFeed(feed.url, src)
assert iface_cache.update_feed_if_trusted(feed.url, pending.sigs, pending.new_xml)
# Can't 'update' to an older copy
src = tempfile.TemporaryFile()
src.write(data.foo_signed_xml)
src.seek(0)
try:
pending = PendingFeed(feed.url, src)
assert iface_cache.update_feed_if_trusted(feed.url, pending.sigs, pending.new_xml)
assert 0
except model.SafeException:
pass
def testTimes(self):
iface_cache = self.config.iface_cache
stream = tempfile.TemporaryFile()
stream.write(data.thomas_key)
stream.seek(0)
gpg.import_key(stream)
upstream_dir = basedir.save_cache_path(config_site, 'interfaces')
cached = os.path.join(upstream_dir, model.escape('http://foo'))
stream = file(cached, 'w')
stream.write(data.foo_signed_xml)
stream.close()
signed = iface_cache._get_signature_date('http://foo')
assert signed == None
trust.trust_db.trust_key(
'92429807C9853C0744A68B9AAE07828059A53CC1')
signed = iface_cache._get_signature_date('http://foo')
assert signed == 1154850229
stream = file(cached, 'w+')
stream.seek(0)
stream.write('Hello')
stream.close()
# When the signature is invalid, we just return None.
# This is because versions < 0.22 used to corrupt the signature
# by adding an attribute to the XML
signed = iface_cache._get_signature_date('http://foo')
assert signed == None
def testCheckAttempt(self):
iface_cache = self.config.iface_cache
self.assertEquals(None, iface_cache.get_last_check_attempt("http://foo/bar.xml"))
start_time = time.time() - 5 # Seems to be some odd rounding here
iface_cache.mark_as_checking("http://foo/bar.xml")
last_check = iface_cache.get_last_check_attempt("http://foo/bar.xml")
assert last_check is not None
assert last_check >= start_time, (last_check, start_time)
self.assertEquals(None, iface_cache.get_last_check_attempt("http://foo/bar2.xml"))
if __name__ == '__main__':
unittest.main() | from zeroinstall.injector.namespaces import config_site | random_line_split |
testifacecache.py
#!/usr/bin/env python
from basetest import BaseTest
import sys, tempfile, os, time
import unittest
import data
sys.path.insert(0, '..')
from zeroinstall.injector import model, gpg, trust
from zeroinstall.injector.namespaces import config_site
from zeroinstall.injector.iface_cache import PendingFeed
from zeroinstall.support import basedir
class TestIfaceCache(BaseTest):
def testList(self):
iface_cache = self.config.iface_cache
self.assertEquals([], iface_cache.list_all_interfaces())
iface_dir = basedir.save_cache_path(config_site, 'interfaces')
file(os.path.join(iface_dir, 'http%3a%2f%2ffoo'), 'w').close()
self.assertEquals(['http://foo'],
iface_cache.list_all_interfaces())
# TODO: test overrides
def testCheckSigned(self):
iface_cache = self.config.iface_cache
trust.trust_db.trust_key(
'92429807C9853C0744A68B9AAE07828059A53CC1')
feed_url = 'http://foo'
src = tempfile.TemporaryFile()
# Unsigned
src.write("hello")
src.flush()
src.seek(0)
try:
PendingFeed(feed_url, src)
assert 0
except model.SafeException:
pass
stream = tempfile.TemporaryFile()
stream.write(data.thomas_key)
stream.seek(0)
gpg.import_key(stream)
# Signed
src.seek(0)
src.write(data.foo_signed_xml)
src.flush()
src.seek(0)
pending = PendingFeed(feed_url, src)
assert iface_cache.update_feed_if_trusted(feed_url, pending.sigs, pending.new_xml)
self.assertEquals(['http://foo'],
iface_cache.list_all_interfaces())
feed = iface_cache.get_feed(feed_url)
self.assertEquals(1154850229, feed.last_modified)
def testXMLupdate(self):
iface_cache = self.config.iface_cache
trust.trust_db.trust_key(
'92429807C9853C0744A68B9AAE07828059A53CC1')
stream = tempfile.TemporaryFile()
stream.write(data.thomas_key)
stream.seek(0)
gpg.import_key(stream)
iface = iface_cache.get_interface('http://foo')
src = tempfile.TemporaryFile()
src.write(data.foo_signed_xml)
src.seek(0)
pending = PendingFeed(iface.uri, src)
assert iface_cache.update_feed_if_trusted(iface.uri, pending.sigs, pending.new_xml)
iface_cache.__init__()
feed = iface_cache.get_feed('http://foo')
assert feed.last_modified == 1154850229
# mtimes are unreliable because copying often changes them -
# check that we extract the time from the signature when upgrading
upstream_dir = basedir.save_cache_path(config_site, 'interfaces')
cached = os.path.join(upstream_dir, model.escape(feed.url))
os.utime(cached, None)
iface_cache.__init__()
feed = iface_cache.get_feed('http://foo')
assert feed.last_modified > 1154850229
src = tempfile.TemporaryFile()
src.write(data.new_foo_signed_xml)
src.seek(0)
pending = PendingFeed(feed.url, src)
assert iface_cache.update_feed_if_trusted(feed.url, pending.sigs, pending.new_xml)
# Can't 'update' to an older copy
src = tempfile.TemporaryFile()
src.write(data.foo_signed_xml)
src.seek(0)
try:
pending = PendingFeed(feed.url, src)
assert iface_cache.update_feed_if_trusted(feed.url, pending.sigs, pending.new_xml)
assert 0
except model.SafeException:
pass
def testTimes(self):
iface_cache = self.config.iface_cache
stream = tempfile.TemporaryFile()
stream.write(data.thomas_key)
stream.seek(0)
gpg.import_key(stream)
upstream_dir = basedir.save_cache_path(config_site, 'interfaces')
cached = os.path.join(upstream_dir, model.escape('http://foo'))
stream = file(cached, 'w')
stream.write(data.foo_signed_xml)
stream.close()
signed = iface_cache._get_signature_date('http://foo')
assert signed == None
trust.trust_db.trust_key(
'92429807C9853C0744A68B9AAE07828059A53CC1')
signed = iface_cache._get_signature_date('http://foo')
assert signed == 1154850229
stream = file(cached, 'w+')
stream.seek(0)
stream.write('Hello')
stream.close()
# When the signature is invalid, we just return None.
		# This is because versions < 0.22 used to corrupt the signature
# by adding an attribute to the XML
signed = iface_cache._get_signature_date('http://foo')
assert signed == None
def testCheckAttempt(self):
iface_cache = self.config.iface_cache
self.assertEquals(None, iface_cache.get_last_check_attempt("http://foo/bar.xml"))
start_time = time.time() - 5 # Seems to be some odd rounding here
iface_cache.mark_as_checking("http://foo/bar.xml")
last_check = iface_cache.get_last_check_attempt("http://foo/bar.xml")
assert last_check is not None
assert last_check >= start_time, (last_check, start_time)
self.assertEquals(None, iface_cache.get_last_check_attempt("http://foo/bar2.xml"))
if __name__ == '__main__':
	unittest.main()
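For reference, the on-disk layout these tests depend on is a feed URL escaped into a filename under the interfaces cache directory. The helper below is an illustrative sketch, not part of the test suite, and cached_feed_path is a hypothetical name; it assumes the zeroinstall package is importable and uses only calls the tests above already exercise (basedir.save_cache_path and model.escape):

# Sketch only: where a feed URL lives in the cache (cached_feed_path is hypothetical).
import os
from zeroinstall.injector import model
from zeroinstall.injector.namespaces import config_site
from zeroinstall.support import basedir

def cached_feed_path(url):
	# save_cache_path creates the directory if needed; model.escape maps
	# 'http://foo' to 'http%3a%2f%2ffoo', matching the filename used in testList.
	iface_dir = basedir.save_cache_path(config_site, 'interfaces')
	return os.path.join(iface_dir, model.escape(url))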
acceptancetest.py
import hashlib
import shutil
import os
from datetime import datetime

list_of_paths_and_strings = [
	["assignment1.cpp", "main()"]
]

def main():
	if acceptance_test():
		make_txt_file()
		zip_dir()

def get_md5_hash(file):
	# read the whole file and hash its contents with MD5
	file_to_hash = open(file)
	read_file = file_to_hash.read()
	file_to_hash.close()
	md5_hash = hashlib.md5(read_file)
	md5_hash_output = md5_hash.hexdigest()
	# print file name and hash
	print "File Name: %s" % file
	print "MD5 Hash: %r" % md5_hash_output
	# return file name and hash
	return file, md5_hash_output

def get_current_time():
	print "The current time is " + str(datetime.today())
	return datetime.today()

def acceptance_test():
	# for each [path, string, ...] entry in the list, make sure a file
	# with that name exists and contains every required string
	for my_list in list_of_paths_and_strings:
		path = my_list[0]
		list_of_strings = my_list[1:]
		try:
			with open(path) as f:
				contents = f.read()  # read once; a second read() would return ''
		except IOError:
			print 'File does not exist. Please make sure all necessary files are in the correct place.'
			return False
		for string in list_of_strings:
			if string in contents:
				print "Found " + string + " in file."
			else:
				print string + " not found in file."
				return False
	# only succeed once every file has passed
	return True

def make_txt_file():
	# writes a text file with the MD5 hash of each file in the directory
	write_file = open("hash.txt", "w+")
	write_file.write("Write time: " + str(get_current_time()) + '\n')
	for file in os.listdir(os.getcwd()):
		if "." in file:
			f_name, file_hash = get_md5_hash(file)
			write_file.write(f_name + '\n')
			write_file.write(file_hash + '\n')
	write_file.close()

def zip_dir():
	# zips the submission using shutil.make_archive(), skipping PDFs and this script
	zip_name = "submission"
	directory_name = "./tmp"
	os.mkdir("./tmp")
	for file in os.listdir(os.getcwd()):
		try:
			if ".pdf" in file:
				continue
			elif "acceptancetest" in file:
				continue
			else:
				shutil.copy(file, './tmp/')
		except:
			continue
	shutil.make_archive(zip_name, 'zip', directory_name)
	shutil.rmtree('./tmp')

if __name__ == '__main__':
	main()
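A side note on get_md5_hash above: it reads the entire file into memory before hashing. The chunked variant below is a sketch, not part of the original script, and md5_of_file is a hypothetical name; it scales to large files, and reading in binary mode keeps the digest independent of newline translation:

# Sketch only: chunked, binary-mode MD5 (md5_of_file is a hypothetical helper).
import hashlib

def md5_of_file(path, chunk_size=65536):
	digest = hashlib.md5()
	with open(path, 'rb') as f:
		chunk = f.read(chunk_size)
		while chunk:
			digest.update(chunk)
			chunk = f.read(chunk_size)
	return digest.hexdigest()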