code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
if not text1: # Just add some text (speedup). return [(self.DIFF_INSERT, text2)] if not text2: # Just delete some text (speedup). return [(self.DIFF_DELETE, text1)] if len(text1) > len(text2): (longtext, shorttext) = (text1, text2) else: (shorttext, longtext) = (text1, text2) i = longtext.find(shorttext) if i != -1: # Shorter text is inside the longer text (speedup). diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext), (self.DIFF_INSERT, longtext[i + len(shorttext):])] # Swap insertions for deletions if diff is reversed. if len(text1) > len(text2): diffs[0] = (self.DIFF_DELETE, diffs[0][1]) diffs[2] = (self.DIFF_DELETE, diffs[2][1]) return diffs if len(shorttext) == 1: # Single character string. # After the previous speedup, the character can't be an equality. return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)] # Check to see if the problem can be split in two. hm = self.diff_halfMatch(text1, text2) if hm: # A half-match was found, sort out the return data. (text1_a, text1_b, text2_a, text2_b, mid_common) = hm # Send both pairs off for separate processing. diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline) diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline) # Merge the results. return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b if checklines and len(text1) > 100 and len(text2) > 100: return self.diff_lineMode(text1, text2, deadline) return self.diff_bisect(text1, text2, deadline)
def diff_compute(self, text1, text2, checklines, deadline)
Find the differences between two texts. Assumes that the texts do not have any common prefix or suffix. Args: text1: Old string to be diffed. text2: New string to be diffed. checklines: Speedup flag. If false, then don't run a line-level diff first to identify the changed areas. If true, then run a faster, slightly less optimal diff. deadline: Time when the diff should be complete by. Returns: Array of changes.
1.476339
1.440802
1.024665
# Scan the text on a line-by-line basis first. (text1, text2, linearray) = self.diff_linesToChars(text1, text2) diffs = self.diff_main(text1, text2, False, deadline) # Convert the diff back to original text. self.diff_charsToLines(diffs, linearray) # Eliminate freak matches (e.g. blank lines) self.diff_cleanupSemantic(diffs) # Rediff any replacement blocks, this time character-by-character. # Add a dummy entry at the end. diffs.append((self.DIFF_EQUAL, '')) pointer = 0 count_delete = 0 count_insert = 0 text_delete = '' text_insert = '' while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_INSERT: count_insert += 1 text_insert += diffs[pointer][1] elif diffs[pointer][0] == self.DIFF_DELETE: count_delete += 1 text_delete += diffs[pointer][1] elif diffs[pointer][0] == self.DIFF_EQUAL: # Upon reaching an equality, check for prior redundancies. if count_delete >= 1 and count_insert >= 1: # Delete the offending records and add the merged ones. subDiff = self.diff_main(text_delete, text_insert, False, deadline) diffs[pointer - count_delete - count_insert : pointer] = subDiff pointer = pointer - count_delete - count_insert + len(subDiff) count_insert = 0 count_delete = 0 text_delete = '' text_insert = '' pointer += 1 diffs.pop() # Remove the dummy entry at the end. return diffs
def diff_lineMode(self, text1, text2, deadline)
Do a quick line-level diff on both strings, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time when the diff should be complete by. Returns: Array of changes.
1.56459
1.446713
1.081479
text1a = text1[:x]
text2a = text2[:y]
text1b = text1[x:]
text2b = text2[y:]

# Compute both diffs serially.
diffs = self.diff_main(text1a, text2a, False, deadline)
diffsb = self.diff_main(text1b, text2b, False, deadline)

return diffs + diffsb
def diff_bisectSplit(self, text1, text2, x, y, deadline)
Given the location of the 'middle snake', split the diff in two parts and recurse. Args: text1: Old string to be diffed. text2: New string to be diffed. x: Index of split point in text1. y: Index of split point in text2. deadline: Time at which to bail if not yet complete. Returns: Array of diff tuples.
2.017747
1.933376
1.043639
lineArray = [] # e.g. lineArray[4] == "Hello\n" lineHash = {} # e.g. lineHash["Hello\n"] == 4 # "\x00" is a valid character, but various debuggers don't like it. # So we'll insert a junk entry to avoid generating a null character. lineArray.append('') def diff_linesToCharsMunge(text): chars = [] # Walk the text, pulling out a substring for each line. # text.split('\n') would would temporarily double our memory footprint. # Modifying text would create many large strings to garbage collect. lineStart = 0 lineEnd = -1 while lineEnd < len(text) - 1: lineEnd = text.find('\n', lineStart) if lineEnd == -1: lineEnd = len(text) - 1 line = text[lineStart:lineEnd + 1] if line in lineHash: chars.append(chr(lineHash[line])) else: if len(lineArray) == maxLines: # Bail out at 1114111 because chr(1114112) throws. line = text[lineStart:] lineEnd = len(text) lineArray.append(line) lineHash[line] = len(lineArray) - 1 chars.append(chr(len(lineArray) - 1)) lineStart = lineEnd + 1 return "".join(chars) # Allocate 2/3rds of the space for text1, the rest for text2. maxLines = 666666 chars1 = diff_linesToCharsMunge(text1) maxLines = 1114111 chars2 = diff_linesToCharsMunge(text2) return (chars1, chars2, lineArray)
def diff_linesToChars(self, text1, text2)
Split two texts into an array of strings. Reduce the texts to a string of hashes where each Unicode character represents one line. Args: text1: First string. text2: Second string. Returns: Three element tuple, containing the encoded text1, the encoded text2 and the array of unique strings. The zeroth element of the array of unique strings is intentionally blank.
2.134912
2.076731
1.028016
for i in range(len(diffs)):
    text = []
    for char in diffs[i][1]:
        text.append(lineArray[ord(char)])
    diffs[i] = (diffs[i][0], "".join(text))
def diff_charsToLines(self, diffs, lineArray)
Rehydrate the text in a diff from a string of line hashes to real lines of text. Args: diffs: Array of diff tuples. lineArray: Array of unique strings.
3.067448
2.53356
1.210726
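The two helpers above encode each unique line as a single Unicode character before diffing and then rehydrate the result. A simplified standalone sketch of that round trip (an illustration of the idea, not the library's own methods):

# Map each unique line to one character, then decode the characters back to lines.
def lines_to_chars(text, line_array, line_hash):
    chars = []
    for line in text.splitlines(keepends=True):
        if line not in line_hash:
            line_array.append(line)
            line_hash[line] = len(line_array) - 1
        chars.append(chr(line_hash[line]))
    return "".join(chars)

line_array = ['']   # index 0 is intentionally blank
line_hash = {}
chars1 = lines_to_chars("alpha\nbeta\n", line_array, line_hash)
chars2 = lines_to_chars("alpha\ngamma\n", line_array, line_hash)
print(repr(chars1), repr(chars2))   # '\x01\x02' '\x01\x03'
decoded = "".join(line_array[ord(c)] for c in chars1)
print(decoded == "alpha\nbeta\n")   # True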
# Quick check for common null cases.
if not text1 or not text2 or text1[0] != text2[0]:
    return 0
# Binary search.
# Performance analysis: https://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
    if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
        pointermin = pointermid
        pointerstart = pointermin
    else:
        pointermax = pointermid
    pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def diff_commonPrefix(self, text1, text2)
Determine the common prefix of two strings. Args: text1: First string. text2: Second string. Returns: The number of characters common to the start of each string.
1.885141
2.057483
0.916236
# Quick check for common null cases.
if not text1 or not text2 or text1[-1] != text2[-1]:
    return 0
# Binary search.
# Performance analysis: https://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerend = 0
while pointermin < pointermid:
    if (text1[-pointermid:len(text1) - pointerend] ==
            text2[-pointermid:len(text2) - pointerend]):
        pointermin = pointermid
        pointerend = pointermin
    else:
        pointermax = pointermid
    pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def diff_commonSuffix(self, text1, text2)
Determine the common suffix of two strings. Args: text1: First string. text2: Second string. Returns: The number of characters common to the end of each string.
2.219869
2.315563
0.958674
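A minimal usage sketch for the two prefix/suffix helpers above, assuming the published diff-match-patch Python package exposes them on a `diff_match_patch` class as shown in these signatures:

import diff_match_patch

dmp = diff_match_patch.diff_match_patch()
# Both pairs share the four-character run "1234".
print(dmp.diff_commonPrefix("1234abcdef", "1234xyz"))  # 4
print(dmp.diff_commonSuffix("abcdef1234", "xyz1234"))  # 4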
# Cache the text lengths to prevent multiple calls. text1_length = len(text1) text2_length = len(text2) # Eliminate the null case. if text1_length == 0 or text2_length == 0: return 0 # Truncate the longer string. if text1_length > text2_length: text1 = text1[-text2_length:] elif text1_length < text2_length: text2 = text2[:text1_length] text_length = min(text1_length, text2_length) # Quick check for the worst case. if text1 == text2: return text_length # Start by looking for a single character match # and increase length until no match is found. # Performance analysis: https://neil.fraser.name/news/2010/11/04/ best = 0 length = 1 while True: pattern = text1[-length:] found = text2.find(pattern) if found == -1: return best length += found if found == 0 or text1[-length:] == text2[:length]: best = length length += 1
def diff_commonOverlap(self, text1, text2)
Determine if the suffix of one string is the prefix of another. Args: text1 First string. text2 Second string. Returns: The number of characters common to the end of the first string and the start of the second string.
1.679701
1.72428
0.974146
if self.Diff_Timeout <= 0: # Don't risk returning a non-optimal diff if we have unlimited time. return None if len(text1) > len(text2): (longtext, shorttext) = (text1, text2) else: (shorttext, longtext) = (text1, text2) if len(longtext) < 4 or len(shorttext) * 2 < len(longtext): return None # Pointless. def diff_halfMatchI(longtext, shorttext, i): seed = longtext[i:i + len(longtext) // 4] best_common = '' j = shorttext.find(seed) while j != -1: prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:]) suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j]) if len(best_common) < suffixLength + prefixLength: best_common = (shorttext[j - suffixLength:j] + shorttext[j:j + prefixLength]) best_longtext_a = longtext[:i - suffixLength] best_longtext_b = longtext[i + prefixLength:] best_shorttext_a = shorttext[:j - suffixLength] best_shorttext_b = shorttext[j + prefixLength:] j = shorttext.find(seed, j + 1) if len(best_common) * 2 >= len(longtext): return (best_longtext_a, best_longtext_b, best_shorttext_a, best_shorttext_b, best_common) else: return None # First check if the second quarter is the seed for a half-match. hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4) # Check again based on the third quarter. hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2) if not hm1 and not hm2: return None elif not hm2: hm = hm1 elif not hm1: hm = hm2 else: # Both matched. Select the longest. if len(hm1[4]) > len(hm2[4]): hm = hm1 else: hm = hm2 # A half-match was found, sort out the return data. if len(text1) > len(text2): (text1_a, text1_b, text2_a, text2_b, mid_common) = hm else: (text2_a, text2_b, text1_a, text1_b, mid_common) = hm return (text1_a, text1_b, text2_a, text2_b, mid_common)
def diff_halfMatch(self, text1, text2)
Do the two texts share a substring which is at least half the length of the longer text? This speedup can produce non-minimal diffs. Args: text1: First string. text2: Second string. Returns: Five element Array, containing the prefix of text1, the suffix of text1, the prefix of text2, the suffix of text2 and the common middle. Or None if there was no match.
1.555203
1.506405
1.032393
def diff_cleanupSemanticScore(one, two): if not one or not two: # Edges are the best. return 6 # Each port of this function behaves slightly differently due to # subtle differences in each language's definition of things like # 'whitespace'. Since this function's purpose is largely cosmetic, # the choice has been made to use each language's native features # rather than force total conformity. char1 = one[-1] char2 = two[0] nonAlphaNumeric1 = not char1.isalnum() nonAlphaNumeric2 = not char2.isalnum() whitespace1 = nonAlphaNumeric1 and char1.isspace() whitespace2 = nonAlphaNumeric2 and char2.isspace() lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n") lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n") blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one) blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two) if blankLine1 or blankLine2: # Five points for blank lines. return 5 elif lineBreak1 or lineBreak2: # Four points for line breaks. return 4 elif nonAlphaNumeric1 and not whitespace1 and whitespace2: # Three points for end of sentences. return 3 elif whitespace1 or whitespace2: # Two points for whitespace. return 2 elif nonAlphaNumeric1 or nonAlphaNumeric2: # One point for non-alphanumeric. return 1 return 0 pointer = 1 # Intentionally ignore the first and last element (don't need checking). while pointer < len(diffs) - 1: if (diffs[pointer - 1][0] == self.DIFF_EQUAL and diffs[pointer + 1][0] == self.DIFF_EQUAL): # This is a single edit surrounded by equalities. equality1 = diffs[pointer - 1][1] edit = diffs[pointer][1] equality2 = diffs[pointer + 1][1] # First, shift the edit as far left as possible. commonOffset = self.diff_commonSuffix(equality1, edit) if commonOffset: commonString = edit[-commonOffset:] equality1 = equality1[:-commonOffset] edit = commonString + edit[:-commonOffset] equality2 = commonString + equality2 # Second, step character by character right, looking for the best fit. bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 bestScore = (diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2)) while edit and equality2 and edit[0] == equality2[0]: equality1 += edit[0] edit = edit[1:] + equality2[0] equality2 = equality2[1:] score = (diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2)) # The >= encourages trailing rather than leading whitespace on edits. if score >= bestScore: bestScore = score bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 if diffs[pointer - 1][1] != bestEquality1: # We have an improvement, save it back to the diff. if bestEquality1: diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1) else: del diffs[pointer - 1] pointer -= 1 diffs[pointer] = (diffs[pointer][0], bestEdit) if bestEquality2: diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2) else: del diffs[pointer + 1] pointer -= 1 pointer += 1
def diff_cleanupSemanticLossless(self, diffs)
Look for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came. Args: diffs: Array of diff tuples.
1.953232
1.583225
1.233705
changes = False equalities = [] # Stack of indices where equalities are found. lastEquality = None # Always equal to diffs[equalities[-1]][1] pointer = 0 # Index of current position. pre_ins = False # Is there an insertion operation before the last equality. pre_del = False # Is there a deletion operation before the last equality. post_ins = False # Is there an insertion operation after the last equality. post_del = False # Is there a deletion operation after the last equality. while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found. if (len(diffs[pointer][1]) < self.Diff_EditCost and (post_ins or post_del)): # Candidate found. equalities.append(pointer) pre_ins = post_ins pre_del = post_del lastEquality = diffs[pointer][1] else: # Not a candidate, and can never become one. equalities = [] lastEquality = None post_ins = post_del = False else: # An insertion or deletion. if diffs[pointer][0] == self.DIFF_DELETE: post_del = True else: post_ins = True # Five types to be split: # <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del> # <ins>A</ins>X<ins>C</ins><del>D</del> # <ins>A</ins><del>B</del>X<ins>C</ins> # <ins>A</del>X<ins>C</ins><del>D</del> # <ins>A</ins><del>B</del>X<del>C</del> if lastEquality and ((pre_ins and pre_del and post_ins and post_del) or ((len(lastEquality) < self.Diff_EditCost / 2) and (pre_ins + pre_del + post_ins + post_del) == 3)): # Duplicate record. diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality)) # Change second copy to insert. diffs[equalities[-1] + 1] = (self.DIFF_INSERT, diffs[equalities[-1] + 1][1]) equalities.pop() # Throw away the equality we just deleted. lastEquality = None if pre_ins and pre_del: # No changes made which could affect previous entry, keep going. post_ins = post_del = True equalities = [] else: if len(equalities): equalities.pop() # Throw away the previous equality. if len(equalities): pointer = equalities[-1] else: pointer = -1 post_ins = post_del = False changes = True pointer += 1 if changes: self.diff_cleanupMerge(diffs)
def diff_cleanupEfficiency(self, diffs)
Reduce the number of edits by eliminating operationally trivial equalities. Args: diffs: Array of diff tuples.
1.638592
1.593156
1.028519
chars1 = 0 chars2 = 0 last_chars1 = 0 last_chars2 = 0 for x in range(len(diffs)): (op, text) = diffs[x] if op != self.DIFF_INSERT: # Equality or deletion. chars1 += len(text) if op != self.DIFF_DELETE: # Equality or insertion. chars2 += len(text) if chars1 > loc: # Overshot the location. break last_chars1 = chars1 last_chars2 = chars2 if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE: # The location was deleted. return last_chars2 # Add the remaining len(character). return last_chars2 + (loc - last_chars1)
def diff_xIndex(self, diffs, loc)
loc is a location in text1, compute and return the equivalent location in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8 Args: diffs: Array of diff tuples. loc: Location within text1. Returns: Location within text2.
2.597517
2.379667
1.091547
html = []
for (op, data) in diffs:
    text = (data.replace("&", "&amp;").replace("<", "&lt;")
            .replace(">", "&gt;").replace("\n", "&para;<br>"))
    if op == self.DIFF_INSERT:
        html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
    elif op == self.DIFF_DELETE:
        html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
    elif op == self.DIFF_EQUAL:
        html.append("<span>%s</span>" % text)
return "".join(html)
def diff_prettyHtml(self, diffs)
Convert a diff array into a pretty HTML report. Args: diffs: Array of diff tuples. Returns: HTML representation.
1.880726
1.824438
1.030852
text = []
for (op, data) in diffs:
    if op != self.DIFF_INSERT:
        text.append(data)
return "".join(text)
def diff_text1(self, diffs)
Compute and return the source text (all equalities and deletions). Args: diffs: Array of diff tuples. Returns: Source text.
3.413825
3.699046
0.922894
text = []
for (op, data) in diffs:
    if op != self.DIFF_DELETE:
        text.append(data)
return "".join(text)
def diff_text2(self, diffs)
Compute and return the destination text (all equalities and insertions). Args: diffs: Array of diff tuples. Returns: Destination text.
3.500812
3.733959
0.93756
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
    if op == self.DIFF_INSERT:
        insertions += len(data)
    elif op == self.DIFF_DELETE:
        deletions += len(data)
    elif op == self.DIFF_EQUAL:
        # A deletion and an insertion is one substitution.
        levenshtein += max(insertions, deletions)
        insertions = 0
        deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein
def diff_levenshtein(self, diffs)
Compute the Levenshtein distance; the number of inserted, deleted or substituted characters. Args: diffs: Array of diff tuples. Returns: Number of changes.
1.730898
1.589036
1.089275
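Putting diff_main, diff_cleanupSemantic and diff_levenshtein together, a hedged usage sketch (again assuming the published diff-match-patch package):

import diff_match_patch

dmp = diff_match_patch.diff_match_patch()
diffs = dmp.diff_main("The cat", "The big cat")
dmp.diff_cleanupSemantic(diffs)
print(diffs)                        # e.g. [(0, 'The '), (1, 'big '), (0, 'cat')]
print(dmp.diff_levenshtein(diffs))  # 4: four characters were inserted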
diffs = [] pointer = 0 # Cursor in text1 tokens = delta.split("\t") for token in tokens: if token == "": # Blank tokens are ok (from a trailing \t). continue # Each token begins with a one character parameter which specifies the # operation of this token (delete, insert, equality). param = token[1:] if token[0] == "+": param = urllib.parse.unquote(param) diffs.append((self.DIFF_INSERT, param)) elif token[0] == "-" or token[0] == "=": try: n = int(param) except ValueError: raise ValueError("Invalid number in diff_fromDelta: " + param) if n < 0: raise ValueError("Negative number in diff_fromDelta: " + param) text = text1[pointer : pointer + n] pointer += n if token[0] == "=": diffs.append((self.DIFF_EQUAL, text)) else: diffs.append((self.DIFF_DELETE, text)) else: # Anything else is an error. raise ValueError("Invalid diff operation in diff_fromDelta: " + token[0]) if pointer != len(text1): raise ValueError( "Delta length (%d) does not equal source text length (%d)." % (pointer, len(text1))) return diffs
def diff_fromDelta(self, text1, delta)
Given the original text1, and an encoded string which describes the operations required to transform text1 into text2, compute the full diff. Args: text1: Source string for the diff. delta: Delta text. Returns: Array of diff tuples. Raises: ValueError: If invalid input.
2.398691
2.134366
1.123843
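diff_toDelta and diff_fromDelta are designed as a round trip; a short sketch under the same package assumption:

import diff_match_patch

dmp = diff_match_patch.diff_match_patch()
text1 = "The cat"
text2 = "The big cat"
delta = dmp.diff_toDelta(dmp.diff_main(text1, text2))  # e.g. "=4\t+big \t=3"
restored = dmp.diff_fromDelta(text1, delta)
print(dmp.diff_text2(restored) == text2)               # True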
# Check for null inputs.
if text == None or pattern == None:
    raise ValueError("Null inputs. (match_main)")

loc = max(0, min(loc, len(text)))
if text == pattern:
    # Shortcut (potentially not guaranteed by the algorithm)
    return 0
elif not text:
    # Nothing to match.
    return -1
elif text[loc:loc + len(pattern)] == pattern:
    # Perfect match at the perfect spot!  (Includes case of null pattern)
    return loc
else:
    # Do a fuzzy compare.
    match = self.match_bitap(text, pattern, loc)
    return match
def match_main(self, text, pattern, loc)
Locate the best instance of 'pattern' in 'text' near 'loc'. Args: text: The text to search. pattern: The pattern to search for. loc: The location to search around. Returns: Best match index or -1.
4.822697
4.12276
1.169774
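A small usage sketch for match_main under the same package assumption; the first call hits the exact-match shortcut shown above, the second falls through to the fuzzy bitap search:

import diff_match_patch

dmp = diff_match_patch.diff_match_patch()
print(dmp.match_main("abcdefghijk", "fgh", 5))  # 5: exact match at the expected spot
print(dmp.match_main("abcdefghijk", "fgh", 0))  # 5: nearest exact match wins over loc=0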
# Python doesn't have a maxint limit, so ignore this check. #if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits: # raise ValueError("Pattern too long for this application.") # Initialise the alphabet. s = self.match_alphabet(pattern) def match_bitapScore(e, x): accuracy = float(e) / len(pattern) proximity = abs(loc - x) if not self.Match_Distance: # Dodge divide by zero error. return proximity and 1.0 or accuracy return accuracy + (proximity / float(self.Match_Distance)) # Highest score beyond which we give up. score_threshold = self.Match_Threshold # Is there a nearby exact match? (speedup) best_loc = text.find(pattern, loc) if best_loc != -1: score_threshold = min(match_bitapScore(0, best_loc), score_threshold) # What about in the other direction? (speedup) best_loc = text.rfind(pattern, loc + len(pattern)) if best_loc != -1: score_threshold = min(match_bitapScore(0, best_loc), score_threshold) # Initialise the bit arrays. matchmask = 1 << (len(pattern) - 1) best_loc = -1 bin_max = len(pattern) + len(text) # Empty initialization added to appease pychecker. last_rd = None for d in range(len(pattern)): # Scan for the best match each iteration allows for one more error. # Run a binary search to determine how far from 'loc' we can stray at # this error level. bin_min = 0 bin_mid = bin_max while bin_min < bin_mid: if match_bitapScore(d, loc + bin_mid) <= score_threshold: bin_min = bin_mid else: bin_max = bin_mid bin_mid = (bin_max - bin_min) // 2 + bin_min # Use the result from this iteration as the maximum for the next. bin_max = bin_mid start = max(1, loc - bin_mid + 1) finish = min(loc + bin_mid, len(text)) + len(pattern) rd = [0] * (finish + 2) rd[finish + 1] = (1 << d) - 1 for j in range(finish, start - 1, -1): if len(text) <= j - 1: # Out of range. charMatch = 0 else: charMatch = s.get(text[j - 1], 0) if d == 0: # First pass: exact match. rd[j] = ((rd[j + 1] << 1) | 1) & charMatch else: # Subsequent passes: fuzzy match. rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | ( ((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1] if rd[j] & matchmask: score = match_bitapScore(d, j - 1) # This match will almost certainly be better than any existing match. # But check anyway. if score <= score_threshold: # Told you so. score_threshold = score best_loc = j - 1 if best_loc > loc: # When passing loc, don't exceed our current distance from loc. start = max(1, 2 * loc - best_loc) else: # Already passed loc, downhill from here on in. break # No hope for a (better) match at greater error levels. if match_bitapScore(d + 1, loc) > score_threshold: break last_rd = rd return best_loc
def match_bitap(self, text, pattern, loc)
Locate the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. Args: text: The text to search. pattern: The pattern to search for. loc: The location to search around. Returns: Best match index or -1.
1.869888
1.874806
0.997377
s = {}
for char in pattern:
    s[char] = 0
for i in range(len(pattern)):
    s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s
def match_alphabet(self, pattern)
Initialise the alphabet for the Bitap algorithm. Args: pattern: The text to encode. Returns: Hash of character locations.
3.148313
2.794329
1.126679
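match_alphabet is easiest to understand from its output; a standalone copy of the bitmask construction above, with two worked inputs:

def match_alphabet(pattern):
    # Each character maps to a bitmask of the positions where it occurs.
    s = {}
    for char in pattern:
        s[char] = 0
    for i in range(len(pattern)):
        s[pattern[i]] |= 1 << (len(pattern) - i - 1)
    return s

print(match_alphabet("abc"))     # {'a': 4, 'b': 2, 'c': 1}
print(match_alphabet("abcaba"))  # {'a': 37, 'b': 18, 'c': 8}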
if len(text) == 0: return pattern = text[patch.start2 : patch.start2 + patch.length1] padding = 0 # Look for the first and last matches of pattern in text. If two different # matches are found, increase the pattern length. while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits == 0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin - self.Patch_Margin)): padding += self.Patch_Margin pattern = text[max(0, patch.start2 - padding) : patch.start2 + patch.length1 + padding] # Add one chunk for good luck. padding += self.Patch_Margin # Add the prefix. prefix = text[max(0, patch.start2 - padding) : patch.start2] if prefix: patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)] # Add the suffix. suffix = text[patch.start2 + patch.length1 : patch.start2 + patch.length1 + padding] if suffix: patch.diffs.append((self.DIFF_EQUAL, suffix)) # Roll back the start points. patch.start1 -= len(prefix) patch.start2 -= len(prefix) # Extend lengths. patch.length1 += len(prefix) + len(suffix) patch.length2 += len(prefix) + len(suffix)
def patch_addContext(self, patch, text)
Increase the context until it is unique, but don't let the pattern expand beyond Match_MaxBits. Args: patch: The patch to grow. text: Source text.
2.972015
2.488733
1.194188
patchesCopy = []
for patch in patches:
    patchCopy = patch_obj()
    # No need to deep copy the tuples since they are immutable.
    patchCopy.diffs = patch.diffs[:]
    patchCopy.start1 = patch.start1
    patchCopy.start2 = patch.start2
    patchCopy.length1 = patch.length1
    patchCopy.length2 = patch.length2
    patchesCopy.append(patchCopy)
return patchesCopy
def patch_deepCopy(self, patches)
Given an array of patches, return another array that is identical. Args: patches: Array of Patch objects. Returns: Array of Patch objects.
2.674103
2.759019
0.969223
if not patches: return (text, []) # Deep copy the patches so that no changes are made to originals. patches = self.patch_deepCopy(patches) nullPadding = self.patch_addPadding(patches) text = nullPadding + text + nullPadding self.patch_splitMax(patches) # delta keeps track of the offset between the expected and actual location # of the previous patch. If there are patches expected at positions 10 and # 20, but the first patch was found at 12, delta is 2 and the second patch # has an effective expected position of 22. delta = 0 results = [] for patch in patches: expected_loc = patch.start2 + delta text1 = self.diff_text1(patch.diffs) end_loc = -1 if len(text1) > self.Match_MaxBits: # patch_splitMax will only provide an oversized pattern in the case of # a monster delete. start_loc = self.match_main(text, text1[:self.Match_MaxBits], expected_loc) if start_loc != -1: end_loc = self.match_main(text, text1[-self.Match_MaxBits:], expected_loc + len(text1) - self.Match_MaxBits) if end_loc == -1 or start_loc >= end_loc: # Can't find valid trailing context. Drop this patch. start_loc = -1 else: start_loc = self.match_main(text, text1, expected_loc) if start_loc == -1: # No match found. :( results.append(False) # Subtract the delta for this failed patch from subsequent patches. delta -= patch.length2 - patch.length1 else: # Found a match. :) results.append(True) delta = start_loc - expected_loc if end_loc == -1: text2 = text[start_loc : start_loc + len(text1)] else: text2 = text[start_loc : end_loc + self.Match_MaxBits] if text1 == text2: # Perfect match, just shove the replacement text in. text = (text[:start_loc] + self.diff_text2(patch.diffs) + text[start_loc + len(text1):]) else: # Imperfect match. # Run a diff to get a framework of equivalent indices. diffs = self.diff_main(text1, text2, False) if (len(text1) > self.Match_MaxBits and self.diff_levenshtein(diffs) / float(len(text1)) > self.Patch_DeleteThreshold): # The end points match, but the content is unacceptably bad. results[-1] = False else: self.diff_cleanupSemanticLossless(diffs) index1 = 0 for (op, data) in patch.diffs: if op != self.DIFF_EQUAL: index2 = self.diff_xIndex(diffs, index1) if op == self.DIFF_INSERT: # Insertion text = text[:start_loc + index2] + data + text[start_loc + index2:] elif op == self.DIFF_DELETE: # Deletion text = text[:start_loc + index2] + text[start_loc + self.diff_xIndex(diffs, index1 + len(data)):] if op != self.DIFF_DELETE: index1 += len(data) # Strip the padding off. text = text[len(nullPadding):-len(nullPadding)] return (text, results)
def patch_apply(self, patches, text)
Merge a set of patches onto the text. Return a patched text, as well as a list of true/false values indicating which patches were applied. Args: patches: Array of Patch objects. text: Old text. Returns: Two element Array, containing the new text and an array of boolean values.
1.902128
1.681719
1.131062
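A hedged end-to-end sketch of patch application, assuming the package also provides the usual `patch_make(text1, text2)` constructor (not shown in this listing):

import diff_match_patch

dmp = diff_match_patch.diff_match_patch()
patches = dmp.patch_make("The quick brown fox.", "The quick red fox.")  # assumed helper
# The target text differs slightly from text1, so the fuzzy match_main path is exercised.
new_text, results = dmp.patch_apply(patches, "The quick brown fox jumps.")
print(new_text)  # e.g. "The quick red fox jumps."
print(results)   # e.g. [True]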
paddingLength = self.Patch_Margin nullPadding = "" for x in range(1, paddingLength + 1): nullPadding += chr(x) # Bump all the patches forward. for patch in patches: patch.start1 += paddingLength patch.start2 += paddingLength # Add some padding on start of first diff. patch = patches[0] diffs = patch.diffs if not diffs or diffs[0][0] != self.DIFF_EQUAL: # Add nullPadding equality. diffs.insert(0, (self.DIFF_EQUAL, nullPadding)) patch.start1 -= paddingLength # Should be 0. patch.start2 -= paddingLength # Should be 0. patch.length1 += paddingLength patch.length2 += paddingLength elif paddingLength > len(diffs[0][1]): # Grow first equality. extraLength = paddingLength - len(diffs[0][1]) newText = nullPadding[len(diffs[0][1]):] + diffs[0][1] diffs[0] = (diffs[0][0], newText) patch.start1 -= extraLength patch.start2 -= extraLength patch.length1 += extraLength patch.length2 += extraLength # Add some padding on end of last diff. patch = patches[-1] diffs = patch.diffs if not diffs or diffs[-1][0] != self.DIFF_EQUAL: # Add nullPadding equality. diffs.append((self.DIFF_EQUAL, nullPadding)) patch.length1 += paddingLength patch.length2 += paddingLength elif paddingLength > len(diffs[-1][1]): # Grow last equality. extraLength = paddingLength - len(diffs[-1][1]) newText = diffs[-1][1] + nullPadding[:extraLength] diffs[-1] = (diffs[-1][0], newText) patch.length1 += extraLength patch.length2 += extraLength return nullPadding
def patch_addPadding(self, patches)
Add some padding on text start and end so that edges can match something. Intended to be called only from within patch_apply. Args: patches: Array of Patch objects. Returns: The padding string added to each side.
1.906536
1.85043
1.030321
patch_size = self.Match_MaxBits if patch_size == 0: # Python has the option of not splitting strings due to its ability # to handle integers of arbitrary precision. return for x in range(len(patches)): if patches[x].length1 <= patch_size: continue bigpatch = patches[x] # Remove the big old patch. del patches[x] x -= 1 start1 = bigpatch.start1 start2 = bigpatch.start2 precontext = '' while len(bigpatch.diffs) != 0: # Create one of several smaller patches. patch = patch_obj() empty = True patch.start1 = start1 - len(precontext) patch.start2 = start2 - len(precontext) if precontext: patch.length1 = patch.length2 = len(precontext) patch.diffs.append((self.DIFF_EQUAL, precontext)) while (len(bigpatch.diffs) != 0 and patch.length1 < patch_size - self.Patch_Margin): (diff_type, diff_text) = bigpatch.diffs[0] if diff_type == self.DIFF_INSERT: # Insertions are harmless. patch.length2 += len(diff_text) start2 += len(diff_text) patch.diffs.append(bigpatch.diffs.pop(0)) empty = False elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and patch.diffs[0][0] == self.DIFF_EQUAL and len(diff_text) > 2 * patch_size): # This is a large deletion. Let it pass in one chunk. patch.length1 += len(diff_text) start1 += len(diff_text) empty = False patch.diffs.append((diff_type, diff_text)) del bigpatch.diffs[0] else: # Deletion or equality. Only take as much as we can stomach. diff_text = diff_text[:patch_size - patch.length1 - self.Patch_Margin] patch.length1 += len(diff_text) start1 += len(diff_text) if diff_type == self.DIFF_EQUAL: patch.length2 += len(diff_text) start2 += len(diff_text) else: empty = False patch.diffs.append((diff_type, diff_text)) if diff_text == bigpatch.diffs[0][1]: del bigpatch.diffs[0] else: bigpatch.diffs[0] = (bigpatch.diffs[0][0], bigpatch.diffs[0][1][len(diff_text):]) # Compute the head context for the next patch. precontext = self.diff_text2(patch.diffs) precontext = precontext[-self.Patch_Margin:] # Append the end context for this patch. postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin] if postcontext: patch.length1 += len(postcontext) patch.length2 += len(postcontext) if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL: patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] + postcontext) else: patch.diffs.append((self.DIFF_EQUAL, postcontext)) if not empty: x += 1 patches.insert(x, patch)
def patch_splitMax(self, patches)
Look through the patches and break up any which are longer than the maximum limit of the match algorithm. Intended to be called only from within patch_apply. Args: patches: Array of Patch objects.
1.997051
1.973434
1.011967
text = []
for patch in patches:
    text.append(str(patch))
return "".join(text)
def patch_toText(self, patches)
Take a list of patches and return a textual representation. Args: patches: Array of Patch objects. Returns: Text representation of patches.
4.027044
4.580341
0.879202
text = []
for (op, data) in diffs:
    if op == self.DIFF_INSERT:
        # High ascii will raise UnicodeDecodeError.  Use Unicode instead.
        data = data.encode("utf-8")
        text.append("+" + urllib.parse.quote(data, "!~*'();/?:@&=+$,# "))
    elif op == self.DIFF_DELETE:
        text.append("-%d" % len(data))
    elif op == self.DIFF_EQUAL:
        text.append("=%d" % len(data))
return "\t".join(text)
def diff_toDelta(self, diffs)
Crush the diff into an encoded string which describes the operations required to transform text1 into text2. E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. Args: diffs: Array of diff tuples. Returns: Delta text.
3.718794
3.44229
1.080326
s = {}
for char in pattern:
    s[char] = 0
for i in range(len(pattern)):
    s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s
def match_alphabet(self, pattern)
Initialise the alphabet for the Bitap algorithm. Args: pattern: The text to encode. Returns: Hash of character locations.
3.23097
2.896942
1.115304
if type(textline) == unicode: # Patches should be composed of a subset of ascii chars, Unicode not # required. If this encode raises UnicodeEncodeError, patch is invalid. textline = textline.encode("ascii") patches = [] if not textline: return patches text = textline.split('\n') while len(text) != 0: m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0]) if not m: raise ValueError("Invalid patch string: " + text[0]) patch = patch_obj() patches.append(patch) patch.start1 = int(m.group(1)) if m.group(2) == '': patch.start1 -= 1 patch.length1 = 1 elif m.group(2) == '0': patch.length1 = 0 else: patch.start1 -= 1 patch.length1 = int(m.group(2)) patch.start2 = int(m.group(3)) if m.group(4) == '': patch.start2 -= 1 patch.length2 = 1 elif m.group(4) == '0': patch.length2 = 0 else: patch.start2 -= 1 patch.length2 = int(m.group(4)) del text[0] while len(text) != 0: if text[0]: sign = text[0][0] else: sign = '' line = urllib.unquote(text[0][1:]) line = line.decode("utf-8") if sign == '+': # Insertion. patch.diffs.append((self.DIFF_INSERT, line)) elif sign == '-': # Deletion. patch.diffs.append((self.DIFF_DELETE, line)) elif sign == ' ': # Minor equality. patch.diffs.append((self.DIFF_EQUAL, line)) elif sign == '@': # Start of next patch. break elif sign == '': # Blank line? Whatever. pass else: # WTF? raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line)) del text[0] return patches
def patch_fromText(self, textline)
Parse a textual representation of patches and return a list of patch objects. Args: textline: Text representation of patches. Returns: Array of Patch objects. Raises: ValueError: If invalid input.
2.058603
1.995561
1.031591
if formatter is not None:
    formatter.prepare(left, right)
if diff_options is None:
    diff_options = {}
differ = diff.Differ(**diff_options)
diffs = differ.diff(left, right)

if formatter is None:
    return list(diffs)

return formatter.format(diffs, left)
def diff_trees(left, right, diff_options=None, formatter=None)
Takes two lxml root elements or element trees
2.910863
3.036508
0.958622
return _diff(etree.fromstring, left, right,
             diff_options=diff_options, formatter=formatter)
def diff_texts(left, right, diff_options=None, formatter=None)
Takes two Unicode strings containing XML
5.167037
5.010765
1.031187
return _diff(etree.parse, left, right,
             diff_options=diff_options, formatter=formatter)
def diff_files(left, right, diff_options=None, formatter=None)
Takes two filenames or streams, and diffs the XML in those files
5.971493
5.695004
1.048549
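The diff_* wrappers above look like the public entry points of the xmldiff package; a hedged usage sketch assuming they live in `xmldiff.main` (the printed action repr is indicative only):

from xmldiff import main  # assumption: these wrappers are xmldiff's main module

left = '<doc><p>Old text</p></doc>'
right = '<doc><p>New text</p></doc>'
actions = main.diff_texts(left, right)
print(actions)  # e.g. [UpdateTextIn(node='/doc/p[1]', text='New text')]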
patcher = patch.Patcher()
return patcher.patch(actions, tree)
def patch_tree(actions, tree)
Takes an lxml root element or element tree, and a list of actions
5.375236
6.159687
0.872648
tree = etree.fromstring(tree)
actions = patch.DiffParser().parse(actions)
tree = patch_tree(actions, tree)
return etree.tounicode(tree)
def patch_text(actions, tree)
Takes a string with XML and a string with actions
5.094909
4.261533
1.195558
tree = etree.parse(tree)
if isinstance(actions, six.string_types):
    # It's a string, so it's a filename
    with open(actions) as f:
        actions = f.read()
else:
    # We assume it's a stream
    actions = actions.read()
actions = patch.DiffParser().parse(actions)
tree = patch_tree(actions, tree)
return etree.tounicode(tree)
def patch_file(actions, tree)
Takes two filenames or streams, one with XML, the other a diff
3.580269
2.86472
1.24978
# We don't want to diff comments:
self._remove_comments(left_tree)
self._remove_comments(right_tree)

self.placeholderer.do_tree(left_tree)
self.placeholderer.do_tree(right_tree)
def prepare(self, left_tree, right_tree)
prepare() is run on the trees before diffing. This is so the formatter can apply magic before diffing.
5.533523
4.739836
1.16745
def draw(self): ''' Draws samples from the `true` distribution. Returns: `np.ndarray` of samples. ''' observed_arr = None for result_tuple in self.__feature_generator.generate(): observed_arr = result_tuple[0] break observed_arr = observed_arr.astype(float) if self.__norm_mode == "z_score": if observed_arr.std() != 0: observed_arr = (observed_arr - observed_arr.mean()) / observed_arr.std() elif self.__norm_mode == "min_max": if (observed_arr.max() - observed_arr.min()) != 0: observed_arr = (observed_arr - observed_arr.min()) / (observed_arr.max() - observed_arr.min()) elif self.__norm_mode == "tanh": observed_arr = np.tanh(observed_arr) return observed_arr
Draws samples from the `true` distribution. Returns: `np.ndarray` of samples.
null
null
null
def generate(self): ''' Generate noise samples. Returns: `np.ndarray` of samples. ''' sampled_arr = np.zeros((self.__batch_size, self.__channel, self.__seq_len, self.__dim)) for batch in range(self.__batch_size): for i in range(len(self.__program_list)): program_key = self.__program_list[i] key = np.random.randint(low=0, high=len(self.__midi_df_list)) midi_df = self.__midi_df_list[key] midi_df = midi_df[midi_df.program == program_key] if midi_df.shape[0] < self.__seq_len: continue row = np.random.uniform( low=midi_df.start.min(), high=midi_df.end.max() - (self.__seq_len * self.__time_fraction) ) for seq in range(self.__seq_len): start = row + (seq * self.__time_fraction) end = row + ((seq+1) * self.__time_fraction) df = midi_df[(start <= midi_df.start) & (midi_df.start <= end)] sampled_arr[batch, i, seq] = self.__convert_into_feature(df) return sampled_arr
Generate noise samples. Returns: `np.ndarray` of samples.
null
null
null
def generate(self): ''' Draws samples from the `true` distribution. Returns: `np.ndarray` of samples. ''' observed_arr = None for result_tuple in self.__feature_generator.generate(): observed_arr = result_tuple[0] break if self.noise_sampler is not None: self.noise_sampler.output_shape = observed_arr.shape observed_arr += self.noise_sampler.generate() observed_arr = observed_arr.astype(float) if self.__norm_mode == "z_score": if observed_arr.std() != 0: observed_arr = (observed_arr - observed_arr.mean()) / observed_arr.std() elif self.__norm_mode == "min_max": if (observed_arr.max() - observed_arr.min()) != 0: observed_arr = (observed_arr - observed_arr.min()) / (observed_arr.max() - observed_arr.min()) elif self.__norm_mode == "tanh": observed_arr = np.tanh(observed_arr) return observed_arr
Draws samples from the `true` distribution. Returns: `np.ndarray` of samples.
null
null
null
def compute(self, x_arr, y_arr):
    '''
    Compute distance.

    Args:
        x_arr:    `np.ndarray` of vectors.
        y_arr:    `np.ndarray` of vectors.

    Returns:
        `np.ndarray` of distances.
    '''
    y_arr += 1e-08
    return np.sum(x_arr * np.log(x_arr / y_arr), axis=-1)
Compute distance. Args: x_arr: `np.ndarray` of vectors. y_arr: `np.ndarray` of vectors. Returns: `np.ndarray` of distances.
null
null
null
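A standalone numeric check of the KL-divergence formula used in compute() above:

import numpy as np

def kl_divergence(x_arr, y_arr):
    # Same formula as compute() above; 1e-08 guards against division by zero in y_arr.
    y_arr = y_arr + 1e-08
    return np.sum(x_arr * np.log(x_arr / y_arr), axis=-1)

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
print(kl_divergence(p, q))  # ~0.51 nats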
def generate_ngram_data_set(self, token_list, n=2):
    '''
    Generate the N-gram's pair.

    Args:
        token_list:    The list of tokens.
        n:             N.

    Returns:
        zip of Tuple(Training N-gram data, Target N-gram data)
    '''
    n_gram_tuple_zip = self.generate_tuple_zip(token_list, n)
    n_gram_tuple_list = [n_gram_tuple for n_gram_tuple in n_gram_tuple_zip]
    n_gram_data_set = self.generate_tuple_zip(n_gram_tuple_list, 2)
    return n_gram_data_set
Generate the N-gram's pair. Args: token_list: The list of tokens. n: N. Returns: zip of Tuple(Training N-gram data, Target N-gram data)
null
null
null
def generate_skip_gram_data_set(self, token_list):
    '''
    Generate the Skip-gram's pair.

    Args:
        token_list:    The list of tokens.

    Returns:
        zip of Tuple(Training N-gram data, Target N-gram data)
    '''
    n_gram_tuple_zip = self.generate_tuple_zip(token_list, 3)
    skip_gram_list = []
    for pre, point, post in n_gram_tuple_zip:
        skip_gram_list.append((point, pre))
        skip_gram_list.append((point, post))
    return zip(skip_gram_list)
Generate the Skip-gram's pair. Args: token_list: The list of tokens. Returns: zip of Tuple(Training N-gram data, Target N-gram data)
null
null
null
def generate_tuple_zip(self, token_list, n=2):
    '''
    Generate the N-gram.

    Args:
        token_list:    The list of tokens.
        n:             N.

    Returns:
        zip of Tuple(N-gram)
    '''
    return zip(*[token_list[i:] for i in range(n)])
Generate the N-gram. Args: token_list: The list of tokens. n: N. Returns: zip of Tuple(N-gram)
null
null
null
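The zip(*...) idiom in generate_tuple_zip is compact; a standalone illustration of what it yields:

# Each offset slice becomes one column of the zip, giving overlapping n-grams.
tokens = ["the", "quick", "brown", "fox"]
bigrams = list(zip(*[tokens[i:] for i in range(2)]))
print(bigrams)  # [('the', 'quick'), ('quick', 'brown'), ('brown', 'fox')]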
def draw(self): ''' Draws samples from the `true` distribution. Returns: `np.ndarray` of samples. ''' sampled_arr = np.empty((self.__batch_size, self.__seq_len, self.__dim)) for batch in range(self.__batch_size): key = np.random.randint(low=0, high=len(self.__midi_df_list)) midi_df = self.__midi_df_list[key] program_arr = midi_df.program.drop_duplicates().values key = np.random.randint(low=0, high=program_arr.shape[0]) program_key = program_arr[key] midi_df = midi_df[midi_df.program == program_key] if midi_df.shape[0] < self.__seq_len: raise ValueError("The length of musical performance (program: " + str(program_key) + " is short.") row = np.random.uniform( low=midi_df.start.min(), high=midi_df.end.max() - (self.__seq_len * self.__time_fraction) ) for seq in range(self.__seq_len): start = row + (seq * self.__time_fraction) end = row + ((seq+1) * self.__time_fraction) df = midi_df[(start <= midi_df.start) & (midi_df.start <= end)] sampled_arr[batch, seq] = self.__convert_into_feature(df) return sampled_arr
Draws samples from the `true` distribution. Returns: `np.ndarray` of samples.
null
null
null
'''
Download PDF file and transform its document to string.

Args:
    url:    PDF url.

Returns:
    string.
'''
path, headers = urllib.request.urlretrieve(url)
return self.path_to_text(path)
def url_to_text(self, url)
Download PDF file and transform its document to string. Args: url: PDF url. Returns: string.
7.284948
2.966573
2.455678
''' Transform local PDF file to string. Args: path: path to PDF file. Returns: string. ''' rsrcmgr = PDFResourceManager() retstr = StringIO() codec = 'utf-8' laparams = LAParams() device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams) fp = open(path, 'rb') interpreter = PDFPageInterpreter(rsrcmgr, device) password = "" maxpages = 0 caching = True pagenos = set() pages_data = PDFPage.get_pages( fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True ) for page in pages_data: interpreter.process_page(page) text = retstr.getvalue() text = text.replace("\n", "") fp.close() device.close() retstr.close() return text
def path_to_text(self, path)
Transform local PDF file to string. Args: path: path to PDF file. Returns: string.
1.866135
1.561491
1.195098
''' setter '''
if isinstance(value, TokenizableDoc):
    self.__tokenizable_doc = value
else:
    raise TypeError()
def set_tokenizable_doc(self, value)
setter
4.725719
4.632932
1.020028
'''
Divide string into sentence list.

Args:
    data:       string.
    counter:    recursive counter.

Returns:
    List of sentences.
'''
delimiter = self.delimiter_list[counter]
sentence_list = []
[sentence_list.append(sentence + delimiter) for sentence in data.split(delimiter) if sentence != ""]
if counter + 1 < len(self.delimiter_list):
    sentence_list_r = []
    [sentence_list_r.extend(self.listup_sentence(sentence, counter + 1)) for sentence in sentence_list]
    sentence_list = sentence_list_r
return sentence_list
def listup_sentence(self, data, counter=0)
Divide string into sentence list. Args: data: string. counter: recursive counter. Returns: List of sentences.
3.108744
2.222029
1.399056
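A standalone sketch of the recursive delimiter split above, with an explicit delimiter list instead of the instance attribute (the function and parameter names here are illustrative):

def listup_sentence(data, delimiter_list, counter=0):
    # Split on the current delimiter, re-attach it, then recurse on the next delimiter.
    delimiter = delimiter_list[counter]
    sentence_list = [s + delimiter for s in data.split(delimiter) if s != ""]
    if counter + 1 < len(delimiter_list):
        nested_list = []
        for sentence in sentence_list:
            nested_list.extend(listup_sentence(sentence, delimiter_list, counter + 1))
        sentence_list = nested_list
    return sentence_list

print(listup_sentence("One. Two. Three.", ["."]))  # ['One.', ' Two.', ' Three.']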
''' Entry Point. Args: url: PDF url. ''' # The object of Web-scraping. web_scrape = WebScraping() # Set the object of reading PDF files. web_scrape.readable_web_pdf = WebPDFReading() # Execute Web-scraping. document = web_scrape.scrape(url) # The object of automatic sumamrization. auto_abstractor = AutoAbstractor() # Set tokenizer. This is japanese tokenizer with MeCab. auto_abstractor.tokenizable_doc = MeCabTokenizer() # Object of abstracting and filtering document. abstractable_doc = TopNRankAbstractor() # Execute summarization. result_dict = auto_abstractor.summarize(document, abstractable_doc) # Output summarized sentence. [print(sentence) for sentence in result_dict["summarize_result"]]
def Main(url)
Entry Point. Args: url: PDF url.
8.095263
7.068498
1.145259
''' Observation data. Args: success: The number of success. failure: The number of failure. ''' if isinstance(success, int) is False: if isinstance(success, float) is False: raise TypeError() if isinstance(failure, int) is False: if isinstance(failure, float) is False: raise TypeError() if success <= 0: raise ValueError() if failure <= 0: raise ValueError() self.__success += success self.__failure += failure
def observe(self, success, failure)
Observation data. Args: success: The number of success. failure: The number of failure.
2.611812
2.033792
1.284208
'''
Compute likelihood.

Returns:
    likelihood.
'''
try:
    likelihood = self.__success / (self.__success + self.__failure)
except ZeroDivisionError:
    likelihood = 0.0
return likelihood
def likelihood(self)
Compute likelihood. Returns: likelihood.
5.095184
3.448633
1.47745
'''
Compute expected value.

Returns:
    Expected value.
'''
alpha = self.__success + self.__default_alpha
beta = self.__failure + self.__default_beta
try:
    expected_value = alpha / (alpha + beta)
except ZeroDivisionError:
    expected_value = 0.0
return expected_value
def expected_value(self)
Compute expected value. Returns: Expected value.
4.489103
3.587366
1.251365
'''
Compute variance.

Returns:
    variance.
'''
alpha = self.__success + self.__default_alpha
beta = self.__failure + self.__default_beta
try:
    # Variance of Beta(alpha, beta): alpha * beta / ((alpha + beta)^2 * (alpha + beta + 1)).
    variance = alpha * beta / (((alpha + beta) ** 2) * (alpha + beta + 1))
except ZeroDivisionError:
    variance = 0.0
return variance
def variance(self)
Compute variance. Returns: variance.
4.756139
3.901515
1.219049
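The observe, likelihood, expected_value and variance records above maintain a Beta posterior over a success probability. A standalone check of the closed-form mean and variance; the default priors alpha0 = beta0 = 1 are an assumption of this sketch:

def beta_stats(successes, failures, alpha0=1.0, beta0=1.0):
    # Mean and variance of Beta(alpha, beta) with alpha = successes + alpha0, beta = failures + beta0.
    alpha = successes + alpha0
    beta = failures + beta0
    mean = alpha / (alpha + beta)
    variance = alpha * beta / (((alpha + beta) ** 2) * (alpha + beta + 1))
    return mean, variance

print(beta_stats(8, 2))  # (0.75, 0.0144...)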
'''
Concrete method.

Args:
    state_key:    The key of state. This value is a point on the map.

Returns:
    [(x, y)]
'''
if state_key in self.__state_action_list_dict:
    return self.__state_action_list_dict[state_key]
else:
    action_list = []
    state_key_list = [
        action_list.extend(self.__state_action_list_dict[k])
        for k in self.__state_action_list_dict.keys()
        if len([s for s in state_key if s in k]) > 0
    ]
    return action_list
def extract_possible_actions(self, state_key)
Concrete method. Args: state_key: The key of state. This value is a point on the map. Returns: [(x, y)]
4.176904
2.262469
1.846171
'''
Compute the reward value.

Args:
    state_key:     The key of state.
    action_key:    The key of action.

Returns:
    Reward value.
'''
reward_value = 0.0
if state_key in self.__state_action_list_dict:
    if action_key in self.__state_action_list_dict[state_key]:
        reward_value = 1.0
return reward_value
def observe_reward_value(self, state_key, action_key)
Compute the reward value. Args: state_key: The key of state. action_key: The key of action. Returns: Reward value.
2.663547
2.031289
1.311259
def convert_tokens_into_matrix(self, token_list):
    '''
    Create matrix of sentences.

    Args:
        token_list:    The list of tokens.

    Returns:
        2-D `np.ndarray` of sentences. Each row means one hot vectors of one sentence.
    '''
    return np.array(self.vectorize(token_list)).astype(np.float32)
Create matrix of sentences. Args: token_list: The list of tokens. Returns: 2-D `np.ndarray` of sentences. Each row means one hot vectors of one sentence.
null
null
null
def tokenize(self, vector_list):
    '''
    Tokenize vector.

    Args:
        vector_list:    The list of vector of one token.

    Returns:
        token
    '''
    vector_arr = np.array(vector_list)
    if vector_arr.ndim == 1:
        key_arr = vector_arr.argmax()
    else:
        key_arr = vector_arr.argmax(axis=-1)
    return self.__token_arr[key_arr]
Tokenize vector. Args: vector_list: The list of vector of one token. Returns: token
null
null
null
'''
Vectorize token list.

Args:
    token_list:    The list of tokens.

Returns:
    [vector of token, vector of token, vector of token, ...]
'''
vector_list = [self.__collection.tf_idf(token, self.__collection) for token in token_list]
return vector_list
def vectorize(self, token_list)
Vectorize token list. Args: token_list: The list of tokens. Returns: [vector of token, vector of token, vector of token, ...]
5.766688
2.713814
2.124938
''' Move in the feature map. Args: current_pos: The now position. Returns: The next position. ''' if self.__move_range is not None: next_pos = np.random.randint(current_pos - self.__move_range, current_pos + self.__move_range) if next_pos < 0: next_pos = 0 elif next_pos >= self.var_arr.shape[0] - 1: next_pos = self.var_arr.shape[0] - 1 return next_pos else: next_pos = np.random.randint(self.var_arr.shape[0] - 1) return next_pos
def __move(self, current_pos)
Move in the feature map. Args: current_pos: The current position. Returns: The next position.
2.557589
1.962831
1.30301
''' Annealing. ''' shape_list = list(self.var_arr.shape) shape_list[0] = self.__cycles_num + 1 self.var_log_arr = np.zeros(tuple(shape_list)) current_pos = self.__start_pos current_var_arr = self.var_arr[current_pos, :] current_cost_arr = self.__cost_functionable.compute(self.var_arr[current_pos, :]) self.computed_cost_arr = np.zeros(self.__cycles_num + 1) self.computed_cost_arr[0] = current_cost_arr t = self.__init_temp delta_e_avg = 0.0 pos_log_list = [current_pos] predicted_log_list = [] for i in range(self.__cycles_num): if isinstance(self.__tolerance_diff_e, float) and len(predicted_log_list) > 1: diff = abs(predicted_log_list[-1][2] - predicted_log_list[-2][2]) if diff < self.__tolerance_diff_e: break for j in range(self.__trials_per_cycle): current_pos = self.__move(current_pos) pos_log_list.append(current_pos) self.__now_dist_mat_arr = self.var_arr[current_pos, :] cost_arr = self.__cost_functionable.compute(self.__now_dist_mat_arr) delta_e = np.abs(cost_arr - current_cost_arr) if (cost_arr > current_cost_arr): if (i == 0 and j == 0): delta_e_avg = delta_e try: p = np.exp(-delta_e/(delta_e_avg * t)) except ZeroDivisionError: p = 0.0 if (np.random.random() < p): accept = True else: accept = False else: accept = True p = 0.0 if accept is True: current_var_arr = self.__now_dist_mat_arr current_cost_arr = cost_arr self.__accepted_sol_num = self.__accepted_sol_num + 1.0 delta_e_avg = (delta_e_avg * (self.__accepted_sol_num - 1.0) + delta_e) / self.__accepted_sol_num predicted_log_list.append((cost_arr , delta_e, delta_e_avg, p, int(accept))) self.var_log_arr[i + 1] = current_var_arr self.computed_cost_arr[i + 1] = current_cost_arr t = t * self.__fractional_reduction self.predicted_log_arr = np.array(predicted_log_list)
def annealing(self)
Annealing.
2.687699
2.683676
1.001499
def draw(self): ''' Draws samples from the `fake` distribution. Returns: `np.ndarray` of samples. ''' observed_arr = self.noise_sampler.generate() _ = self.inference(observed_arr) feature_arr = self.__convolutional_auto_encoder.extract_feature_points_arr() for i in range(len(self.__deconvolution_layer_list)): try: feature_arr = self.__deconvolution_layer_list[i].forward_propagate(feature_arr) except: self.__logger.debug("Error raised in Deconvolution layer " + str(i + 1)) raise return feature_arr
Draws samples from the `fake` distribution. Returns: `np.ndarray` of samples.
null
null
null
def learn(self, grad_arr): ''' Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. Returns: `np.ndarray` of delta or gradients. ''' deconvolution_layer_list = self.__deconvolution_layer_list[::-1] for i in range(len(deconvolution_layer_list)): try: grad_arr = deconvolution_layer_list[i].back_propagate(grad_arr) except: self.__logger.debug("Error raised in Convolution layer " + str(i + 1)) raise self.__optimize_deconvolution_layer(self.__learning_rate, 1) layerable_cnn_list = self.__convolutional_auto_encoder.layerable_cnn_list[::-1] for i in range(len(layerable_cnn_list)): try: grad_arr = layerable_cnn_list[i].back_propagate(grad_arr) except: self.__logger.debug( "Delta computation raised an error in CNN layer " + str(len(layerable_cnn_list) - i) ) raise self.__convolutional_auto_encoder.optimize(self.__learning_rate, 1) return grad_arr
Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. Returns: `np.ndarray` of delta or gradients.
null
null
null
def __optimize_deconvolution_layer(self, learning_rate, epoch): ''' Back propagation for Deconvolution layer. Args: learning_rate: Learning rate. epoch: Now epoch. ''' params_list = [] grads_list = [] for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0: params_list.append(self.__deconvolution_layer_list[i].graph.weight_arr) grads_list.append(self.__deconvolution_layer_list[i].delta_weight_arr) for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0: params_list.append(self.__deconvolution_layer_list[i].graph.bias_arr) grads_list.append(self.__deconvolution_layer_list[i].delta_bias_arr) params_list = self.__opt_params.optimize( params_list, grads_list, learning_rate ) i = 0 for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0: self.__deconvolution_layer_list[i].graph.weight_arr = params_list.pop(0) if ((epoch + 1) % self.__attenuate_epoch == 0): self.__deconvolution_layer_list[i].graph.weight_arr = self.__opt_params.constrain_weight( self.__deconvolution_layer_list[i].graph.weight_arr ) for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0: self.__deconvolution_layer_list[i].graph.bias_arr = params_list.pop(0) for i in range(len(self.__deconvolution_layer_list)): if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0: if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0: self.__deconvolution_layer_list[i].reset_delta()
Back propagation for Deconvolution layer. Args: learning_rate: Learning rate. epoch: Now epoch.
null
null
null
def update(self): ''' Update the encoder and the decoder to minimize the reconstruction error of the inputs. Returns: `np.ndarray` of the reconstruction errors. ''' observed_arr = self.noise_sampler.generate() inferenced_arr = self.inference(observed_arr) error_arr = self.__convolutional_auto_encoder.computable_loss.compute_loss( observed_arr, inferenced_arr ) delta_arr = self.__convolutional_auto_encoder.computable_loss.compute_delta( observed_arr, inferenced_arr ) delta_arr = self.__convolutional_auto_encoder.back_propagation(delta_arr) self.__convolutional_auto_encoder.optimize(self.__learning_rate, 1) return error_arr
Update the encoder and the decoder to minimize the reconstruction error of the inputs. Returns: `np.ndarray` of the reconstruction errors.
null
null
null
''' getter ''' if isinstance(self.__readable_web_pdf, ReadableWebPDF) is False and self.__readable_web_pdf is not None: raise TypeError("The type of __readable_web_pdf must be ReadableWebPDF.") return self.__readable_web_pdf
def get_readable_web_pdf(self)
getter
3.841331
3.531823
1.087634
''' setter ''' if isinstance(value, ReadableWebPDF) is False and value is not None: raise TypeError("The type of __readable_web_pdf must be ReadableWebPDF.") self.__readable_web_pdf = value
def set_readable_web_pdf(self, value)
setter
4.326009
4.350037
0.994477
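A note on how these paired accessors are consumed: the `scrape` method below reads `self.readable_web_pdf` without the name-mangled prefix, which suggests the getter and setter are exposed through Python's built-in `property()`. A minimal sketch of that wiring, assuming a class named `WebScraping` (the class name and the omission of the type checks are illustrative assumptions):

class WebScraping(object):
    def get_readable_web_pdf(self):
        # Type check from the original accessor omitted for brevity.
        return self.__readable_web_pdf

    def set_readable_web_pdf(self, value):
        self.__readable_web_pdf = value

    # Expose the pair as an attribute-style property, e.g. `self.readable_web_pdf`.
    readable_web_pdf = property(get_readable_web_pdf, set_readable_web_pdf)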
''' Execute Web-Scraping. The target DOM objects are in self.__dom_object_list. Args: url: Web site URL. Returns: The scraped text as a string. @TODO(chimera0): check URLs format. ''' if isinstance(url, str) is False: raise TypeError("The type of url must be str.") if self.readable_web_pdf is not None and self.readable_web_pdf.is_pdf_url(url) is True: web_data = self.readable_web_pdf.url_to_text(url) else: web_data = "" req = urllib.request.Request(url=url) with urllib.request.urlopen(req) as f: web = f.read().decode('utf-8') dom = pq(web) [dom(remove_object).remove() for remove_object in self.__remove_object_list] for dom_object in self.__dom_object_list: web_data += dom(dom_object).text() sleep(1) return web_data
def scrape(self, url)
Execute Web-Scraping. The target DOM objects are in self.__dom_object_list. Args: url: Web site URL. Returns: The scraped text as a string. @TODO(chimera0): check URLs format.
4.684637
2.463641
1.901509
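For readers unfamiliar with pyquery, the HTML branch of `scrape` reduces to: fetch the page, strip noisy nodes, and concatenate the text of the selected DOM objects. A self-contained sketch of just that branch (the PDF handling via `ReadableWebPDF` is omitted, and the selectors are illustrative assumptions, not the class's defaults):

import urllib.request
from pyquery import PyQuery as pq

def scrape_text(url, dom_objects=("p", "h1", "h2"), remove_objects=("script", "style")):
    # Fetch and decode the page.
    req = urllib.request.Request(url=url)
    with urllib.request.urlopen(req) as f:
        web = f.read().decode("utf-8")
    dom = pq(web)
    # Remove nodes that only add noise to the extracted text.
    for selector in remove_objects:
        dom(selector).remove()
    # Concatenate the text of the target DOM objects.
    return "".join(dom(selector).text() for selector in dom_objects)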
''' Extract MIDI file. Args: file_path: File path of MIDI. is_drum: Extract drum data or not. Returns: pd.DataFrame(columns=["program", "start", "end", "pitch", "velocity", "duration"]) ''' midi_data = pretty_midi.PrettyMIDI(file_path) note_tuple_list = [] for instrument in midi_data.instruments: if (is_drum is False and instrument.is_drum is False) or (is_drum is True and instrument.is_drum is True): for note in instrument.notes: note_tuple_list.append((instrument.program, note.start, note.end, note.pitch, note.velocity)) note_df = pd.DataFrame(note_tuple_list, columns=["program", "start", "end", "pitch", "velocity"]) note_df = note_df.sort_values(by=["program", "start", "end"]) note_df["duration"] = note_df.end - note_df.start return note_df
def extract(self, file_path, is_drum=False)
Extract MIDI file. Args: file_path: File path of MIDI. is_drum: Extract drum data or not. Returns: pd.DataFrame(columns=["program", "start", "end", "pitch", "velocity", "duration"])
2.057857
1.561886
1.317546
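The frame returned by `extract` is plain pandas, so downstream processing needs no MIDI-specific code. A toy sketch with hand-made rows (values are made up) showing the sort and the `duration` column used above:

import pandas as pd

# Toy rows standing in for extract()'s output: program, start, end, pitch, velocity.
note_df = pd.DataFrame(
    [(0, 0.5, 1.0, 64, 90), (0, 0.0, 0.5, 60, 100), (41, 0.0, 2.0, 55, 80)],
    columns=["program", "start", "end", "pitch", "velocity"],
)
note_df = note_df.sort_values(by=["program", "start", "end"])
note_df["duration"] = note_df.end - note_df.start
print(note_df)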
''' Save MIDI file. Args: file_path: File path of MIDI. note_df: `pd.DataFrame` of note data. ''' chord = pretty_midi.PrettyMIDI() for program in note_df.program.drop_duplicates().values.tolist(): df = note_df[note_df.program == program] midi_obj = pretty_midi.Instrument(program=program) for i in range(df.shape[0]): note = pretty_midi.Note( velocity=int(df.iloc[i, :]["velocity"]), pitch=int(df.iloc[i, :]["pitch"]), start=float(df.iloc[i, :]["start"]), end=float(df.iloc[i, :]["end"]) ) # Add the note to this instrument. midi_obj.notes.append(note) # Add the instrument to the PrettyMIDI object. chord.instruments.append(midi_obj) # Write out the MIDI data. chord.write(file_path)
def save(self, file_path, note_df)
Save MIDI file. Args: file_path: File path of MIDI. note_df: `pd.DataFrame` of note data.
2.728137
2.400225
1.136617
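Conversely, the save path maps each DataFrame row onto a `pretty_midi.Note` inside a `pretty_midi.Instrument`. A minimal self-contained sketch writing a single made-up note (the program number and output path are illustrative):

import pretty_midi

chord = pretty_midi.PrettyMIDI()
piano = pretty_midi.Instrument(program=0)  # General MIDI program 0: Acoustic Grand Piano.
piano.notes.append(pretty_midi.Note(velocity=100, pitch=60, start=0.0, end=0.5))
chord.instruments.append(piano)
chord.write("single_note.mid")  # Illustrative output path.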
''' Compute cost. Args: x: `np.ndarray` of explanatory variables. Returns: cost ''' q_learning = copy(self.__greedy_q_learning) q_learning.epsilon_greedy_rate = x[0] q_learning.alpha_value = x[1] q_learning.gamma_value = x[2] if self.__init_state_key is not None: q_learning.learn(state_key=self.__init_state_key, limit=int(x[3])) else: q_learning.learn(limit=int(x[3])) q_sum = q_learning.q_df.q_value.sum() if q_sum != 0: cost = q_learning.q_df.shape[0] / q_sum else: cost = q_learning.q_df.shape[0] / 1e-4 return cost
def compute(self, x)
Compute cost. Args: x: `np.ndarray` of explanatory variables. Returns: cost
3.756557
3.230602
1.162804
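The cost being minimized is the number of learned state-action rows divided by the total Q-value, so parameter settings that yield dense, high-valued Q-tables score lower. A toy sketch of that computation with a hand-made stand-in for `q_learning.q_df` (values are made up):

import pandas as pd

# Stand-in for q_learning.q_df with made-up rows.
q_df = pd.DataFrame({
    "state_key": [0, 0, 1],
    "action_key": [0, 1, 0],
    "q_value": [0.2, 0.5, 0.3],
})
q_sum = q_df.q_value.sum()
cost = q_df.shape[0] / q_sum if q_sum != 0 else q_df.shape[0] / 1e-4
print(cost)  # 3 rows / 1.0 total Q-value = 3.0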
''' Entry Point. Args: url: target url. ''' # The object of Web-Scraping. web_scrape = WebScraping() # Execute Web-Scraping. document = web_scrape.scrape(url) # The object of automatic summarization with N-gram. auto_abstractor = NgramAutoAbstractor() # n-gram object auto_abstractor.n_gram = Ngram() # n of n-gram auto_abstractor.n = 3 # Set tokenizer. This is japanese tokenizer with MeCab. auto_abstractor.tokenizable_doc = MeCabTokenizer() # Object of abstracting and filtering document. abstractable_doc = TopNRankAbstractor() # Execute summarization. result_dict = auto_abstractor.summarize(document, abstractable_doc) # Output 3 summarized sentences. limit = 3 i = 1 for sentence in result_dict["summarize_result"]: print(sentence) if i >= limit: break i += 1
def Main(url)
Entry Point. Args: url: target url.
5.944719
5.443427
1.092091
''' getter ''' if isinstance(self.__target_n, int) is False: raise TypeError("The type of __target_n must be int.") return self.__target_n
def get_target_n(self)
getter
4.726547
4.260841
1.109299
''' setter ''' if isinstance(value, int) is False: raise TypeError("The type of __target_n must be int.") self.__target_n = value
def set_target_n(self, value)
setter
4.960048
4.999998
0.99201
''' getter ''' if isinstance(self.__cluster_threshold, int) is False: raise TypeError("The type of __cluster_threshold must be int.") return self.__cluster_threshold
def get_cluster_threshold(self)
getter
4.96618
4.451202
1.115694
''' setter ''' if isinstance(value, int) is False: raise TypeError("The type of __cluster_threshold must be int.") self.__cluster_threshold = value
def set_cluster_threshold(self, value)
setter
5.26591
5.232596
1.006367
''' getter ''' if isinstance(self.__top_sentences, int) is False: raise TypeError("The type of __top_sentences must be int.") return self.__top_sentences
def get_top_sentences(self)
getter
5.562199
5.030997
1.105586
''' setter ''' if isinstance(value, int) is False: raise TypeError("The type of __top_sentences must be int.") self.__top_sentences = value
def set_top_sentences(self, value)
setter
5.423862
5.419242
1.000853
''' Execute summarization. Args: document: The target document. Abstractor: The object of AbstractableDoc. similarity_filter: The object of SimilarityFilter. Returns: dict data. - "summarize_result": The list of summarized sentences. - "scoring_data": The list of scores. ''' if isinstance(document, str) is False: raise TypeError("The type of document must be str.") if isinstance(Abstractor, AbstractableDoc) is False: raise TypeError("The type of Abstractor must be AbstractableDoc.") if isinstance(similarity_filter, SimilarityFilter) is False and similarity_filter is not None: raise TypeError("The type of similarity_filter must be SimilarityFilter.") normalized_sentences = self.listup_sentence(document) # for filtering similar sentences. if similarity_filter is not None: normalized_sentences = similarity_filter.similar_filter_r(normalized_sentences) self.tokenize(document) words = self.token fdist = nltk.FreqDist(words) top_n_words = [w for (w, freq) in fdist.most_common(self.target_n)] scored_list = self.__closely_associated_score(normalized_sentences, top_n_words) filtered_list = Abstractor.filter(scored_list) result_list = [normalized_sentences[idx] for (idx, score) in filtered_list] result_dict = { "summarize_result": result_list, "scoring_data": filtered_list } return result_dict
def summarize(self, document, Abstractor, similarity_filter=None)
Execute summarization. Args: document: The target document. Abstractor: The object of AbstractableDoc. similarity_filter: The object of SimilarityFilter. Returns: dict data. - "summarize_result": The list of summarized sentences. - "scoring_data": The list of scores.
3.638717
2.402081
1.514818
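The top-n word selection rests on NLTK's frequency distribution; since `FreqDist` subclasses `collections.Counter` in NLTK 3, `most_common` is the reliable way to pull the `target_n` highest-frequency tokens. A small sketch:

import nltk

words = ["data", "science", "data", "mining", "data", "science"]
fdist = nltk.FreqDist(words)
top_n_words = [w for (w, freq) in fdist.most_common(2)]
print(top_n_words)  # ['data', 'science']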
''' Scoring the sentence with closely associations. Args: normalized_sentences: The list of sentences. top_n_words: Important sentences. Returns: The list of scores. ''' scores_list = [] sentence_idx = -1 for sentence in normalized_sentences: self.tokenize(sentence) sentence = self.token sentence_idx += 1 word_idx = [] for w in top_n_words: try: word_idx.append(sentence.index(w)) except ValueError: pass word_idx.sort() if len(word_idx) == 0: continue clusters = [] cluster = [word_idx[0]] i = 1 while i < len(word_idx): if word_idx[i] - word_idx[i - 1] < self.cluster_threshold: cluster.append(word_idx[i]) else: clusters.append(cluster[:]) cluster = [word_idx[i]] i += 1 clusters.append(cluster) max_cluster_score = 0 for c in clusters: significant_words_in_cluster = len(c) total_words_in_cluster = c[-1] - c[0] + 1 score = 1.0 * significant_words_in_cluster \ * significant_words_in_cluster / total_words_in_cluster if score > max_cluster_score: max_cluster_score = score scores_list.append((sentence_idx, score)) return scores_list
def __closely_associated_score(self, normalized_sentences, top_n_words)
Score each sentence by clusters of closely associated important words. Args: normalized_sentences: The list of sentences. top_n_words: Important words. Returns: The list of scores.
2.413774
2.005952
1.203306
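The per-sentence score follows Luhn's heuristic: group nearby occurrences of important words into clusters (gaps smaller than `cluster_threshold`), score each cluster as (number of significant words)^2 divided by the cluster's span, and keep the best cluster. A worked sketch on one made-up list of word positions:

def best_cluster_score(word_idx, cluster_threshold=5):
    # word_idx: sorted positions of important words inside one tokenized sentence.
    clusters, cluster = [], [word_idx[0]]
    for prev, cur in zip(word_idx, word_idx[1:]):
        if cur - prev < cluster_threshold:
            cluster.append(cur)
        else:
            clusters.append(cluster)
            cluster = [cur]
    clusters.append(cluster)
    return max(len(c) ** 2 / (c[-1] - c[0] + 1) for c in clusters)

print(best_cluster_score([3, 5, 6, 20]))  # Cluster [3, 5, 6] scores 9 / 4 = 2.25.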
def draw(self): ''' Draws samples from the `true` distribution. Returns: `np.ndarray` of samples. ''' return np.random.normal(loc=self.__mu, scale=self.__sigma, size=self.__output_shape)
Draws samples from the `true` distribution. Returns: `np.ndarray` of samples.
null
null
null
''' Multi-Agent Learning. Override. Args: initial_state_key: Initial state. limit: Limit of the number of learning iterations. game_n: The number of games. ''' end_flag = False state_key_list = [None] * len(self.q_learning_list) action_key_list = [None] * len(self.q_learning_list) next_action_key_list = [None] * len(self.q_learning_list) for game in range(game_n): state_key = initial_state_key self.t = 1 while self.t <= limit: for i in range(len(self.q_learning_list)): state_key_list[i] = state_key if game + 1 == game_n: self.state_key_list.append((i, tuple(state_key_list))) self.q_learning_list[i].t = self.t next_action_list = self.q_learning_list[i].extract_possible_actions((i, tuple(state_key_list))) if len(next_action_list): action_key = self.q_learning_list[i].select_action( state_key=(i, tuple(state_key_list)), next_action_list=next_action_list ) action_key_list[i] = action_key reward_value = self.q_learning_list[i].observe_reward_value( (i, tuple(state_key_list)), (i, tuple(action_key_list)) ) # Check. if self.q_learning_list[i].check_the_end_flag((i, tuple(state_key_list))) is True: end_flag = True # Max-Q-Value in next action time. next_next_action_list = self.q_learning_list[i].extract_possible_actions( (i, tuple(action_key_list)) ) if len(next_next_action_list): next_action_key = self.q_learning_list[i].predict_next_action( (i, tuple(action_key_list)), next_next_action_list ) next_action_key_list[i] = next_action_key next_max_q = self.q_learning_list[i].extract_q_df( (i, tuple(action_key_list)), next_action_key ) # Update Q-Value. self.q_learning_list[i].update_q( state_key=(i, tuple(state_key_list)), action_key=(i, tuple(action_key_list)), reward_value=reward_value, next_max_q=next_max_q ) # Update State. state_key = self.q_learning_list[i].update_state( state_key=(i, tuple(state_key_list)), action_key=(i, tuple(action_key_list)) ) state_key_list[i] = state_key # Episode. self.t += 1 self.q_learning_list[i].t = self.t if end_flag is True: break
def learn(self, initial_state_key, limit=1000, game_n=1)
Multi-Agent Learning. Override. Args: initial_state_key: Initial state. limit: Limit of the number of learning iterations. game_n: The number of games.
2.140061
1.979808
1.080944
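Each agent in the loop above performs an ordinary Q-learning update on a composite (agent index, joint state) key. For reference, the underlying rule is Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a)); a minimal dictionary-based sketch of that textbook update (not the library's own `update_q`):

from collections import defaultdict

q_table = defaultdict(float)  # Maps (state, action) -> Q-value, default 0.0.

def update_q(state, action, reward, next_state, next_actions, alpha=0.1, gamma=0.9):
    # Max Q-value over the actions available in the next state.
    next_max_q = max((q_table[(next_state, a)] for a in next_actions), default=0.0)
    q_table[(state, action)] += alpha * (reward + gamma * next_max_q - q_table[(state, action)])

update_q(state=0, action=1, reward=1.0, next_state=2, next_actions=[0, 1])
print(q_table[(0, 1)])  # 0.1 after a single update from zero.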
def draw(self): ''' Draws samples from the `fake` distribution. Returns: `np.ndarray` of samples. ''' observed_arr = self.extract_conditions() conv_arr = self.inference(observed_arr) if self.__conditon_noise_sampler is not None: self.__conditon_noise_sampler.output_shape = conv_arr.shape noise_arr = self.__conditon_noise_sampler.generate() conv_arr += noise_arr deconv_arr = self.__deconvolution_model.inference(conv_arr) return np.concatenate((deconv_arr, observed_arr), axis=1)
Draws samples from the `fake` distribution. Returns: `np.ndarray` of samples.
null
null
null
def learn(self, grad_arr, fix_opt_flag=False): ''' Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. fix_opt_flag: If `True`, this model's parameters will not be optimized. Returns: `np.ndarray` of delta or gradients. ''' channel = grad_arr.shape[1] // 2 grad_arr = self.__deconvolution_model.learn(grad_arr[:, :channel], fix_opt_flag=fix_opt_flag) delta_arr = self.__cnn.back_propagation(grad_arr) if fix_opt_flag is False: self.__cnn.optimize(self.__learning_rate, 1) return delta_arr
Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. fix_opt_flag: If `True`, this model's parameters will not be optimized. Returns: `np.ndarray` of delta or gradients.
null
null
null
def inference(self, observed_arr): ''' Draws samples from the `fake` distribution. Args: observed_arr: `np.ndarray` of observed data points. Returns: `np.ndarray` of inferenced. ''' for i in range(len(self.__deconvolution_layer_list)): try: observed_arr = self.__deconvolution_layer_list[i].forward_propagate(observed_arr) except: self.__logger.debug("Error raised in Deconvolution layer " + str(i + 1)) raise return observed_arr
Draws samples from the `fake` distribution. Args: observed_arr: `np.ndarray` of observed data points. Returns: `np.ndarray` of inferenced.
null
null
null
def learn(self, grad_arr, fix_opt_flag=False): ''' Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. fix_opt_flag: If `True`, this model's parameters will not be optimized. Returns: `np.ndarray` of delta or gradients. ''' deconvolution_layer_list = self.__deconvolution_layer_list[::-1] for i in range(len(deconvolution_layer_list)): try: grad_arr = deconvolution_layer_list[i].back_propagate(grad_arr) except: self.__logger.debug("Error raised in Deconvolution layer " + str(i + 1)) raise if fix_opt_flag is False: self.__optimize(self.__learning_rate, 1) return grad_arr
Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. fix_opt_flag: If `True`, this model's parameters will not be optimized. Returns: `np.ndarray` of delta or gradients.
null
null
null
def inference(self, observed_arr): ''' Draws samples from the `true` distribution. Args: observed_arr: `np.ndarray` of observed data points. Returns: `np.ndarray` of inferenced. ''' self.__pred_arr = self.__lstm_model.inference(observed_arr) return self.__pred_arr
Draws samples from the `true` distribution. Args: observed_arr: `np.ndarray` of observed data points. Returns: `np.ndarray` of inferenced.
null
null
null
def learn(self, grad_arr, fix_opt_flag=False): ''' Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. fix_opt_flag: If `True`, this model's parameters will not be optimized. Returns: `np.ndarray` of delta or gradients. ''' if grad_arr.ndim > 3: grad_arr = grad_arr.reshape(( grad_arr.shape[0], grad_arr.shape[1], -1 )) delta_arr, grads_list = self.__lstm_model.back_propagation(self.__pred_arr, grad_arr) if fix_opt_flag is False: self.__lstm_model.optimize( grads_list, self.__learning_rate, 1 ) return delta_arr
Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. fix_opt_flag: If `True`, this model's parameters will not be optimized. Returns: `np.ndarray` of delta or gradients.
null
null
null
def draw(self): ''' Draws samples from the `true` distribution. Returns: `np.ndarray` of samples. ''' return np.random.uniform(low=self.__low, high=self.__high, size=self.__output_shape)
Draws samples from the `true` distribution. Returns: `np.ndarray` of samples.
null
null
null
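Note the keyword difference between the two NumPy samplers behind these `draw` methods: `numpy.random.normal` takes `loc`/`scale`, while `numpy.random.uniform` takes `low`/`high`. A quick sketch:

import numpy as np

normal_arr = np.random.normal(loc=0.0, scale=1.0, size=(2, 3))
uniform_arr = np.random.uniform(low=-1.0, high=1.0, size=(2, 3))
print(normal_arr.shape, uniform_arr.shape)  # (2, 3) (2, 3)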
''' getter ''' if isinstance(self.__nlp_base, NlpBase) is False: raise TypeError("The type of self.__nlp_base must be NlpBase.") return self.__nlp_base
def get_nlp_base(self)
getter
4.715467
4.163293
1.132629
''' setter ''' if isinstance(value, NlpBase) is False: raise TypeError("The type of value must be NlpBase.") self.__nlp_base = value
def set_nlp_base(self, value)
setter
4.825709
4.789385
1.007584
''' getter ''' if isinstance(self.__similarity_limit, float) is False: raise TypeError("__similarity_limit must be float.") return self.__similarity_limit
def get_similarity_limit(self)
getter
5.533032
4.929867
1.122349
''' setter ''' if isinstance(value, float) is False: raise TypeError("__similarity_limit must be float.") self.__similarity_limit = value
def set_similarity_limit(self, value)
setter
6.041739
6.339773
0.95299
''' Remove duplicated elements. Args: token_list_x: [token, token, token, ...] token_list_y: [token, token, token, ...] Returns: Tuple(token_list_x, token_list_y) ''' x = set(list(token_list_x)) y = set(list(token_list_y)) return (x, y)
def unique(self, token_list_x, token_list_y)
Remove duplicated elements. Args: token_list_x: [token, token, token, ...] token_list_y: [token, token, token, ...] Returns: Tuple(token_list_x, token_list_y)
2.802025
1.775921
1.577787
''' Count the number of tokens in `token_list`. Args: token_list: The list of tokens. Returns: {token: the numbers} ''' token_dict = {} for token in token_list: if token in token_dict: token_dict[token] += 1 else: token_dict[token] = 1 return token_dict
def count(self, token_list)
Count the number of tokens in `token_list`. Args: token_list: The list of tokens. Returns: {token: the numbers}
2.911412
1.613694
1.80419
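The `count` helper is a hand-rolled equivalent of `collections.Counter`; a short sketch showing both produce the same token-to-frequency mapping:

from collections import Counter

token_list = ["data", "mining", "data"]
token_dict = {}
for token in token_list:
    token_dict[token] = token_dict.get(token, 0) + 1
print(token_dict)                 # {'data': 2, 'mining': 1}
print(dict(Counter(token_list)))  # Same counts.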
''' Filter mutually similar sentences. Args: sentence_list: The list of sentences. Returns: The list of filtered sentences. ''' result_list = [] recursive_list = [] try: self.nlp_base.tokenize(sentence_list[0]) subject_token = self.nlp_base.token result_list.append(sentence_list[0]) if len(sentence_list) > 1: for i in range(len(sentence_list)): if i > 0: self.nlp_base.tokenize(sentence_list[i]) object_token = self.nlp_base.token similarity = self.calculate(subject_token, object_token) if similarity <= self.similarity_limit: recursive_list.append(sentence_list[i]) if len(recursive_list) > 0: result_list.extend(self.similar_filter_r(recursive_list)) except IndexError: result_list = sentence_list return result_list
def similar_filter_r(self, sentence_list)
Filter mutually similar sentences. Args: sentence_list: The list of sentences. Returns: The list of filtered sentences.
2.53693
2.236635
1.134262
''' Annealing. ''' self.__predicted_log_list = [] for cycle in range(self.__cycles_num): for mc_step in range(self.__mc_step): self.__move() self.__gammma *= self.__fractional_reduction if isinstance(self.__tolerance_diff_e, float) and len(self.__predicted_log_list) > 1: diff = abs(self.__predicted_log_list[-1][5] - self.__predicted_log_list[-2][5]) if diff < self.__tolerance_diff_e: break self.predicted_log_arr = np.array(self.__predicted_log_list)
def annealing(self)
Annealing.
4.653313
4.579215
1.016181
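The annealing loop combines a geometric cooling schedule (`gamma` is multiplied by `fractional_reduction` after every cycle) with an optional early stop once the change in the logged energy falls below `tolerance_diff_e`. A stripped-down sketch of just that control flow, with a dummy move standing in for `__move` (all names and values here are illustrative):

import random

gamma = 1.0
fractional_reduction = 0.9
tolerance_diff_e = 1e-3
energy_log = []

for cycle in range(100):
    # Dummy "move": pretend the sampled energy shrinks with gamma.
    energy_log.append(gamma * random.random())
    gamma *= fractional_reduction
    if len(energy_log) > 1 and abs(energy_log[-1] - energy_log[-2]) < tolerance_diff_e:
        break

print(len(energy_log), "cycles before the early stop (or the full 100).")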
def calculate(self, token_list_x, token_list_y): ''' Calculate similarity with the Jaccard coefficient. Concrete method. Args: token_list_x: [token, token, token, ...] token_list_y: [token, token, token, ...] Returns: Similarity. ''' x, y = self.unique(token_list_x, token_list_y) try: result = len(x & y) / len(x | y) except ZeroDivisionError: result = 0.0 return result
Calculate similarity with the Jaccard coefficient. Concrete method. Args: token_list_x: [token, token, token, ...] token_list_y: [token, token, token, ...] Returns: Similarity.
null
null
null
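A worked example of the Jaccard coefficient computed above, |X ∩ Y| / |X ∪ Y|, on two small token lists:

token_list_x = ["data", "mining", "is", "fun"]
token_list_y = ["text", "mining", "is", "work"]
x, y = set(token_list_x), set(token_list_y)
similarity = len(x & y) / len(x | y) if (x | y) else 0.0
print(similarity)  # 2 shared tokens out of 6 distinct tokens -> 0.333...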
def set_noise_sampler(self, value): ''' setter ''' if isinstance(value, NoiseSampler) is False: raise TypeError("The type of `__noise_sampler` must be `NoiseSampler`.") self.__noise_sampler = value
setter
null
null
null
''' Learn Q-Values. Args: predicted_q_arr: `np.ndarray` of predicted Q-Values. real_q_arr: `np.ndarray` of real Q-Values. ''' loss = self.__computable_loss.compute_loss(predicted_q_arr, real_q_arr) delta_arr = self.__computable_loss.compute_delta(predicted_q_arr, real_q_arr) delta_arr = self.__cnn.back_propagation(delta_arr) self.__cnn.optimize(self.__learning_rate, 1) self.__loss_list.append(loss)
def learn_q(self, predicted_q_arr, real_q_arr)
Learn Q-Values. Args: predicted_q_arr: `np.ndarray` of predicted Q-Values. real_q_arr: `np.ndarray` of real Q-Values.
3.431894
2.544688
1.34865
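`learn_q` expects a `ComputableLoss`-style object exposing paired `compute_loss` and `compute_delta` methods: a scalar loss and its gradient with respect to the prediction. A minimal mean-squared-error sketch of that interface, under the standard MSE formulas (this is a stand-in, not the library's own implementation):

import numpy as np

class MeanSquaredError(object):
    def compute_loss(self, pred_arr, real_arr):
        # 0.5 * mean squared difference.
        return 0.5 * np.mean((pred_arr - real_arr) ** 2)

    def compute_delta(self, pred_arr, real_arr):
        # Gradient of compute_loss with respect to pred_arr.
        return (pred_arr - real_arr) / pred_arr.size

loss_fn = MeanSquaredError()
pred_arr = np.array([[1.0, 2.0]])
real_arr = np.array([[0.0, 2.0]])
print(loss_fn.compute_loss(pred_arr, real_arr))   # 0.25
print(loss_fn.compute_delta(pred_arr, real_arr))  # [[0.5 0. ]]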
''' Return an `object` wrapping the model as a function approximator; its `cnn` attribute is a `pydbm.cnn.convolutional_neural_network.ConvolutionalNeuralNetwork`. ''' class Model(object): def __init__(self, cnn): self.cnn = cnn return Model(self.__cnn)
def get_model(self)
Return an `object` wrapping the model as a function approximator; its `cnn` attribute is a `pydbm.cnn.convolutional_neural_network.ConvolutionalNeuralNetwork`.
9.830601
2.36705
4.153102