code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
''' Convert decimal position to degrees, minutes, seconds in a format supported by EXIF ''' deg = math.floor(value) min = math.floor((value - deg) * 60) sec = math.floor((value - deg - min / 60) * 3600 * precision) return ((deg, 1), (min, 1), (sec, precision))
def decimal_to_dms(value, precision)
Convert decimal position to degrees, minutes, seconds in a format supported by EXIF
3.641902
2.257309
1.613382
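A minimal usage sketch for decimal_to_dms as defined above; the coordinate and precision values are illustrative.
lat = 41.4034
lat_dms = decimal_to_dms(lat, 100)
# lat_dms has the form ((degrees, 1), (minutes, 1), (seconds * 100, 100)),
# the rational layout used for the piexif GPS tags later in this section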
''' Convert GPS coordinate in GPGGA format to decimal degrees Reference: http://us.cactii.net/~bb/gps.py ''' deg_min, dmin = gpgga.split('.') degrees = int(deg_min[:-2]) minutes = float('%s.%s' % (deg_min[-2:], dmin)) decimal = degrees + (minutes / 60) return decimal
def gpgga_to_dms(gpgga)
Convert GPS coordinate in GPGGA format to decimal degrees Reference: http://us.cactii.net/~bb/gps.py
6.807413
2.870228
2.371733
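An illustrative call to gpgga_to_dms; the NMEA-style string encodes 41 degrees and 24.2028 minutes.
decimal = gpgga_to_dms("4124.2028")
# decimal == 41 + 24.2028 / 60, roughly 41.4034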
''' Get the compass bearing from start to end. Formula from http://www.movable-type.co.uk/scripts/latlong.html ''' # make sure everything is in radians start_lat = math.radians(start_lat) start_lon = math.radians(start_lon) end_lat = math.radians(end_lat) end_lon = math.radians(end_lon) dLong = end_lon - start_lon dPhi = math.log(math.tan(end_lat / 2.0 + math.pi / 4.0) / math.tan(start_lat / 2.0 + math.pi / 4.0)) if abs(dLong) > math.pi: if dLong > 0.0: dLong = -(2.0 * math.pi - dLong) else: dLong = (2.0 * math.pi + dLong) y = math.sin(dLong) * math.cos(end_lat) x = math.cos(start_lat) * math.sin(end_lat) - \ math.sin(start_lat) * math.cos(end_lat) * math.cos(dLong) bearing = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0 return bearing
def compute_bearing(start_lat, start_lon, end_lat, end_lon)
Get the compass bearing from start to end. Formula from http://www.movable-type.co.uk/scripts/latlong.html
1.407012
1.304159
1.078866
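A quick sanity check of compute_bearing using points on the equator; the coordinates are illustrative.
bearing = compute_bearing(0.0, 0.0, 0.0, 1.0)
# heading due east along the equator, so bearing is approximately 90.0 degrees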
''' Compute difference between two bearings ''' d = abs(b2 - b1) d = 360 - d if d > 180 else d return d
def diff_bearing(b1, b2)
Compute difference between two bearings
3.68974
3.264681
1.130199
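An illustrative call to diff_bearing showing the wrap-around at 360 degrees.
d = diff_bearing(350.0, 10.0)
# abs(10 - 350) = 340 > 180, so the function returns 360 - 340 = 20.0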
''' Normalize bearing and convert from hex if check_hex is True ''' if bearing > 360 and check_hex: # fix negative value wrongly parsed in exifread # -360 degree -> 4294966935 when converting from hex bearing = bin(int(bearing))[2:] bearing = ''.join([str(int(int(a) == 0)) for a in bearing]) bearing = -float(int(bearing, 2)) bearing %= 360 return bearing
def normalize_bearing(bearing, check_hex=False)
Normalize bearing and convert from hex if check_hex is True
6.901423
6.158039
1.120718
''' Return interpolated lat, lon and compass bearing for time t. Points is a list of tuples (time, lat, lon, elevation), t a datetime object. ''' # find the enclosing points in sorted list if (t <= points[0][0]) or (t >= points[-1][0]): if t <= points[0][0]: dt = abs((points[0][0] - t).total_seconds()) else: dt = (t - points[-1][0]).total_seconds() if dt > max_dt: raise ValueError( "time t not in scope of gpx file by {} seconds".format(dt)) else: print( "time t not in scope of gpx file by {} seconds, extrapolating...".format(dt)) if t < points[0][0]: before = points[0] after = points[1] else: before = points[-2] after = points[-1] bearing = compute_bearing(before[1], before[2], after[1], after[2]) if t == points[0][0]: x = points[0] return (x[1], x[2], bearing, x[3]) if t == points[-1][0]: x = points[-1] return (x[1], x[2], bearing, x[3]) else: for i, point in enumerate(points): if t < point[0]: if i > 0: before = points[i - 1] else: before = points[i] after = points[i] break # weight based on time weight = (t - before[0]).total_seconds() / \ (after[0] - before[0]).total_seconds() # simple linear interpolation in case points are not the same if before[1] == after[1]: lat = before[1] else: lat = before[1] - weight * before[1] + weight * after[1] if before[2] == after[2]: lon = before[2] else: lon = before[2] - weight * before[2] + weight * after[2] # camera angle bearing = compute_bearing(before[1], before[2], after[1], after[2]) # altitude if before[3] is not None: ele = before[3] - weight * before[3] + weight * after[3] else: ele = None return lat, lon, bearing, ele
def interpolate_lat_lon(points, t, max_dt=1)
Return interpolated lat, lon and compass bearing for time t. Points is a list of tuples (time, lat, lon, elevation), t a datetime object.
2.278971
2.039654
1.117332
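A usage sketch for interpolate_lat_lon; the GPX-style points and timestamps are made up for illustration, and compute_bearing from above must be in scope.
import datetime

t0 = datetime.datetime(2018, 1, 1, 12, 0, 0)
points = [
    (t0, 59.0000, 18.0000, 10.0),
    (t0 + datetime.timedelta(seconds=10), 59.0010, 18.0020, 12.0),
]
# halfway between the two fixes: lat/lon/elevation are linearly interpolated,
# bearing is that of the enclosing segment
lat, lon, bearing, ele = interpolate_lat_lon(points, t0 + datetime.timedelta(seconds=5))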
if self._ef is not None: self._ef['0th'][piexif.ImageIFD.ImageDescription] = json.dumps( dict)
def add_image_description(self, dict)
Add a dict to image description.
9.575993
7.979022
1.200146
if not orientation in range(1, 9): print_error( "Error value for orientation, value must be in range(1,9), setting to default 1") self._ef['0th'][piexif.ImageIFD.Orientation] = 1 else: self._ef['0th'][piexif.ImageIFD.Orientation] = orientation
def add_orientation(self, orientation)
Add image orientation to image.
5.630543
4.940797
1.139602
try: DateTimeOriginal = date_time.strftime(time_format)[:-3] self._ef['Exif'][piexif.ExifIFD.DateTimeOriginal] = DateTimeOriginal except Exception as e: print_error("Error writing DateTimeOriginal, due to " + str(e))
def add_date_time_original(self, date_time, time_format='%Y:%m:%d %H:%M:%S.%f')
Add date time original.
4.47974
4.333383
1.033774
self._ef["GPS"][piexif.GPSIFD.GPSLatitudeRef] = "N" if lat > 0 else "S" self._ef["GPS"][piexif.GPSIFD.GPSLongitudeRef] = "E" if lon > 0 else "W" self._ef["GPS"][piexif.GPSIFD.GPSLongitude] = decimal_to_dms( abs(lon), int(precision)) self._ef["GPS"][piexif.GPSIFD.GPSLatitude] = decimal_to_dms( abs(lat), int(precision))
def add_lat_lon(self, lat, lon, precision=1e7)
Add lat, lon to gps (lat, lon in float).
1.969506
1.911325
1.03044
self._ef['0th'][piexif.ImageIFD.ImageHistory] = json.dumps(data)
def add_image_history(self, data)
Add arbitrary string to ImageHistory tag.
18.50355
14.384504
1.286353
''' Add camera make and model.''' self._ef['0th'][piexif.ImageIFD.Make] = make self._ef['0th'][piexif.ImageIFD.Model] = model
def add_camera_make_model(self, make, model)
Add camera make and model.
5.753018
5.907116
0.973913
self._ef["GPS"][piexif.GPSIFD.GPSDOP] = ( int(abs(dop) * precision), precision)
def add_dop(self, dop, precision=100)
Add GPSDOP (float).
17.86615
10.562222
1.691514
ref = 0 if altitude > 0 else 1 self._ef["GPS"][piexif.GPSIFD.GPSAltitude] = ( int(abs(altitude) * precision), precision) self._ef["GPS"][piexif.GPSIFD.GPSAltitudeRef] = ref
def add_altitude(self, altitude, precision=100)
Add altitude (precision is the denominator of the stored rational).
4.997169
4.889501
1.02202
# normalize direction direction = direction % 360.0 self._ef["GPS"][piexif.GPSIFD.GPSImgDirection] = ( int(abs(direction) * precision), precision) self._ef["GPS"][piexif.GPSIFD.GPSImgDirectionRef] = ref
def add_direction(self, direction, ref="T", precision=100)
Add image direction.
5.57247
5.634546
0.988983
if filename is None: filename = self._filename exif_bytes = piexif.dump(self._ef) with open(self._filename, "rb") as fin: img = fin.read() try: piexif.insert(exif_bytes, img, filename) except IOError: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error saving file:", value
def write(self, filename=None)
Save exif data to file.
4.092144
3.64685
1.122104
''' Estimate the capture time of a sequence with sub-second precision EXIF times are only given up to a second of precision. This function uses the given interval between shots to estimate the time inside that second that each picture was taken. ''' if interval <= 0.0: return [exif_time(f) for f in tqdm(files, desc="Reading image capture time")] onesecond = datetime.timedelta(seconds=1.0) T = datetime.timedelta(seconds=interval) for i, f in tqdm(enumerate(files), desc="Estimating subsecond time"): m = exif_time(f) if not m: pass if i == 0: smin = m smax = m + onesecond else: m0 = m - T * i smin = max(smin, m0) smax = min(smax, m0 + onesecond) if not smin or not smax: return None if smin > smax: # ERROR LOG print('Interval not compatible with EXIF times') return None else: s = smin + (smax - smin) / 2 return [s + T * i for i in range(len(files))]
def estimate_sub_second_time(files, interval=0.0)
Estimate the capture time of a sequence with sub-second precision. EXIF times are only given up to a second of precision. This function uses the given interval between shots to estimate the time inside that second that each picture was taken.
4.328075
2.757647
1.569481
''' Interpolate time stamps in case of identical timestamps ''' timestamps = [] num_file = len(capture_times) time_dict = OrderedDict() if num_file < 2: return capture_times # trace identical timestamps (always assume capture_times is sorted) time_dict = OrderedDict() for i, t in enumerate(capture_times): if t not in time_dict: time_dict[t] = { "count": 0, "pointer": 0 } interval = 0 if i != 0: interval = (t - capture_times[i - 1]).total_seconds() time_dict[capture_times[i - 1]]["interval"] = interval time_dict[t]["count"] += 1 if len(time_dict) >= 2: # set time interval as the last available time interval time_dict[time_dict.keys()[-1] ]["interval"] = time_dict[time_dict.keys()[-2]]["interval"] else: # set time interval assuming capture interval is 1 second time_dict[time_dict.keys()[0]]["interval"] = time_dict[time_dict.keys()[ 0]]["count"] * 1. # interpolate timestamps for t in capture_times: d = time_dict[t] s = datetime.timedelta( seconds=d["pointer"] * d["interval"] / float(d["count"])) updated_time = t + s time_dict[t]["pointer"] += 1 timestamps.append(updated_time) return timestamps
def interpolate_timestamp(capture_times)
Interpolate time stamps in case of identical timestamps
2.967832
2.803255
1.058709
''' Gets information about a media file TODO: use the class in ffprobe.py - why doesn't it use json output? ''' try: with open(os.devnull, 'w') as tempf: subprocess.check_call( ['ffprobe', '-h'], stdout=tempf, stderr=tempf) except Exception as e: raise IOError('ffprobe not found.') if not os.path.isfile(path): raise IOError('No such file: ' + path) j_str = "" try: j_str = subprocess.check_output([ 'ffprobe', '-v', 'quiet', '-print_format', 'json', '-show_format', '-show_streams', path ]) except subprocess.CalledProcessError: pass j_obj = json.loads(j_str) return j_obj
def get_ffprobe(path)
Gets information about a media file. TODO: use the class in ffprobe.py - why doesn't it use json output?
3.071551
2.141665
1.434189
''' Get the data out of the file using ffmpeg. @param source: mp4 filename ''' if not os.path.isfile(source): raise IOError('No such file: ' + source) subprocess.check_output([ 'ffmpeg', '-i', source, '-y', # overwrite - potentially dangerous '-nostats', '-loglevel', '0', '-codec', 'copy', '-map', '0:' + str(stream_id), '-f', 'rawvideo', dest, ])
def extract_stream(source, dest, stream_id)
Get the data out of the file using ffmpeg. @param source: mp4 filename
4.112438
2.963717
1.387595
''' Check that image file has the required EXIF fields. Incompatible files will be ignored server side. ''' # required tags in IFD name convention required_exif = required_fields() exif = ExifRead(filename) required_exif_exist = exif.fields_exist(required_exif) return required_exif_exist
def verify_exif(filename)
Check that image file has the required EXIF fields. Incompatible files will be ignored server side.
9.476157
4.991936
1.898293
filepath_keep_original = processing.processed_images_rootpath(filepath) if os.path.isfile(filepath_keep_original): filepath = filepath_keep_original ''' Check that image file has the required Mapillary tag ''' return ExifRead(filepath).mapillary_tag_exists()
def verify_mapillary_tag(filepath)
Check that image file has the required Mapillary tag
10.876287
6.207256
1.752189
val=False if self.__dict__['codec_type']: if str(self.__dict__['codec_type']) == 'audio': val=True return val
def isAudio(self)
Is this stream labelled as an audio stream?
5.198025
4.07713
1.274923
val=False if self.__dict__['codec_type']: if self.codec_type == 'video': val=True return val
def isVideo(self)
Is the stream labelled as a video stream?
6.439754
5.329398
1.208346
val=False if self.__dict__['codec_type']: if str(self.codec_type)=='subtitle': val=True return val
def isSubtitle(self)
Is the stream labelled as a subtitle stream?
6.623434
5.711026
1.159763
size=None if self.isVideo(): if self.__dict__['width'] and self.__dict__['height']: try: size=(int(self.__dict__['width']),int(self.__dict__['height'])) except Exception as e: print "None integer size %s:%s" %(str(self.__dict__['width']),str(self.__dict__['height'])) size=(0,0) return size
def frameSize(self)
Returns the pixel frame size as an integer tuple (width,height) if the stream is a video stream. Returns None if it is not a video stream.
3.850215
3.404329
1.130976
f=None if self.isVideo(): if self.__dict__['pix_fmt']: f=self.__dict__['pix_fmt'] return f
def pixelFormat(self)
Returns a string representing the pixel format of the video stream, e.g. yuv420p. Returns None if it is not a video stream.
6.418909
4.010839
1.600391
f=0 if self.isVideo() or self.isAudio(): if self.__dict__['nb_frames']: try: f=int(self.__dict__['nb_frames']) except Exception as e: print "None integer frame count" return f
def frames(self)
Returns the length of a video stream in frames. Returns 0 if not a video stream.
5.823268
4.677674
1.244907
f=0.0 if self.isVideo() or self.isAudio(): if self.__dict__['duration']: try: f=float(self.__dict__['duration']) except Exception as e: print "None numeric duration" return f
def durationSeconds(self)
Returns the runtime duration of the video stream as a floating point number of seconds. Returns 0.0 if not a video stream.
5.843992
5.079145
1.150586
b=0 if self.__dict__['bit_rate']: try: b=int(self.__dict__['bit_rate']) except Exception as e: print "None integer bitrate" return b
def bitrate(self)
Returns bitrate as an integer in bps
5.212346
4.700572
1.108875
''' Returns upload URL using new upload API ''' request_url = "https://a.mapillary.com/v3/users/{}/upload_secrets?client_id={}".format( credentials["MAPSettingsUserKey"], CLIENT_ID) request = urllib2.Request(request_url) request.add_header('Authorization', 'Bearer {}'.format( credentials["user_upload_token"])) try: response = json.loads(urllib2.urlopen(request).read()) except requests.exceptions.HTTPError as e: print("Error getting upload parameters, upload could not start") sys.exit(1) return response
def get_upload_url(credentials)
Returns upload URL using new upload API
4.570963
3.953732
1.156114
''' Get upload token ''' try: params = urllib.urlencode({"email": mail, "password": pwd}) response = urllib.urlopen(LOGIN_URL, params) except: return None resp = json.loads(response.read()) if not resp or 'token' not in resp: return None return resp['token']
def get_upload_token(mail, pwd)
Get upload token
2.953753
2.783448
1.061185
''' Display progress bar sources: https://gist.github.com/vladignatyev/06860ec2040cb497f0f3 ''' bar_len = 60 filled_len = int(round(bar_len * count / float(total))) percents = round(100.0 * count / float(total), 1) bar = '=' * filled_len + '-' * (bar_len - filled_len) sys.stdout.write('[%s] %s%s %s\r' % (bar, percents, '%', suffix)) sys.stdout.flush()
def progress(count, total, suffix='')
Display progress bar sources: https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
1.518656
1.324917
1.146228
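A small driver loop for the progress helper above; the sleep just stands in for real work.
import time

total = 50
for count in range(1, total + 1):
    progress(count, total, suffix="uploading")
    time.sleep(0.01)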
''' Authenticate the user by passing the email and password. This function avoids prompting the command line for user credentials and is useful for calling tools programmatically ''' if user_email is None or user_password is None: raise ValueError( 'Could not authenticate user. Missing username or password') upload_token = uploader.get_upload_token(user_email, user_password) if not upload_token: print("Authentication failed for user name " + user_name + ", please try again.") sys.exit(1) user_key = get_user_key(user_name) if not user_key: print("User name {} does not exist, please try again or contact Mapillary user support.".format( user_name)) sys.exit(1) user_permission_hash, user_signature_hash = get_user_hashes( user_key, upload_token) user_items["MAPSettingsUsername"] = section user_items["MAPSettingsUserKey"] = user_key user_items["user_upload_token"] = upload_token user_items["user_permission_hash"] = user_permission_hash user_items["user_signature_hash"] = user_signature_hash return user_items
def authenticate_with_email_and_pwd(user_email, user_password)
Authenticate the user by passing the email and password. This function avoids prompting the command line for user credentials and is useful for calling tools programmatically
4.154841
3.099245
1.340598
''' Upload file at filepath. ''' if max_attempts == None: max_attempts = MAX_ATTEMPTS filename = os.path.basename(filepath) s3_filename = filename try: s3_filename = ExifRead(filepath).exif_name() except: pass filepath_keep_original = processing.processed_images_rootpath(filepath) filepath_in = filepath if os.path.isfile(filepath_keep_original): filepath = filepath_keep_original # add S3 'path' if given if key is None: s3_key = s3_filename else: s3_key = key + s3_filename parameters = {"key": s3_key, "AWSAccessKeyId": aws_key, "acl": "private", "policy": permission, "signature": signature, "Content-Type": "image/jpeg"} with open(filepath, "rb") as f: encoded_string = f.read() data, headers = encode_multipart( parameters, {'file': {'filename': filename, 'content': encoded_string}}) if (DRY_RUN == False): displayed_upload_error = False for attempt in range(max_attempts): # Initialize response before each attempt response = None try: request = urllib2.Request(url, data=data, headers=headers) response = urllib2.urlopen(request) if response.getcode() == 204: create_upload_log(filepath_in, "upload_success") if displayed_upload_error == True: print("Successful upload of {} on attempt {}".format( filename, attempt)) else: create_upload_log(filepath_in, "upload_failed") break # attempts except urllib2.HTTPError as e: print("HTTP error: {} on {}, will attempt upload again for {} more times".format( e, filename, max_attempts - attempt - 1)) displayed_upload_error = True time.sleep(5) except urllib2.URLError as e: print("URL error: {} on {}, will attempt upload again for {} more times".format( e, filename, max_attempts - attempt - 1)) time.sleep(5) except httplib.HTTPException as e: print("HTTP exception: {} on {}, will attempt upload again for {} more times".format( e, filename, max_attempts - attempt - 1)) time.sleep(5) except OSError as e: print("OS error: {} on {}, will attempt upload again for {} more times".format( e, filename, max_attempts - attempt - 1)) time.sleep(5) except socket.timeout as e: # Specific timeout handling for Python 2.7 print("Timeout error: {} (retrying), will attempt upload again for {} more times".format( filename, max_attempts - attempt - 1)) finally: if response is not None: response.close() else: print('DRY_RUN, Skipping actual image upload. Use this for debug only.')
def upload_file(filepath, max_attempts, url, permission, signature, key=None, aws_key=None)
Upload file at filepath.
2.858375
2.817455
1.014524
c1 = 0xcc9e2d51 c2 = 0x1b873593 length = len(data) h1 = seed roundedEnd = (length & 0xfffffffc) # round down to 4 byte block for i in range(0, roundedEnd, 4): # little endian load order k1 = (ord(data[i]) & 0xff) | ((ord(data[i + 1]) & 0xff) << 8) | \ ((ord(data[i + 2]) & 0xff) << 16) | (ord(data[i + 3]) << 24) k1 *= c1 k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17) # ROTL32(k1,15) k1 *= c2 h1 ^= k1 h1 = (h1 << 13) | ((h1 & 0xffffffff) >> 19) # ROTL32(h1,13) h1 = h1 * 5 + 0xe6546b64 # tail k1 = 0 val = length & 0x03 if val == 3: k1 = (ord(data[roundedEnd + 2]) & 0xff) << 16 # fallthrough if val in [2, 3]: k1 |= (ord(data[roundedEnd + 1]) & 0xff) << 8 # fallthrough if val in [1, 2, 3]: k1 |= ord(data[roundedEnd]) & 0xff k1 *= c1 k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17) # ROTL32(k1,15) k1 *= c2 h1 ^= k1 # finalization h1 ^= length # fmix(h1) h1 ^= ((h1 & 0xffffffff) >> 16) h1 *= 0x85ebca6b h1 ^= ((h1 & 0xffffffff) >> 13) h1 *= 0xc2b2ae35 h1 ^= ((h1 & 0xffffffff) >> 16) return h1 & 0xffffffff
def murmur3_32(data, seed=0)
MurmurHash3 was written by Austin Appleby, and is placed in the public domain. The author hereby disclaims copyright to this source code.
1.382336
1.393729
0.991826
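A determinism check for murmur3_32 above; the input string is arbitrary.
h1 = murmur3_32("mapillary")
h2 = murmur3_32("mapillary")
assert h1 == h2                      # same input and seed give the same hash
assert 0 <= h1 <= 0xffffffff         # result is always masked to 32 bits
h3 = murmur3_32("mapillary", seed=42)  # a different seed typically gives a different value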
if allow_unicode_keys: if isinstance(key, six.text_type): key = key.encode('utf8') elif isinstance(key, VALID_STRING_TYPES): try: if isinstance(key, bytes): key = key.decode().encode('ascii') else: key = key.encode('ascii') except (UnicodeEncodeError, UnicodeDecodeError): raise MemcacheIllegalInputError("Non-ASCII key: '%r'" % key) key = key_prefix + key parts = key.split() if len(key) > 250: raise MemcacheIllegalInputError("Key is too long: '%r'" % key) # second statement catches leading or trailing whitespace elif len(parts) > 1 or parts[0] != key: raise MemcacheIllegalInputError("Key contains whitespace: '%r'" % key) elif b'\00' in key: raise MemcacheIllegalInputError("Key contains null: '%r'" % key) return key
def _check_key(key, allow_unicode_keys, key_prefix=b'')
Checks key and adds key_prefix.
2.818385
2.768676
1.017954
chunks = [] last_char = b'' while True: # We're reading in chunks, so "\r\n" could appear in one chunk, # or across the boundary of two chunks, so we check for both # cases. # This case must appear first, since the buffer could have # later \r\n characters in it and we want to get the first \r\n. if last_char == b'\r' and buf[0:1] == b'\n': # Strip the last character from the last chunk. chunks[-1] = chunks[-1][:-1] return buf[1:], b''.join(chunks) elif buf.find(b'\r\n') != -1: before, sep, after = buf.partition(b"\r\n") chunks.append(before) return after, b''.join(chunks) if buf: chunks.append(buf) last_char = buf[-1:] buf = _recv(sock, RECV_SIZE) if not buf: raise MemcacheUnexpectedCloseError()
def _readline(sock, buf)
Read line of text from the socket. Read a line of text (delimited by "\r\n") from the socket, and return that line along with any trailing characters read from the socket. Args: sock: Socket object, should be connected. buf: String, zero or more characters, returned from an earlier call to _readline or _readvalue (pass an empty string on the first call). Returns: A tuple of (buf, line) where line is the full line read from the socket (minus the "\r\n" characters) and buf is any trailing characters read after the "\r\n" was found (which may be an empty string).
4.233118
4.234252
0.999732
chunks = [] rlen = size + 2 while rlen - len(buf) > 0: if buf: rlen -= len(buf) chunks.append(buf) buf = _recv(sock, RECV_SIZE) if not buf: raise MemcacheUnexpectedCloseError() # Now we need to remove the \r\n from the end. There are two cases we care # about: the \r\n is all in the last buffer, or only the \n is in the last # buffer, and we need to remove the \r from the penultimate buffer. if rlen == 1: # replace the last chunk with the same string minus the last character, # which is always '\r' in this case. chunks[-1] = chunks[-1][:-1] else: # Just remove the "\r\n" from the latest chunk chunks.append(buf[:rlen - 2]) return buf[rlen:], b''.join(chunks)
def _readvalue(sock, buf, size)
Read specified amount of bytes from the socket. Read size bytes, followed by the "\r\n" characters, from the socket, and return those bytes and any trailing bytes read after the "\r\n". Args: sock: Socket object, should be connected. buf: String, zero or more characters, returned from an earlier call to _readline or _readvalue (pass an empty string on the first call). size: Integer, number of bytes to read from the socket. Returns: A tuple of (buf, value) where value is the bytes read from the socket (there will be exactly size bytes) and buf is trailing characters read after the "\r\n" following the bytes (but not including the \r\n).
4.766297
4.781837
0.99675
while True: try: return sock.recv(size) except IOError as e: if e.errno != errno.EINTR: raise
def _recv(sock, size)
sock.recv() with retry on EINTR
2.447394
1.965207
1.245362
return _check_key(key, allow_unicode_keys=self.allow_unicode_keys, key_prefix=self.key_prefix)
def check_key(self, key)
Checks key and adds key_prefix.
6.085316
4.332012
1.404732
if self.sock is not None: try: self.sock.close() except Exception: pass finally: self.sock = None
def close(self)
Close the connection to memcached, if it is open. The next call to a method that requires a connection will re-open it.
2.803717
2.550186
1.099417
if noreply is None: noreply = self.default_noreply return self._store_cmd(b'set', {key: value}, expire, noreply)[key]
def set(self, key, value, expire=0, noreply=None)
The memcached "set" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: If no exception is raised, always returns True. If an exception is raised, the set may or may not have occurred. If noreply is True, then a successful return does not guarantee a successful set.
4.329058
5.38205
0.804351
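A hedged usage sketch: it assumes set and get belong to a pymemcache-style Client already connected to a memcached server; the constructor shown here is an assumption, not part of this excerpt.
client = Client(("localhost", 11211))   # hypothetical construction
client.set("greeting", "hello", expire=60)
value = client.get("greeting", default=b"missing")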
if noreply is None: noreply = self.default_noreply result = self._store_cmd(b'set', values, expire, noreply) return [k for k, v in six.iteritems(result) if not v]
def set_many(self, values, expire=0, noreply=None)
A convenience function for setting multiple values. Args: values: dict(str, str), a dict of keys and values, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: Returns a list of keys that failed to be inserted. If noreply is True, always returns empty list.
3.514362
3.579031
0.981931
if noreply is None: noreply = self.default_noreply return self._store_cmd(b'add', {key: value}, expire, noreply)[key]
def add(self, key, value, expire=0, noreply=None)
The memcached "add" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: If noreply is True, the return value is always True. Otherwise the return value is True if the value was stored, and False if it was not (because the key already existed).
4.480626
5.123139
0.874586
if noreply is None: noreply = self.default_noreply return self._store_cmd(b'replace', {key: value}, expire, noreply)[key]
def replace(self, key, value, expire=0, noreply=None)
The memcached "replace" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: If noreply is True, always returns True. Otherwise returns True if the value was stored and False if it wasn't (because the key didn't already exist).
4.329761
5.261148
0.822969
if noreply is None: noreply = self.default_noreply return self._store_cmd(b'append', {key: value}, expire, noreply)[key]
def append(self, key, value, expire=0, noreply=None)
The memcached "append" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
4.520548
4.908887
0.920891
if noreply is None: noreply = self.default_noreply return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key]
def prepend(self, key, value, expire=0, noreply=None)
The memcached "prepend" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
4.59942
4.992721
0.921225
return self._store_cmd(b'cas', {key: value}, expire, noreply, cas)[key]
def cas(self, key, value, cas, expire=0, noreply=False)
The memcached "cas" command. Args: key: str, see class docs for details. value: str, see class docs for details. cas: int or str that only contains the characters '0'-'9'. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns True. Otherwise returns None if the key didn't exist, False if it existed but had a different cas value and True if it existed and was changed.
7.838087
13.211751
0.593266
return self._fetch_cmd(b'get', [key], False).get(key, default)
def get(self, key, default=None)
The memcached "get" command, but only for one key, as a convenience. Args: key: str, see class docs for details. default: value that will be returned if the key was not found. Returns: The value for the key, or default if the key wasn't found.
14.359983
14.572873
0.985391
defaults = (default, cas_default) return self._fetch_cmd(b'gets', [key], True).get(key, defaults)
def gets(self, key, default=None, cas_default=None)
The memcached "gets" command for one key, as a convenience. Args: key: str, see class docs for details. default: value that will be returned if the key was not found. cas_default: same behaviour as default argument. Returns: A tuple of (value, cas) or (default, cas_defaults) if the key was not found.
9.274706
11.615953
0.798446
if noreply is None: noreply = self.default_noreply cmd = b'delete ' + self.check_key(key) if noreply: cmd += b' noreply' cmd += b'\r\n' results = self._misc_cmd([cmd], b'delete', noreply) if noreply: return True return results[0] == b'DELETED'
def delete(self, key, noreply=None)
The memcached "delete" command. Args: key: str, see class docs for details. noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: If noreply is True, always returns True. Otherwise returns True if the key was deleted, and False if it wasn't found.
3.207321
3.034126
1.057082
if not keys: return True if noreply is None: noreply = self.default_noreply cmds = [] for key in keys: cmds.append( b'delete ' + self.check_key(key) + (b' noreply' if noreply else b'') + b'\r\n') self._misc_cmd(cmds, b'delete', noreply) return True
def delete_many(self, keys, noreply=None)
A convenience function to delete multiple keys. Args: keys: list(str), the list of keys to delete. noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True. If an exception is raised then all, some or none of the keys may have been deleted. Otherwise all the keys have been sent to memcache for deletion and if noreply is False, they have been acknowledged by memcache.
3.37721
3.406281
0.991466
key = self.check_key(key) cmd = b'incr ' + key + b' ' + six.text_type(value).encode('ascii') if noreply: cmd += b' noreply' cmd += b'\r\n' results = self._misc_cmd([cmd], b'incr', noreply) if noreply: return None if results[0] == b'NOT_FOUND': return None return int(results[0])
def incr(self, key, value, noreply=False)
The memcached "incr" command. Args: key: str, see class docs for details. value: int, the amount by which to increment the value. noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns None. Otherwise returns the new value of the key, or None if the key wasn't found.
2.883059
3.057772
0.942863
key = self.check_key(key) cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii') if noreply: cmd += b' noreply' cmd += b'\r\n' results = self._misc_cmd([cmd], b'decr', noreply) if noreply: return None if results[0] == b'NOT_FOUND': return None return int(results[0])
def decr(self, key, value, noreply=False)
The memcached "decr" command. Args: key: str, see class docs for details. value: int, the amount by which to increment the value. noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns None. Otherwise returns the new value of the key, or None if the key wasn't found.
2.978593
3.062739
0.972526
if noreply is None: noreply = self.default_noreply key = self.check_key(key) cmd = b'touch ' + key + b' ' + six.text_type(expire).encode('ascii') if noreply: cmd += b' noreply' cmd += b'\r\n' results = self._misc_cmd([cmd], b'touch', noreply) if noreply: return True return results[0] == b'TOUCHED'
def touch(self, key, expire=0, noreply=None)
The memcached "touch" command. Args: key: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True if the expiration time was updated, False if the key wasn't found.
2.959997
2.977453
0.994137
result = self._fetch_cmd(b'stats', args, False) for key, value in six.iteritems(result): converter = STAT_TYPES.get(key, int) try: result[key] = converter(value) except Exception: pass return result
def stats(self, *args)
The memcached "stats" command. The returned keys depend on what the "stats" command returns. A best effort is made to convert values to appropriate Python types, defaulting to strings when a conversion cannot be made. Args: *arg: extra string arguments to the "stats" command. See the memcached protocol documentation for more information. Returns: A dict of the returned stats.
4.544432
5.007862
0.90746
self._fetch_cmd(b'cache_memlimit', [str(int(memlimit))], False) return True
def cache_memlimit(self, memlimit)
The memcached "cache_memlimit" command. Args: memlimit: int, the number of megabytes to set as the new cache memory limit. Returns: If no exception is raised, always returns True.
9.871665
9.290656
1.062537
cmd = b"version\r\n" results = self._misc_cmd([cmd], b'version', False) before, _, after = results[0].partition(b' ') if before != b'VERSION': raise MemcacheUnknownError( "Received unexpected response: %s" % results[0]) return after
def version(self)
The memcached "version" command. Returns: A string of the memcached version.
8.373879
7.006271
1.195198
if noreply is None: noreply = self.default_noreply cmd = b'flush_all ' + six.text_type(delay).encode('ascii') if noreply: cmd += b' noreply' cmd += b'\r\n' results = self._misc_cmd([cmd], b'flush_all', noreply) if noreply: return True return results[0] == b'OK'
def flush_all(self, delay=0, noreply=None)
The memcached "flush_all" command. Args: delay: optional int, the number of seconds to wait before flushing, or zero to flush immediately (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
2.966578
2.704057
1.097084
cmd = b"quit\r\n" self._misc_cmd([cmd], b'quit', True) self.close()
def quit(self)
The memcached "quit" command. This will close the connection with memcached. Calling any other method on this object will re-open the connection, so this object can be re-used after quit.
13.323461
10.21391
1.304443
if isinstance(value, list): return value elif isinstance(value, string_type): if allow_filename and os.path.isfile(value): with codecs.open(value, 'r', encoding="utf-8") as handle: return handle.read().splitlines() return value.split(',') else: raise ValueError("Can't create list for input {}".format(value))
def _create_list(value, allow_filename=False)
Create a list from the input value. If the input is a list already, return it. If the input is a comma-separated string, split it.
2.526924
2.514705
1.004859
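Illustrative calls to _create_list; string_type and the filename branch are defined elsewhere in the module.
groups = _create_list("bacteria,viral,fungi")
# groups == ['bacteria', 'viral', 'fungi']
unchanged = _create_list(["bacteria"])
# a list input is returned as-is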
configured_ncbi_strings = [self._LEVELS[level] for level in self.assembly_level] return ncbi_assembly_level in configured_ncbi_strings
def is_compatible_assembly_level(self, ncbi_assembly_level)
Check if a given ncbi assembly level string matches the configured assembly levels.
7.650985
5.91451
1.293596
config = cls() for slot in cls.__slots__: if slot.startswith('_'): slot = slot[1:] setattr(config, slot, kwargs.pop(slot, cls.get_default(slot))) if kwargs: raise ValueError("Unrecognized option(s): {}".format(kwargs.keys())) return config
def from_kwargs(cls, **kwargs)
Initialise configuration from kwargs.
3.351326
2.966699
1.129648
config = cls() for slot in cls.__slots__: if slot.startswith('_'): slot = slot[1:] if not hasattr(namespace, slot): continue setattr(config, slot, getattr(namespace, slot)) return config
def from_namespace(cls, namespace)
Initialise from argparser Namespace object.
2.995347
2.77295
1.080202
value = cls._DEFAULTS[category] if not value or not isinstance(value, list): return value return value[0]
def get_default(cls, category)
Get the default value of a given category.
4.618651
4.058483
1.138024
value = cls._DEFAULTS[category] if not isinstance(value, list): raise ValueError("{} does not offer choices".format(category)) return value
def get_choices(cls, category)
Get all available options for a category.
6.656843
6.12661
1.086546
parser = argument_parser(version=__version__) args = parser.parse_args() if args.debug: log_level = logging.DEBUG elif args.verbose: log_level = logging.INFO else: log_level = logging.WARNING logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level) max_retries = args.retries attempts = 0 ret = args_download(args) while ret == 75 and attempts < max_retries: attempts += 1 logging.error( 'Downloading from NCBI failed due to a connection error, retrying. Retries so far: %s', attempts) ret = args_download(args) return ret
def main()
Build and parse command line.
2.883933
2.777596
1.038284
if columns is None: columns = _DEFAULT_COLUMNS global _METADATA if not _METADATA: _METADATA = MetaData(columns) return _METADATA
def get(columns=None)
Get or create MetaData singleton.
5.363253
3.046304
1.760577
row = self.rowClass() for key, val in entry.items(): if key in self.columns: setattr(row, key, val) row.local_filename = os.path.join('.', os.path.relpath(local_file)) self.rows.append(row)
def add(self, entry, local_file)
Add a metadata row.
3.583122
3.079926
1.163379
handle.write(u"\t".join(self.columns)) handle.write(u"\n") for row in self.rows: row.write(handle)
def write(self, handle)
Write metadata to handle.
3.184872
3.026035
1.05249
try: download_candidates = select_candidates(config) if len(download_candidates) < 1: logging.error("No downloads matched your filter. Please check your options.") return 1 if config.dry_run: print("Considering the following {} assemblies for download:".format(len(download_candidates))) for entry, _ in download_candidates: print(entry['assembly_accession'], entry['organism_name'], sep="\t") return 0 download_jobs = [] for entry, group in download_candidates: download_jobs.extend(create_downloadjob(entry, group, config)) if config.parallel == 1: for dl_job in download_jobs: worker(dl_job) else: # pragma: no cover # Testing multiprocessing code is annoying pool = Pool(processes=config.parallel) jobs = pool.map_async(worker, download_jobs) try: # 0xFFFF is just "a really long time" jobs.get(0xFFFF) except KeyboardInterrupt: # TODO: Actually test this once I figure out how to do this in py.test logging.error("Interrupted by user") return 1 if config.metadata_table: with codecs.open(config.metadata_table, mode='w', encoding='utf-8') as handle: table = metadata.get() table.write(handle) except requests.exceptions.ConnectionError as err: logging.error('Download from NCBI failed: %r', err) # Exit code 75 meas TEMPFAIL in C/C++, so let's stick with that for now. return 75 return 0
def config_download(config)
Run the actual download from NCBI with parameters in a config object. Parameters ---------- config: NgdConfig A configuration object with the download settings Returns ------- int success code
4.788812
4.758882
1.006289
download_candidates = [] for group in config.group: summary_file = get_summary(config.section, group, config.uri, config.use_cache) entries = parse_summary(summary_file) for entry in filter_entries(entries, config): download_candidates.append((entry, group)) return download_candidates
def select_candidates(config)
Select candidates to download. Parameters ---------- config: NgdConfig Runtime configuration object Returns ------- list of (<candidate entry>, <taxonomic group>)
5.16418
4.938971
1.045599
def in_genus_list(species, genus_list): for genus in genus_list: if species.startswith(genus.capitalize()): return True return False new_entries = [] for entry in entries: if config.type_material and config.type_material != ['any']: requested_types = map(lambda x: config._RELATION_TO_TYPE_MATERIAL[x], config.type_material) if not entry['relation_to_type_material'] or entry['relation_to_type_material'] not in requested_types: logging.debug("Skipping assembly with no reference to type material or reference to type material does not match requested") continue else: print(entry['relation_to_type_material']) if config.genus and not in_genus_list(entry['organism_name'], config.genus): logging.debug('Organism name %r does not start with any in %r, skipping', entry['organism_name'], config.genus) continue if config.species_taxid and entry['species_taxid'] not in config.species_taxid: logging.debug('Species TaxID %r does not match with any in %r, skipping', entry['species_taxid'], config.species_taxid) continue if config.taxid and entry['taxid'] not in config.taxid: logging.debug('Organism TaxID %r does not match with any in %r, skipping', entry['taxid'], config.taxid) continue if not config.is_compatible_assembly_accession(entry['assembly_accession']): logging.debug('Skipping entry with incompatible assembly accession %r', entry['assembly_accession']) continue if not config.is_compatible_assembly_level(entry['assembly_level']): logging.debug('Skipping entry with assembly level %r', entry['assembly_level']) continue if config.refseq_category != 'all' \ and entry['refseq_category'] != config.get_refseq_category_string(config.refseq_category): logging.debug('Skipping entry with refseq_category %r, not %r', entry['refseq_category'], config.refseq_category) continue new_entries.append(entry) return new_entries
def filter_entries(entries, config)
Narrow down which entries to download.
2.498733
2.473573
1.010172
ret = False try: if job.full_url is not None: req = requests.get(job.full_url, stream=True) ret = save_and_check(req, job.local_file, job.expected_checksum) if not ret: return ret ret = create_symlink(job.local_file, job.symlink_path) except KeyboardInterrupt: # pragma: no cover # TODO: Actually test this once I figure out how to do this in py.test logging.debug("Ignoring keyboard interrupt.") return ret
def worker(job)
Run a single download job.
4.727975
4.535498
1.042438
logging.debug('Checking for a cached summary file') cachefile = "{section}_{domain}_assembly_summary.txt".format(section=section, domain=domain) full_cachefile = os.path.join(CACHE_DIR, cachefile) if use_cache and os.path.exists(full_cachefile) and \ datetime.utcnow() - datetime.fromtimestamp(os.path.getmtime(full_cachefile)) < timedelta(days=1): logging.info('Using cached summary.') with codecs.open(full_cachefile, 'r', encoding='utf-8') as fh: return StringIO(fh.read()) logging.debug('Downloading summary for %r/%r uri: %r', section, domain, uri) url = '{uri}/{section}/{domain}/assembly_summary.txt'.format( section=section, domain=domain, uri=uri) req = requests.get(url) if use_cache: try: os.makedirs(CACHE_DIR) except OSError as err: # Errno 17 is "file exists", ignore that, otherwise re-raise if err.errno != 17: raise with codecs.open(full_cachefile, 'w', encoding='utf-8') as fh: fh.write(req.text) return StringIO(req.text)
def get_summary(section, domain, uri, use_cache)
Get the assembly_summary.txt file from NCBI and return a StringIO object for it.
2.34573
2.184162
1.073973
logging.info('Checking record %r', entry['assembly_accession']) full_output_dir = create_dir(entry, config.section, domain, config.output) symlink_path = None if config.human_readable: symlink_path = create_readable_dir(entry, config.section, domain, config.output) checksums = grab_checksums_file(entry) # TODO: Only write this when the checksums file changed with open(os.path.join(full_output_dir, 'MD5SUMS'), 'w') as handle: handle.write(checksums) parsed_checksums = parse_checksums(checksums) download_jobs = [] for fmt in config.file_format: try: if has_file_changed(full_output_dir, parsed_checksums, fmt): download_jobs.append( download_file_job(entry, full_output_dir, parsed_checksums, fmt, symlink_path)) elif need_to_create_symlink(full_output_dir, parsed_checksums, fmt, symlink_path): download_jobs.append( create_symlink_job(full_output_dir, parsed_checksums, fmt, symlink_path)) except ValueError as err: logging.error(err) return download_jobs
def create_downloadjob(entry, domain, config)
Create download jobs for all file formats from a summary file entry.
3.338383
3.234579
1.032092
full_output_dir = os.path.join(output, section, domain, entry['assembly_accession']) try: os.makedirs(full_output_dir) except OSError as err: if err.errno == errno.EEXIST and os.path.isdir(full_output_dir): pass else: raise return full_output_dir
def create_dir(entry, section, domain, output)
Create the output directory for the entry if needed.
2.008845
2.029243
0.989948
if domain != 'viral': full_output_dir = os.path.join(output, 'human_readable', section, domain, get_genus_label(entry), get_species_label(entry), get_strain_label(entry)) else: full_output_dir = os.path.join(output, 'human_readable', section, domain, entry['organism_name'].replace(' ', '_'), get_strain_label(entry, viral=True)) try: os.makedirs(full_output_dir) except OSError as err: if err.errno == errno.EEXIST and os.path.isdir(full_output_dir): pass else: raise return full_output_dir
def create_readable_dir(entry, section, domain, output)
Create a human-readable directory to link the entry to if needed.
2.291102
2.211946
1.035786
http_url = convert_ftp_url(entry['ftp_path']) full_url = '{}/md5checksums.txt'.format(http_url) req = requests.get(full_url) return req.text
def grab_checksums_file(entry)
Grab the checksum file for a given entry.
4.964792
4.946477
1.003703
checksums_list = [] for line in checksums_string.split('\n'): try: # skip empty lines if line == '': continue checksum, filename = line.split() # strip leading ./ if filename.startswith('./'): filename = filename[2:] checksums_list.append({'checksum': checksum, 'file': filename}) except ValueError: logging.debug('Skipping over unexpected checksum line %r', line) continue return checksums_list
def parse_checksums(checksums_string)
Parse a file containing checksums and filenames.
3.090489
2.943904
1.049793
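An illustrative parse of a two-line md5checksums.txt snippet; the filenames are made up.
snippet = "abc123  ./example_genomic.fna.gz\ndef456  ./md5checksums.txt"
entries = parse_checksums(snippet)
# entries == [{'checksum': 'abc123', 'file': 'example_genomic.fna.gz'},
#             {'checksum': 'def456', 'file': 'md5checksums.txt'}]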
pattern = NgdConfig.get_fileending(filetype) filename, expected_checksum = get_name_and_checksum(checksums, pattern) full_filename = os.path.join(directory, filename) # if file doesn't exist, it has changed if not os.path.isfile(full_filename): return True actual_checksum = md5sum(full_filename) return expected_checksum != actual_checksum
def has_file_changed(directory, checksums, filetype='genbank')
Check if the checksum of a given file has changed.
3.952118
3.964271
0.996934
# If we don't have a symlink path, we don't need to create a symlink if symlink_path is None: return False pattern = NgdConfig.get_fileending(filetype) filename, _ = get_name_and_checksum(checksums, pattern) full_filename = os.path.join(directory, filename) symlink_name = os.path.join(symlink_path, filename) if os.path.islink(symlink_name): existing_link = os.readlink(symlink_name) if full_filename == existing_link: return False return True
def need_to_create_symlink(directory, checksums, filetype, symlink_path)
Check if we need to create a symlink for an existing file.
3.155238
3.119377
1.011496
for entry in checksums: if not entry['file'].endswith(end): # wrong file continue # workaround for ..cds_from_genomic.fna.gz and ..rna_from_genomic.fna.gz also # ending in _genomic.fna.gz, causing bogus matches for the plain fasta if '_from_' not in end and '_from_' in entry['file']: # still the wrong file continue filename = entry['file'] expected_checksum = entry['checksum'] return filename, expected_checksum raise ValueError('No entry for file ending in {!r}'.format(end))
def get_name_and_checksum(checksums, end)
Extract a full filename and checksum from the checksums list for a file ending in given end.
6.646365
6.390867
1.039979
hash_md5 = hashlib.md5() with open(filename, 'rb') as handle: for chunk in iter(lambda: handle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest()
def md5sum(filename)
Calculate the md5sum of a file and return the hexdigest.
1.499925
1.511652
0.992242
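A self-contained check of md5sum against hashlib on a throwaway file.
import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as handle:
    handle.write(b"ncbi-genome-download")
    path = handle.name

assert md5sum(path) == hashlib.md5(b"ncbi-genome-download").hexdigest()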
pattern = NgdConfig.get_fileending(filetype) filename, expected_checksum = get_name_and_checksum(checksums, pattern) base_url = convert_ftp_url(entry['ftp_path']) full_url = '{}/{}'.format(base_url, filename) local_file = os.path.join(directory, filename) full_symlink = None if symlink_path is not None: full_symlink = os.path.join(symlink_path, filename) # Keep metadata around mtable = metadata.get() mtable.add(entry, local_file) return DownloadJob(full_url, local_file, expected_checksum, full_symlink)
def download_file_job(entry, directory, checksums, filetype='genbank', symlink_path=None)
Generate a DownloadJob that actually triggers a file download.
4.359784
4.118509
1.058583
pattern = NgdConfig.get_fileending(filetype) filename, _ = get_name_and_checksum(checksums, pattern) local_file = os.path.join(directory, filename) full_symlink = os.path.join(symlink_path, filename) return DownloadJob(None, local_file, None, full_symlink)
def create_symlink_job(directory, checksums, filetype, symlink_path)
Create a symlink-creating DownloadJob for an already downloaded file.
4.92347
4.28906
1.147914
with open(local_file, 'wb') as handle: for chunk in response.iter_content(4096): handle.write(chunk) actual_checksum = md5sum(local_file) if actual_checksum != expected_checksum: logging.error('Checksum mismatch for %r. Expected %r, got %r', local_file, expected_checksum, actual_checksum) return False return True
def save_and_check(response, local_file, expected_checksum)
Save the content of an http response and verify the checksum matches.
1.98737
1.942472
1.023114
if symlink_path is not None: if os.path.exists(symlink_path) or os.path.lexists(symlink_path): os.unlink(symlink_path) local_file = os.path.normpath(local_file) symlink_path = os.path.normpath(symlink_path) num_dirs_upward = len(os.path.dirname(symlink_path).split(os.sep)) local_relative_to_symlink = num_dirs_upward * (os.pardir + os.sep) os.symlink(os.path.join(local_relative_to_symlink, local_file), symlink_path) return True
def create_symlink(local_file, symlink_path)
Create a relative symbolic link if symlink path is given. Parameters ---------- local_file relative path to output folder (includes ./ prefix) of file saved symlink_path relative path to output folder (includes ./ prefix) of symbolic link to be created Returns ------- bool success code
2.387522
2.60371
0.916969
def get_strain(entry): strain = entry['infraspecific_name'] if strain != '': strain = strain.split('=')[-1] return strain strain = entry['isolate'] if strain != '': return strain if len(entry['organism_name'].split(' ')) > 2 and not viral: strain = ' '.join(entry['organism_name'].split(' ')[2:]) return strain return entry['assembly_accession'] def cleanup(strain): strain = strain.strip() strain = strain.replace(' ', '_') strain = strain.replace(';', '_') strain = strain.replace('/', '_') strain = strain.replace('\\', '_') return strain return cleanup(get_strain(entry))
def get_strain_label(entry, viral=False)
Try to extract a strain from an assembly summary entry. First this checks 'infraspecific_name', then 'isolate', then it tries to get it from 'organism_name'. If all of that fails, it falls back to just returning the assembly accession number.
2.318422
1.951883
1.187787
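An illustrative assembly summary entry for get_strain_label; the dict keys mirror those referenced in the code above.
entry = {
    'infraspecific_name': 'strain=K-12',
    'isolate': '',
    'organism_name': 'Escherichia coli K-12',
    'assembly_accession': 'GCF_000005845.2',
}
label = get_strain_label(entry)
# label == 'K-12', taken from infraspecific_name and cleaned of separators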
desc = 'Perform queries against the NCBI Taxa database' epi = parser = argparse.ArgumentParser(description=desc, epilog=epi, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('taxid', metavar='taxid', type=str, help='A comma-separated list of TaxIDs and/or taxon names. (e.g. 561,2172)') parser.add_argument('-v', '--verbose', action='count', default=0, help='Verbose behaviour. Supports 3 levels at present: Off = 0, Info = 1, Verbose = 2. (default: %(default)s)') parser.add_argument('-d', '--database', type=str, default=None, help='NCBI taxonomy database file path. If "None", it will be downloaded (default: %(default)s)') parser.add_argument('-u', '--update', action='store_true', default=False, help='Update the local taxon database before querying. Recommended if not used for a while. (default: %(default)s)') parser.add_argument('-j', '--just-taxids', action='store_true', default=False, help='Just write out a list of taxids an no other information (default: %(default)s)') parser.add_argument('-i', '--taxon-info', action='store_true', default=False, help='Just write out rank & lineage info on the provided taxids (default: %(default)s)') parser.add_argument('-o', '--outfile', action='store', help='Output file to store the descendent TaxIDs for the query.') return parser.parse_args()
def get_args()
Parse command line arguments
3.584219
3.525531
1.016647
for key, value in d.items(): print(' ' * indent + str(key)) if isinstance(value, dict): pretty(value, indent+1) else: sys.stderr.write(' ' * (indent+1) + str(value) + '\n')
def pretty(d, indent=0)
A prettier way to print nested dicts
1.906187
1.835253
1.038651
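A tiny example for the pretty printer; keys are printed to stdout and leaf values to stderr, as in the code above.
pretty({'Escherichia': {'coli': 562, 'fergusonii': 564}})
# prints the genus, then each species name indented one level,
# with the taxids written to stderr one level deeper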
# Main feature of the script is to get all taxa within a given group. descendent_taxa = ncbi.get_descendant_taxa(taxid) descendent_taxa_names = ncbi.translate_to_names(descendent_taxa) if just_taxids: for taxid in descendent_taxa: outFH.write(str(taxid) + '\n') else: for dtn, dt in zip(descendent_taxa_names, descendent_taxa): x = [str(x) for x in [taxid, dt, dtn]] outFH.write('\t'.join(x) + '\n')
def desc_taxa(taxid, ncbi, outFH, just_taxids=False)
Write descendent taxa for taxid
2.749758
2.734225
1.005681
taxid = int(taxid) tax_name = ncbi.get_taxid_translator([taxid])[taxid] rank = list(ncbi.get_rank([taxid]).values())[0] lineage = ncbi.get_taxid_translator(ncbi.get_lineage(taxid)) lineage = ['{}:{}'.format(k,v) for k,v in lineage.items()] lineage = ';'.join(lineage) x = [str(x) for x in [tax_name, taxid, rank, lineage]] outFH.write('\t'.join(x) + '\n')
def taxon_info(taxid, ncbi, outFH)
Write info on taxid
1.974577
1.966489
1.004113
new_taxids = [] for taxid in taxids: try: new_taxids.append(ncbi.get_name_translator([taxid])[taxid][0]) except KeyError: try: new_taxids.append(int(taxid)) except ValueError: msg = 'Error: cannot convert to taxid: {}' raise ValueError(msg.format(taxid)) return new_taxids
def name2taxid(taxids, ncbi)
Convert taxon names to taxids
2.133932
2.06142
1.035176
# Get commandline args args = get_args() # Instantiate the ete NCBI taxa object ncbi = NCBITaxa(dbfile=args.database) ## dbfile location if args.verbose > 1: sys.stderr.write('Taxa database is stored at {}\n'.format(ncbi.dbfile)) # Update the database if required. if args.update is True: if args.verbose > 1: msg = 'Updating the taxonomy database. This may take several minutes...\n' sys.stderr.write(msg) ncbi.update_taxonomy_database() # If names were provided in taxid list, convert to taxids args.taxid = args.taxid.replace('"', '').replace("'", '').split(',') args.taxid = name2taxid(args.taxid, ncbi) # Output if args.outfile is None: outFH = sys.stdout else: outFH = open(args.outfile, 'w') ## header if args.taxon_info: outFH.write('\t'.join(['name', 'taxid', 'rank', 'lineage']) + '\n') elif not args.just_taxids: outFH.write('\t'.join(['parent_taxid', 'descendent_taxid', 'descendent_name']) + '\n') ## body for taxid in args.taxid: if args.taxon_info: taxon_info(taxid, ncbi, outFH) else: desc_taxa(taxid, ncbi, outFH, args.just_taxids) outFH.close()
def main()
Make queries against NCBI Taxa databases
3.452172
3.370814
1.024136
''' Returns the final optimal value and choice probabilities given the choice specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0. Parameters ---------- Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks Returns ------- V : [numpy.array] A numpy.array that holds the integrated value function. P : [numpy.array] A numpy.array that holds the discrete choice probabilities ''' # Assumes that NaNs have been replaced by -numpy.inf or similar if sigma == 0.0: # We could construct a linear index here and use unravel_index. Pflat = np.argmax(Vals, axis=0) V = np.zeros(Vals[0].shape) Probs = np.zeros(Vals.shape) for i in range(Vals.shape[0]): optimalIndices = Pflat == i V[optimalIndices] = Vals[i][optimalIndices] Probs[i][optimalIndices] = 1 return V, Probs # else we have a taste shock maxV = np.max(Vals, axis=0) # calculate maxV+sigma*log(sum_i=1^J exp((V[i]-maxV))/sigma) sumexp = np.sum(np.exp((Vals-maxV)/sigma), axis=0) LogSumV = np.log(sumexp) LogSumV = maxV + sigma*LogSumV Probs = np.exp((Vals-LogSumV)/sigma) return LogSumV, Probs
def calcLogSumChoiceProbs(Vals, sigma)
Returns the final optimal value and choice probabilities given the choice specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0. Parameters ---------- Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks Returns ------- V : [numpy.array] A numpy.array that holds the integrated value function. P : [numpy.array] A numpy.array that holds the discrete choice probabilities
4.643164
2.600515
1.785478
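A numpy sketch exercising calcLogSumChoiceProbs in the degenerate sigma == 0.0 case; the value arrays are illustrative.
import numpy as np

# two choice-specific value functions on a common grid of three points
Vals = np.array([[1.0, 3.0, 0.5],
                 [2.0, 1.0, 0.5]])
V, P = calcLogSumChoiceProbs(Vals, sigma=0.0)
# V is the pointwise maximum [2.0, 3.0, 0.5]; P is degenerate, with ties resolved
# in favour of the first choice (np.argmax picks the lowest index)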
''' Returns the choice probabilities given the choice specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0. Parameters ---------- Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks Returns ------- Probs : [numpy.array] A numpy.array that holds the discrete choice probabilities ''' # Assumes that NaNs have been replaced by -numpy.inf or similar if sigma == 0.0: # We could construct a linear index here and use unravel_index. Pflat = np.argmax(Vals, axis=0) Probs = np.zeros(Vals.shape) for i in range(Vals.shape[0]): Probs[i][Pflat==i] = 1 return Probs maxV = np.max(Vals, axis=0) Probs = np.divide(np.exp((Vals-maxV)/sigma), np.sum(np.exp((Vals-maxV)/sigma), axis=0)) return Probs
def calcChoiceProbs(Vals, sigma)
Returns the choice probabilities given the choice specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0. Parameters ---------- Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks Returns ------- Probs : [numpy.array] A numpy.array that holds the discrete choice probabilities
4.834671
2.405575
2.009778
''' Returns the optimal value given the choice specific value functions Vals. Parameters ---------- Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks Returns ------- V : [numpy.array] A numpy.array that holds the integrated value function. ''' # Assumes that NaNs have been replaced by -numpy.inf or similar if sigma == 0.0: # We could construct a linear index here and use unravel_index. V = np.amax(Vals, axis=0) return V # else we have a taste shock maxV = np.max(Vals, axis=0) # calculate maxV+sigma*log(sum_i=1^J exp((V[i]-maxV))/sigma) sumexp = np.sum(np.exp((Vals-maxV)/sigma), axis=0) LogSumV = np.log(sumexp) LogSumV = maxV + sigma*LogSumV return LogSumV
def calcLogSum(Vals, sigma)
Returns the optimal value given the choice specific value functions Vals. Parameters ---------- Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks Returns ------- V : [numpy.array] A numpy.array that holds the integrated value function.
6.33258
3.259694
1.942692
''' Evaluates the derivative of the interpolated function at the given input. Parameters ---------- x : np.array or float Real values to be evaluated in the interpolated function. Returns ------- dydx : np.array or float The interpolated function's first derivative evaluated at x: dydx = f'(x), with the same shape as x. ''' z = np.asarray(x) return (self._der(z.flatten())).reshape(z.shape)
def derivative(self,x)
Evaluates the derivative of the interpolated function at the given input. Parameters ---------- x : np.array or float Real values to be evaluated in the interpolated function. Returns ------- dydx : np.array or float The interpolated function's first derivative evaluated at x: dydx = f'(x), with the same shape as x.
4.935203
1.946709
2.535152