code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
"Return a django.core.paginator.Page of results." limit = options.get('limit', settings.SELECTABLE_MAX_LIMIT) paginator = Paginator(results, limit) page = options.get('page', 1) try: results = paginator.page(page) except (EmptyPage, InvalidPage): results = paginator.page(paginator.num_pages) return results
def paginate_results(self, results, options)
Return a django.core.paginator.Page of results.
2.860694
2.334857
1.225212
"Match results to given term and return the serialized HttpResponse." results = {} form = self.form(request.GET) if form.is_valid(): options = form.cleaned_data term = options.get('term', '') raw_data = self.get_query(request, term) results = self.format_results(raw_data, options) return self.response(results)
def results(self, request)
Match results to given term and return the serialized HttpResponse.
4.973774
3.153954
1.576997
''' Returns a python structure that later gets serialized. raw_data full list of objects matching the search term options a dictionary of the given options ''' page_data = self.paginate_results(raw_data, options) results = {} meta = options.copy() meta['more'] = _('Show more results') if page_data and page_data.has_next(): meta['next_page'] = page_data.next_page_number() if page_data and page_data.has_previous(): meta['prev_page'] = page_data.previous_page_number() results['data'] = [self.format_item(item) for item in page_data.object_list] results['meta'] = meta return results
def format_results(self, raw_data, options)
Returns a Python structure that later gets serialized. raw_data: full list of objects matching the search term. options: a dictionary of the given options.
3.546672
2.153901
1.646627
from selectable.base import LookupBase if isinstance(lookup_class, string_types): mod_str, cls_str = lookup_class.rsplit('.', 1) mod = import_module(mod_str) lookup_class = getattr(mod, cls_str) if not issubclass(lookup_class, LookupBase): raise TypeError('lookup_class must extend from selectable.base.LookupBase') return lookup_class
def import_lookup_class(lookup_class)
Import lookup_class as a dotted path and ensure it extends LookupBase
2.05948
1.929733
1.067236
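A hedged usage sketch for the record above; "myapp.lookups.FruitLookup" is a hypothetical dotted path, not from the source. The imported class must subclass selectable.base.LookupBase or a TypeError is raised, and passing a class object instead of a string skips the import step.

lookup_cls = import_lookup_class("myapp.lookups.FruitLookup")  # hypothetical path
lookup_cls = import_lookup_class(lookup_cls)  # passing the class itself is also accepted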
"Ensure given limit is less than default if defined" limit = self.cleaned_data.get('limit', None) if (settings.SELECTABLE_MAX_LIMIT is not None and (not limit or limit > settings.SELECTABLE_MAX_LIMIT)): limit = settings.SELECTABLE_MAX_LIMIT return limit
def clean_limit(self)
Ensure the given limit does not exceed the configured maximum, if one is defined
4.977926
3.514053
1.416577
# build request url params = collections.OrderedDict() params["gbv"] = "2" params["q"] = "\"%s\" \"%s\" front cover" % (artist, album) if abs(self.target_size - 500) < 300: params["tbs"] = "isz:m" elif self.target_size > 800: params["tbs"] = "isz:l" return __class__.assembleUrl(__class__.BASE_URL, params)
def getSearchUrl(self, album, artist)
See CoverSource.getSearchUrl.
5.482882
5.284925
1.037457
results = [] # parse HTML and get results parser = lxml.etree.HTMLParser() html = lxml.etree.XML(api_data.decode("latin-1"), parser) for rank, result in enumerate(__class__.RESULTS_SELECTOR(html), 1): # extract url metadata_div = result.find("div") metadata_json = lxml.etree.tostring(metadata_div, encoding="unicode", method="text") metadata_json = json.loads(metadata_json) google_url = result.find("a").get("href") if google_url is not None: query = urllib.parse.urlsplit(google_url).query else: query = None if not query: img_url = metadata_json["ou"] else: query = urllib.parse.parse_qs(query) img_url = query["imgurl"][0] # extract format check_metadata = CoverImageMetadata.NONE format = metadata_json["ity"].lower() try: format = SUPPORTED_IMG_FORMATS[format] except KeyError: # format could not be identified or is unknown format = None check_metadata = CoverImageMetadata.FORMAT # extract size if not query: size = metadata_json["ow"], metadata_json["oh"] else: size = tuple(map(int, (query["w"][0], query["h"][0]))) # extract thumbnail url thumbnail_url = metadata_json["tu"] # result results.append(GoogleImagesCoverSourceResult(img_url, size, format, thumbnail_url=thumbnail_url, source=self, rank=rank, check_metadata=check_metadata)) return results
async def parseResults(self, api_data)
See CoverSource.parseResults.
3.402377
3.263541
1.042541
async with self.lock: while True: last_access_ts = self.__getLastAccess() if last_access_ts is not None: now = time.time() last_access_ts = last_access_ts[0] time_since_last_access = now - last_access_ts if time_since_last_access < self.min_delay_between_accesses: time_to_wait = self.min_delay_between_accesses - time_since_last_access if self.jitter_range_ms is not None: time_to_wait += random.randint(*self.jitter_range_ms) / 1000 self.logger.debug("Sleeping for %.2fms because of rate limit for domain %s" % (time_to_wait * 1000, self.domain)) await asyncio.sleep(time_to_wait) access_time = time.time() self.__access(access_time) # now we should be good... except if another process did the same query at the same time # the database serves as an atomic lock, query again to be sure the last row is the one # we just inserted last_access_ts = self.__getLastAccess() if last_access_ts[0] == access_time: break
async def waitAccessAsync(self)
Wait the needed time before sending a request, in order to honor the rate limit.
3.38865
3.304513
1.025461
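A minimal standalone sketch of the delay computation in the record above, under stated assumptions: no SQLite bookkeeping, no jitter, and a single process (the real method re-checks the database to guard against concurrent accesses).

import asyncio
import time

async def wait_before_access(last_access_ts, min_delay_between_accesses):
    # Sleep only for the remaining part of the minimum delay between accesses.
    elapsed = time.time() - last_access_ts
    if elapsed < min_delay_between_accesses:
        await asyncio.sleep(min_delay_between_accesses - elapsed)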
with self.connection: self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)", (ts, self.domain))
def __access(self, ts)
Record an API access.
6.151445
5.241507
1.173602
return aiohttp.ClientTimeout(total=None, connect=None, sock_connect=socket_timeout_s, sock_read=socket_timeout_s)
def aiohttp_socket_timeout(socket_timeout_s)
Return an aiohttp.ClientTimeout object with only socket timeouts set.
2.917072
2.323689
1.255362
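A hedged usage sketch: the returned timeout can be applied to a whole aiohttp session so that only socket connect/read operations are bounded (the 10 s value here is an arbitrary example).

import aiohttp

async def fetch(url):
    # Same shape of timeout as the helper above: no total limit, socket limits only.
    timeout = aiohttp.ClientTimeout(total=None, connect=None,
                                    sock_connect=10, sock_read=10)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(url) as response:
            return await response.read()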
async def store_in_cache_callback(): pass if cache is not None: # try from cache first if post_data is not None: if (url, post_data) in cache: self.logger.debug("Got data for URL '%s' %s from cache" % (url, dict(post_data))) return store_in_cache_callback, cache[(url, post_data)] elif url in cache: self.logger.debug("Got data for URL '%s' from cache" % (url)) return store_in_cache_callback, cache[url] domain_rate_watcher = rate_watcher.AccessRateWatcher(self.watcher_db_filepath, url, self.min_delay_between_accesses, jitter_range_ms=self.jitter_range_ms, logger=self.logger) for attempt, time_to_sleep in enumerate(redo.retrier(max_attempts=HTTP_MAX_ATTEMPTS, sleeptime=1, max_sleeptime=HTTP_MAX_RETRY_SLEEP_S, sleepscale=1.5), 1): await domain_rate_watcher.waitAccessAsync() try: if post_data is not None: async with self.session.post(url, data=post_data, headers=self._buildHeaders(headers), timeout=HTTP_NORMAL_TIMEOUT, ssl=verify) as response: content = await response.read() else: async with self.session.get(url, headers=self._buildHeaders(headers), timeout=HTTP_NORMAL_TIMEOUT, ssl=verify) as response: content = await response.read() if cache is not None: async def store_in_cache_callback(): if pre_cache_callback is not None: # process try: data = await pre_cache_callback(content) except Exception: data = content else: data = content # add to cache if post_data is not None: cache[(url, post_data)] = data else: cache[url] = data except (asyncio.TimeoutError, aiohttp.ClientError) as e: self.logger.warning("Querying '%s' failed (attempt %u/%u): %s %s" % (url, attempt, HTTP_MAX_ATTEMPTS, e.__class__.__qualname__, e)) if attempt == HTTP_MAX_ATTEMPTS: raise else: self.logger.debug("Retrying in %.3fs" % (time_to_sleep)) await asyncio.sleep(time_to_sleep) else: break # http retry loop response.raise_for_status() return store_in_cache_callback, content
async def query(self, url, *, post_data=None, headers=None, verify=True, cache=None, pre_cache_callback=None)
Send a GET/POST request or get data from cache, retry on failure, and return a (store-in-cache callback, response content) tuple.
2.947827
2.849373
1.034553
if (cache is not None) and (url in cache): # try from cache first self.logger.debug("Got headers for URL '%s' from cache" % (url)) resp_ok, response_headers = pickle.loads(cache[url]) return resp_ok domain_rate_watcher = rate_watcher.AccessRateWatcher(self.watcher_db_filepath, url, self.min_delay_between_accesses, jitter_range_ms=self.jitter_range_ms, logger=self.logger) resp_ok = True try: for attempt, time_to_sleep in enumerate(redo.retrier(max_attempts=HTTP_MAX_ATTEMPTS, sleeptime=0.5, max_sleeptime=HTTP_MAX_RETRY_SLEEP_SHORT_S, sleepscale=1.5), 1): await domain_rate_watcher.waitAccessAsync() try: async with self.session.head(url, headers=self._buildHeaders(headers), timeout=HTTP_SHORT_TIMEOUT, ssl=verify) as response: pass except (asyncio.TimeoutError, aiohttp.ClientError) as e: self.logger.warning("Probing '%s' failed (attempt %u/%u): %s %s" % (url, attempt, HTTP_MAX_ATTEMPTS, e.__class__.__qualname__, e)) if attempt == HTTP_MAX_ATTEMPTS: resp_ok = False else: self.logger.debug("Retrying in %.3fs" % (time_to_sleep)) await asyncio.sleep(time_to_sleep) else: response.raise_for_status() if response_headers is not None: response_headers.update(response.headers) break # http retry loop except aiohttp.ClientResponseError as e: self.logger.debug("Probing '%s' failed: %s %s" % (url, e.__class__.__qualname__, e)) resp_ok = False if cache is not None: # store in cache cache[url] = pickle.dumps((resp_ok, response_headers)) return resp_ok
async def isReachable(self, url, *, headers=None, verify=True, response_headers=None, cache=None)
Send a HEAD request with a short timeout or get data from cache; return True if the resource has a 2xx status code, False otherwise.
3.398748
3.309471
1.026976
response = await self.session.get(url, headers=self._buildHeaders(headers), timeout=HTTP_SHORT_TIMEOUT, ssl=verify) response.raise_for_status() return response
async def fastStreamedQuery(self, url, *, headers=None, verify=True)
Send a GET request with a short timeout, do not retry, and return the streamed response.
4.867872
3.733702
1.303766
# build request url params = collections.OrderedDict() params["method"] = "album.getinfo" params["api_key"] = __class__.API_KEY params["album"] = album params["artist"] = artist return __class__.assembleUrl(__class__.BASE_URL, params)
def getSearchUrl(self, album, artist)
See CoverSource.getSearchUrl.
3.543509
3.350292
1.057672
char_blacklist = set(string.punctuation) char_blacklist.remove("'") char_blacklist.remove("&") char_blacklist = frozenset(char_blacklist) return __class__.unpunctuate(s.lower(), char_blacklist=char_blacklist)
def processQueryString(self, s)
See CoverSource.processQueryString.
5.472283
5.337078
1.025333
results = [] # get xml results list xml_text = api_data.decode("utf-8") xml_root = xml.etree.ElementTree.fromstring(xml_text) status = xml_root.get("status") if status != "ok": raise Exception("Unexpected Last.fm response status: %s" % (status)) img_elements = xml_root.findall("album/image") # build results from xml thumbnail_url = None thumbnail_size = None for img_element in img_elements: img_url = img_element.text if not img_url: # last.fm returns empty image tag for size it does not have continue lfm_size = img_element.get("size") if lfm_size == "mega": check_metadata = CoverImageMetadata.SIZE else: check_metadata = CoverImageMetadata.NONE try: size = __class__.SIZES[lfm_size] except KeyError: continue if (size[0] <= MAX_THUMBNAIL_SIZE) and ((thumbnail_size is None) or (size[0] < thumbnail_size)): thumbnail_url = img_url thumbnail_size = size[0] format = os.path.splitext(img_url)[1][1:].lower() format = SUPPORTED_IMG_FORMATS[format] results.append(LastFmCoverSourceResult(img_url, size, format, thumbnail_url=thumbnail_url, source=self, check_metadata=check_metadata)) return results
async def parseResults(self, api_data)
See CoverSource.parseResults.
3.531352
3.393615
1.040587
# register sources source_args = (size, size_tolerance_prct) cover_sources = [sources.LastFmCoverSource(*source_args), sources.AmazonCdCoverSource(*source_args), sources.AmazonDigitalCoverSource(*source_args)] for tld in amazon_tlds: cover_sources.append(sources.AmazonCdCoverSource(*source_args, tld=tld)) if not no_lq_sources: cover_sources.append(sources.GoogleImagesWebScrapeCoverSource(*source_args)) # schedule search work search_futures = [] for cover_source in cover_sources: coroutine = cover_source.search(album, artist) future = asyncio.ensure_future(coroutine, loop=async_loop) search_futures.append(future) # wait for it await asyncio.wait(search_futures, loop=async_loop) # get results results = [] for future in search_futures: source_results = future.result() results.extend(source_results) # sort results results = await CoverSourceResult.preProcessForComparison(results, size, size_tolerance_prct) results.sort(reverse=True, key=functools.cmp_to_key(functools.partial(CoverSourceResult.compare, target_size=size, size_tolerance_prct=size_tolerance_prct))) if not results: logging.getLogger("Main").info("No results") # download for result in results: try: await result.get(format, size, size_tolerance_prct, out_filepath) except Exception as e: logging.getLogger("Main").warning("Download of %s failed: %s %s" % (result, e.__class__.__qualname__, e)) continue else: return True return False
async def search_and_download(album, artist, format, size, out_filepath, *, size_tolerance_prct, amazon_tlds, no_lq_sources, async_loop)
Search for and download a cover; return True on success, False otherwise.
2.850923
2.781927
1.024801
url = "%s/search" % (__class__.BASE_URL) params = collections.OrderedDict() params["search-alias"] = "digital-music" params["field-keywords"] = " ".join((artist, album)) params["sort"] = "relevancerank" return __class__.assembleUrl(url, params)
def getSearchUrl(self, album, artist)
See CoverSource.getSearchUrl.
5.969135
5.783729
1.032057
results = [] # parse page parser = lxml.etree.HTMLParser() html = lxml.etree.XML(api_data.decode("utf-8"), parser) for page_struct_version, result_selector in enumerate(__class__.RESULTS_SELECTORS): result_nodes = result_selector(html) if result_nodes: break for rank, result_node in enumerate(result_nodes, 1): # get thumbnail & full image url img_node = __class__.IMG_SELECTORS[page_struct_version](result_node)[0] thumbnail_url = img_node.get("src") thumbnail_url = thumbnail_url.replace("Stripe-Prime-Only", "") url_parts = thumbnail_url.rsplit(".", 2) img_url = ".".join((url_parts[0], url_parts[2])) # assume size is fixed size = (500, 500) # try to get higher res image... if self.target_size > size[0]: # ...but only if needed self.logger.debug("Looking for optimal subimages configuration...") product_url = __class__.LINK_SELECTOR(result_node)[0].get("href") product_url = urllib.parse.urlsplit(product_url) product_id = product_url.path.split("/")[3] # TODO don't pick up highest res image if user asked less? for amazon_img_format in AMAZON_DIGITAL_IMAGE_FORMATS: # TODO review this, it seem to always fail now self.logger.debug("Trying %u subimages..." % (amazon_img_format.slice_count ** 2)) urls = tuple(self.generateImgUrls(product_id, __class__.DYNAPI_KEY, amazon_img_format.id, amazon_img_format.slice_count)) url_ok = await self.probeUrl(urls[-1]) if not url_ok: # images at this size are not available continue # images at this size are available img_url = urls size = (amazon_img_format.total_res,) * 2 break # assume format is always jpg format = CoverImageFormat.JPEG # add result results.append(AmazonDigitalCoverSourceResult(img_url, size, format, thumbnail_url=thumbnail_url, source=self, rank=rank, check_metadata=CoverImageMetadata.SIZE)) return results
async def parseResults(self, api_data)
See CoverSource.parseResults.
5.833104
5.689994
1.025151
for x in range(slice_count): for y in range(slice_count): yield ("http://z2-ec2.images-amazon.com/R/1/a=" + product_id + "+c=" + dynapi_key + "+d=_SCR%28" + str(format_id) + "," + str(x) + "," + str(y) + "%29_=.jpg")
def generateImgUrls(self, product_id, dynapi_key, format_id, slice_count)
Generate URLs for slice_count^2 subimages of a product.
7.190877
6.593605
1.090584
assert(max_attempts > 1) assert(sleeptime >= 0) assert(0 <= jitter <= sleeptime) assert(sleepscale >= 1) cur_sleeptime = min(max_sleeptime, sleeptime) for attempt in range(max_attempts): cur_jitter = random.randint(int(-jitter * 1000), int(jitter * 1000)) / 1000 yield max(0, cur_sleeptime + cur_jitter) cur_sleeptime = min(max_sleeptime, cur_sleeptime * sleepscale)
def retrier(*, max_attempts, sleeptime, max_sleeptime, sleepscale=1.5, jitter=0.2)
Generator yielding the time to wait after each attempt, if that attempt failed.
2.213489
2.055659
1.076778
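A hedged sketch of the retry loop this generator is meant to drive, mirroring its use in the query/isReachable records above; attempt_request() is a hypothetical placeholder for the real network call.

import time

max_attempts = 4
for attempt, time_to_sleep in enumerate(retrier(max_attempts=max_attempts,
                                                sleeptime=1,
                                                max_sleeptime=10), 1):
    try:
        attempt_request()  # hypothetical call that may raise OSError
    except OSError:
        if attempt == max_attempts:
            raise
        time.sleep(time_to_sleep)
    else:
        break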
if self.source_quality.value <= CoverSourceQuality.LOW.value: logging.getLogger("Cover").warning("Cover is from a potentially unreliable source and may be unrelated to the search") images_data = [] for i, url in enumerate(self.urls): # download logging.getLogger("Cover").info("Downloading cover '%s' (part %u/%u)..." % (url, i + 1, len(self.urls))) headers = {} self.source.updateHttpHeaders(headers) async def pre_cache_callback(img_data): return await __class__.crunch(img_data, self.format) store_in_cache_callback, image_data = await self.source.http.query(url, headers=headers, verify=False, cache=__class__.image_cache, pre_cache_callback=pre_cache_callback) # store immediately in cache await store_in_cache_callback() # append for multi images images_data.append(image_data) need_format_change = (self.format != target_format) need_size_change = ((max(self.size) > target_size) and (abs(max(self.size) - target_size) > target_size * size_tolerance_prct / 100)) need_join = len(images_data) > 1 if need_join or need_format_change or need_size_change: # post process image_data = self.postProcess(images_data, target_format if need_format_change else None, target_size if need_size_change else None) # crunch image again image_data = await __class__.crunch(image_data, target_format) # write it with open(out_filepath, "wb") as file: file.write(image_data)
async def get(self, target_format, target_size, size_tolerance_prct, out_filepath)
Download cover and process it.
4.076368
3.957857
1.029943
if len(images_data) == 1: in_bytes = io.BytesIO(images_data[0]) img = PIL.Image.open(in_bytes) if img.mode != "RGB": img = img.convert("RGB") else: # images need to be joined before further processing logging.getLogger("Cover").info("Joining %u images..." % (len(images_data))) # TODO find a way to do this losslessly for JPEG new_img = PIL.Image.new("RGB", self.size) assert(is_square(len(images_data))) sq = int(math.sqrt(len(images_data))) images_data_it = iter(images_data) img_sizes = {} for x in range(sq): for y in range(sq): current_image_data = next(images_data_it) img_stream = io.BytesIO(current_image_data) img = PIL.Image.open(img_stream) img_sizes[(x, y)] = img.size box = [0, 0] if x > 0: for px in range(x): box[0] += img_sizes[(px, y)][0] if y > 0: for py in range(y): box[1] += img_sizes[(x, py)][1] box.extend((box[0] + img.size[0], box[1] + img.size[1])) new_img.paste(img, box=tuple(box)) img = new_img out_bytes = io.BytesIO() if new_size is not None: logging.getLogger("Cover").info("Resizing from %ux%u to %ux%u..." % (self.size[0], self.size[1], new_size, new_size)) img = img.resize((new_size, new_size), PIL.Image.LANCZOS) # apply unsharp filter to remove resize blur (equivalent to (images/graphics)magick -unsharp 1.5x1+0.7+0.02) # we don't use PIL.ImageFilter.SHARPEN or PIL.ImageEnhance.Sharpness because we want precise control over # parameters unsharper = PIL.ImageFilter.UnsharpMask(radius=1.5, percent=70, threshold=5) img = img.filter(unsharper) if new_format is not None: logging.getLogger("Cover").info("Converting to %s..." % (new_format.name.upper())) target_format = new_format else: target_format = self.format img.save(out_bytes, format=target_format.name, quality=90, optimize=True) return out_bytes.getvalue()
def postProcess(self, images_data, new_format, new_size)
Convert image binary data to a target format and/or size (None if no conversion needed), and return the processed data.
2.936589
2.934717
1.000638
assert(self.needMetadataUpdate()) width_sum, height_sum = 0, 0 # only download metadata for the needed images to get full size idxs = [] assert(is_square(len(self.urls))) sq = int(math.sqrt(len(self.urls))) for x in range(sq): for y in range(sq): if x == y: idxs.append((x * sq + y, x, y)) for idx, x, y in idxs: url = self.urls[idx] format, width, height = None, None, None try: format, width, height = pickle.loads(__class__.metadata_cache[url]) except KeyError: # cache miss pass except Exception as e: logging.getLogger("Cover").warning("Unable to load metadata for URL '%s' from cache: %s %s" % (url, e.__class__.__qualname__, e)) else: # cache hit logging.getLogger("Cover").debug("Got metadata for URL '%s' from cache" % (url)) if format is not None: self.setFormatMetadata(format) if (self.needMetadataUpdate(CoverImageMetadata.FORMAT) or (self.needMetadataUpdate(CoverImageMetadata.SIZE) and ((width is None) or (height is None)))): # download logging.getLogger("Cover").debug("Downloading file header for URL '%s'..." % (url)) try: headers = {} self.source.updateHttpHeaders(headers) response = await self.source.http.fastStreamedQuery(url, headers=headers, verify=False) try: if self.needMetadataUpdate(CoverImageMetadata.FORMAT): # try to get format from response format = __class__.guessImageFormatFromHttpResponse(response) if format is not None: self.setFormatMetadata(format) if self.needMetadataUpdate(): # try to get metadata from HTTP data metadata = await __class__.guessImageMetadataFromHttpData(response) if metadata is not None: format, width, height = metadata if format is not None: self.setFormatMetadata(format) finally: response.release() except Exception as e: logging.getLogger("Cover").warning("Failed to get file metadata for URL '%s' " "(%s %s)" % (url, e.__class__.__qualname__, e)) if self.needMetadataUpdate(): # did we fail to get needed metadata at this point? if ((self.format is None) or ((self.size is None) and ((width is None) or (height is None)))): # if we get here, file is probably not reachable, or not even an image logging.getLogger("Cover").debug("Unable to get file metadata from file or HTTP headers for URL '%s', " "skipping this result" % (url)) return if ((self.format is not None) and ((self.size is not None) and (width is None) and (height is None))): logging.getLogger("Cover").debug("Unable to get file metadata from file or HTTP headers for URL '%s', " "falling back to API data" % (url)) self.check_metadata = CoverImageMetadata.NONE self.reliable_metadata = False return # save it to cache __class__.metadata_cache[url] = pickle.dumps((format, width, height)) # sum sizes if (width is not None) and (height is not None): width_sum += width height_sum += height if self.needMetadataUpdate(CoverImageMetadata.SIZE) and (width_sum > 0) and (height_sum > 0): self.setSizeMetadata((width_sum, height_sum))
async def updateImageMetadata(self)
Partially download image file(s) to get their real metadata, or get it from cache.
3.235178
3.162618
1.022943
assert((self.needMetadataUpdate(CoverImageMetadata.FORMAT)) or (self.format is format)) self.format = format self.check_metadata &= ~CoverImageMetadata.FORMAT
def setFormatMetadata(self, format)
Set the image format metadata to what has been reliably identified.
14.972317
14.955585
1.001119
assert((self.needMetadataUpdate(CoverImageMetadata.SIZE)) or (self.size == size)) self.size = size self.check_metadata &= ~CoverImageMetadata.SIZE
def setSizeMetadata(self, size)
Set the image size metadata to what has been reliably identified.
13.137888
12.566401
1.045477
assert(self.thumbnail_sig is None) if self.thumbnail_url is None: logging.getLogger("Cover").warning("No thumbnail available for %s" % (self)) return # download logging.getLogger("Cover").debug("Downloading cover thumbnail '%s'..." % (self.thumbnail_url)) headers = {} self.source.updateHttpHeaders(headers) async def pre_cache_callback(img_data): return await __class__.crunch(img_data, CoverImageFormat.JPEG, silent=True) try: store_in_cache_callback, image_data = await self.source.http.query(self.thumbnail_url, cache=__class__.image_cache, headers=headers, pre_cache_callback=pre_cache_callback) except Exception as e: logging.getLogger("Cover").warning("Download of '%s' failed: %s %s" % (self.thumbnail_url, e.__class__.__qualname__, e)) return # compute sig logging.getLogger("Cover").debug("Computing signature of %s..." % (self)) try: self.thumbnail_sig = __class__.computeImgSignature(image_data) except Exception as e: logging.getLogger("Cover").warning("Failed to compute signature of '%s': %s %s" % (self, e.__class__.__qualname__, e)) else: await store_in_cache_callback()
async def updateSignature(self)
Calculate a cover's "signature" using its thumbnail URL.
3.666364
3.411572
1.074685
for c in (first, second): assert(c.format is not None) assert(isinstance(c.size[0], int) and isinstance(c.size[1], int)) # prefer square covers #1 delta_ratio1 = abs(first.size[0] / first.size[1] - 1) delta_ratio2 = abs(second.size[0] / second.size[1] - 1) if abs(delta_ratio1 - delta_ratio2) > 0.15: return -1 if (delta_ratio1 > delta_ratio2) else 1 # prefer similar to reference sr1 = first.is_similar_to_reference sr2 = second.is_similar_to_reference if sr1 and (not sr2): return 1 if (not sr1) and sr2: return -1 # prefer size above preferred delta_size1 = ((first.size[0] + first.size[1]) / 2) - target_size delta_size2 = ((second.size[0] + second.size[1]) / 2) - target_size if (((delta_size1 < 0) and (delta_size2 >= 0)) or (delta_size1 >= 0) and (delta_size2 < 0)): return -1 if (delta_size1 < delta_size2) else 1 # if both below target size, prefer closest if (delta_size1 < 0) and (delta_size2 < 0) and (delta_size1 != delta_size2): return -1 if (delta_size1 < delta_size2) else 1 # prefer covers of most reliable source qs1 = first.source_quality.value qs2 = second.source_quality.value if qs1 != qs2: return -1 if (qs1 < qs2) else 1 # prefer best ranked if ((first.rank is not None) and (second.rank is not None) and (first.__class__ is second.__class__) and (first.rank != second.rank)): return -1 if (first.rank > second.rank) else 1 # prefer reliable metadata if first.reliable_metadata != second.reliable_metadata: return 1 if first.reliable_metadata else -1 # prefer covers with less images to join ic1 = len(first.urls) ic2 = len(second.urls) if ic1 != ic2: return -1 if (ic1 > ic2) else 1 # prefer the preferred size if abs(delta_size1) != abs(delta_size2): return -1 if (abs(delta_size1) > abs(delta_size2)) else 1 # prefer png if first.format != second.format: return -1 if (second.format is CoverImageFormat.PNG) else 1 # prefer square covers #2 if (delta_ratio1 != delta_ratio2): return -1 if (delta_ratio1 > delta_ratio2) else 1 # fuck, they are the same! return 0
def compare(first, second, *, target_size, size_tolerance_prct)
Compare cover relevance/quality. Return -1 if first is a worse match than second, 1 otherwise, or 0 if the covers can't be discriminated. This code is responsible for comparing two cover results to identify the best one, and is used to sort all results. It is probably the most important piece of code of this tool. Covers with sizes under the target size (+- configured tolerance) are excluded before comparison. The following factors are used in order: 1. Prefer approximately square covers 2. Prefer covers similar to the reference cover 3. Prefer size above target size 4. If both are below target size, prefer the closest 5. Prefer covers from the most reliable source 6. Prefer the best ranked cover 7. Prefer covers with reliable metadata If all previous factors do not allow sorting of two results (very unlikely): 8. Prefer covers with fewer images to join 9. Prefer covers having the target size 10. Prefer PNG covers 11. Prefer exactly square covers We don't overload the __lt__ operator because we need to pass the target_size parameter.
2.448941
1.98057
1.236483
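A hedged sketch of how the comparator above is plugged into sorting (it mirrors the call in the search_and_download record); target_size=600 and size_tolerance_prct=25 are example values, and results is assumed to be a list of CoverSourceResult objects.

import functools

results.sort(reverse=True,
             key=functools.cmp_to_key(functools.partial(CoverSourceResult.compare,
                                                        target_size=600,
                                                        size_tolerance_prct=25)))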
if (((format is CoverImageFormat.PNG) and (not HAS_OPTIPNG)) or ((format is CoverImageFormat.JPEG) and (not HAS_JPEGOPTIM))): return image_data with mkstemp_ctx.mkstemp(suffix=".%s" % (format.name.lower())) as tmp_out_filepath: if not silent: logging.getLogger("Cover").info("Crunching %s image..." % (format.name.upper())) with open(tmp_out_filepath, "wb") as tmp_out_file: tmp_out_file.write(image_data) size_before = len(image_data) if format is CoverImageFormat.PNG: cmd = ["optipng", "-quiet", "-o1"] elif format is CoverImageFormat.JPEG: cmd = ["jpegoptim", "-q", "--strip-all"] cmd.append(tmp_out_filepath) p = await asyncio.create_subprocess_exec(*cmd, stdin=asyncio.subprocess.DEVNULL, stdout=asyncio.subprocess.DEVNULL, stderr=asyncio.subprocess.DEVNULL) await p.wait() if p.returncode != 0: if not silent: logging.getLogger("Cover").warning("Crunching image failed") return image_data with open(tmp_out_filepath, "rb") as tmp_out_file: crunched_image_data = tmp_out_file.read() size_after = len(crunched_image_data) pct_saved = 100 * (size_before - size_after) / size_before if not silent: logging.getLogger("Cover").debug("Crunching image saved %.2f%% filesize" % (pct_saved)) return crunched_image_data
async def crunch(image_data, format, silent=False)
Crunch image data, and return the processed data, or the original data if the operation failed.
2.284969
2.247702
1.01658
format, width, height = None, None, None img_stream = io.BytesIO(img_data) try: img = PIL.Image.open(img_stream) except IOError: format = imghdr.what(None, h=img_data) format = SUPPORTED_IMG_FORMATS.get(format, None) else: format = img.format.lower() format = SUPPORTED_IMG_FORMATS.get(format, None) width, height = img.size return format, width, height
def guessImageMetadataFromData(img_data)
Identify an image format and size from its first bytes.
2.271628
2.259923
1.00518
metadata = None img_data = bytearray() while len(img_data) < CoverSourceResult.MAX_FILE_METADATA_PEEK_SIZE: new_img_data = await response.content.read(__class__.METADATA_PEEK_SIZE_INCREMENT) if not new_img_data: break img_data.extend(new_img_data) metadata = __class__.guessImageMetadataFromData(img_data) if (metadata is not None) and all(metadata): return metadata return metadata
async def guessImageMetadataFromHttpData(response)
Identify an image format and size from the beginning of its HTTP data.
4.556139
4.403642
1.03463
extensions = [] # try to guess extension from response content-type header try: content_type = response.headers["Content-Type"] except KeyError: pass else: ext = mimetypes.guess_extension(content_type, strict=False) if ext is not None: extensions.append(ext) # try to extract extension from URL urls = list(response.history) + [response.url] for url in map(str, urls): ext = os.path.splitext(urllib.parse.urlsplit(url).path)[-1] if (ext is not None) and (ext not in extensions): extensions.append(ext) # now guess from the extensions for ext in extensions: try: return SUPPORTED_IMG_FORMATS[ext[1:]] except KeyError: pass
def guessImageFormatFromHttpResponse(response)
Guess file format from HTTP response, return format or None.
2.577914
2.581778
0.998503
# find reference (=image most likely to match target cover ignoring factors like size and format) reference = None for result in results: if result.source_quality is CoverSourceQuality.REFERENCE: if ((reference is None) or (CoverSourceResult.compare(result, reference, target_size=target_size, size_tolerance_prct=size_tolerance_prct) > 0)): reference = result # remove results that are only refs results = list(itertools.filterfalse(operator.attrgetter("is_only_reference"), results)) # remove duplicates no_dup_results = [] for result in results: is_dup = False for result_comp in results: if ((result_comp is not result) and (result_comp.urls == result.urls) and (__class__.compare(result, result_comp, target_size=target_size, size_tolerance_prct=size_tolerance_prct) < 0)): is_dup = True break if not is_dup: no_dup_results.append(result) dup_count = len(results) - len(no_dup_results) if dup_count > 0: logging.getLogger("Cover").info("Removed %u duplicate results" % (dup_count)) results = no_dup_results if reference is not None: logging.getLogger("Cover").info("Reference is: %s" % (reference)) reference.is_similar_to_reference = True # calculate sigs futures = [] for result in results: coroutine = result.updateSignature() future = asyncio.ensure_future(coroutine) futures.append(future) if reference.is_only_reference: assert(reference not in results) coroutine = reference.updateSignature() future = asyncio.ensure_future(coroutine) futures.append(future) if futures: await asyncio.wait(futures) for future in futures: future.result() # raise pending exception if any # compare other results to reference for result in results: if ((result is not reference) and (result.thumbnail_sig is not None) and (reference.thumbnail_sig is not None)): result.is_similar_to_reference = __class__.areImageSigsSimilar(result.thumbnail_sig, reference.thumbnail_sig) if result.is_similar_to_reference: logging.getLogger("Cover").debug("%s is similar to reference" % (result)) else: logging.getLogger("Cover").debug("%s is NOT similar to reference" % (result)) else: logging.getLogger("Cover").warning("No reference result found") return results
async def preProcessForComparison(results, target_size, size_tolerance_prct)
Process results to prepare them for future comparison and sorting.
3.047148
3.025367
1.0072
parser = PIL.ImageFile.Parser() parser.feed(image_data) img = parser.close() target_size = (__class__.IMG_SIG_SIZE, __class__.IMG_SIG_SIZE) img.thumbnail(target_size, PIL.Image.BICUBIC) if img.size != target_size: logging.getLogger("Cover").debug("Non square thumbnail after resize to %ux%u, unable to compute signature" % target_size) return None img = img.convert(mode="RGB") pixels = img.getdata() pixel_count = target_size[0] * target_size[1] color_count = 3 r = bitarray.bitarray(pixel_count * color_count) r.setall(False) for ic in range(color_count): mean = sum(p[ic] for p in pixels) // pixel_count for ip, p in enumerate(pixels): if p[ic] > mean: r[pixel_count * ic + ip] = True return r
def computeImgSignature(image_data)
Calculate an image signature. This is similar to ahash but uses 3 color components. See: https://github.com/JohannesBuchner/imagehash/blob/4.0/imagehash/__init__.py#L125
3.469667
3.47684
0.997937
work = {} stats = collections.OrderedDict(((k, 0) for k in("files", "albums", "missing covers", "errors"))) with tqdm.tqdm(desc="Analyzing library", unit="dir", postfix=stats) as progress, \ tqdm_logging.redirect_logging(progress): for rootpath, rel_dirpaths, rel_filepaths in os.walk(lib_dir): metadata = analyze_dir(stats, rootpath, rel_filepaths, cover_filename, ignore_existing=ignore_existing) progress.set_postfix(stats, refresh=False) progress.update(1) if all(metadata[:-1]): work[rootpath] = metadata[:-1] return work
def analyze_lib(lib_dir, cover_filename, *, ignore_existing=False)
Recursively analyze library, and return a dict of path -> (artist, album).
5.205149
4.71732
1.103412
artist, album, has_embedded_album_art = None, None, None for audio_filepath in audio_filepaths: try: mf = mutagen.File(audio_filepath) except Exception: continue if mf is None: continue # artist for key in ("albumartist", "artist", # ogg "TPE1", "TPE2", # mp3 "aART", "\xa9ART"): # mp4 try: val = mf.get(key, None) except ValueError: val = None if val is not None: artist = val[-1] break # album for key in ("_album", "album", # ogg "TALB", # mp3 "\xa9alb"): # mp4 try: val = mf.get(key, None) except ValueError: val = None if val is not None: album = val[-1] break if artist and album: # album art if isinstance(mf, mutagen.ogg.OggFileType): has_embedded_album_art = "metadata_block_picture" in mf elif isinstance(mf, mutagen.mp3.MP3): has_embedded_album_art = any(map(operator.methodcaller("startswith", "APIC:"), mf.keys())) elif isinstance(mf, mutagen.mp4.MP4): has_embedded_album_art = "covr" in mf # stop at the first file that succeeds (for performance) break return artist, album, has_embedded_album_art
def get_metadata(audio_filepaths)
Return a tuple of artist, album, has_embedded_album_art from a list of audio files.
2.587495
2.366323
1.093467
no_metadata = None, None, None metadata = no_metadata audio_filepaths = [] for rel_filepath in rel_filepaths: stats["files"] += 1 try: ext = os.path.splitext(rel_filepath)[1][1:].lower() except IndexError: continue if ext in AUDIO_EXTENSIONS: audio_filepaths.append(os.path.join(parent_dir, rel_filepath)) if audio_filepaths: stats["albums"] += 1 if (cover_filename != EMBEDDED_ALBUM_ART_SYMBOL): missing = (not os.path.isfile(os.path.join(parent_dir, cover_filename))) or ignore_existing if missing: metadata = get_metadata(audio_filepaths) else: metadata = get_metadata(audio_filepaths) missing = (not metadata[2]) or ignore_existing if missing: stats["missing covers"] += 1 if not all(metadata[:-1]): # failed to get metadata for this album stats["errors"] += 1 logging.getLogger("sacad_r").error("Unable to read metadata for album directory '%s'" % (parent_dir)) else: metadata = no_metadata return metadata
def analyze_dir(stats, parent_dir, rel_filepaths, cover_filename, *, ignore_existing=False)
Analyze a directory (non-recursively) to get its album metadata, if it is an album.
3.420067
3.192641
1.071235
with open(cover_filepath, "rb") as f: cover_data = f.read() for filename in os.listdir(path): try: ext = os.path.splitext(filename)[1][1:].lower() except IndexError: continue if ext in AUDIO_EXTENSIONS: filepath = os.path.join(path, filename) mf = mutagen.File(filepath) if (isinstance(mf.tags, mutagen._vorbis.VComment) or isinstance(mf, mutagen.ogg.OggFileType)): picture = mutagen.flac.Picture() picture.data = cover_data picture.type = mutagen.id3.PictureType.COVER_FRONT picture.mime = "image/jpeg" encoded_data = base64.b64encode(picture.write()) mf["metadata_block_picture"] = encoded_data.decode("ascii") elif (isinstance(mf.tags, mutagen.id3.ID3) or isinstance(mf, mutagen.id3.ID3FileType)): mf.tags.add(mutagen.id3.APIC(mime="image/jpeg", type=mutagen.id3.PictureType.COVER_FRONT, data=cover_data)) elif (isinstance(mf.tags, mutagen.mp4.MP4Tags) or isinstance(mf, mutagen.mp4.MP4)): mf["covr"] = [mutagen.mp4.MP4Cover(cover_data, imageformat=mutagen.mp4.AtomDataType.JPEG)] mf.save()
def embed_album_art(cover_filepath, path)
Embed album art into audio files.
2.282792
2.225712
1.025646
it = iter(iterable) while True: chunk = tuple(itertools.islice(it, n)) if not chunk: return yield chunk
def ichunk(iterable, n)
Split an iterable into n-sized chunks.
2.090048
1.956929
1.068024
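A short example of the chunking behavior (the last chunk may be smaller than n):

>>> list(ichunk(range(7), 3))
[(0, 1, 2), (3, 4, 5), (6,)]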
with contextlib.ExitStack() as cm: if args.filename == EMBEDDED_ALBUM_ART_SYMBOL: tmp_prefix = "%s_" % (os.path.splitext(os.path.basename(inspect.getfile(inspect.currentframe())))[0]) tmp_dir = cm.enter_context(tempfile.TemporaryDirectory(prefix=tmp_prefix)) # setup progress report stats = collections.OrderedDict(((k, 0) for k in("ok", "errors", "no result found"))) progress = cm.enter_context(tqdm.tqdm(total=len(work), miniters=1, desc="Searching covers", unit="cover", postfix=stats)) cm.enter_context(tqdm_logging.redirect_logging(progress)) def update_progress(future): path, cover_filepath, artist, album = futures[future] try: status = future.result() except Exception as exception: stats["errors"] += 1 logging.getLogger("sacad_r").error("Error occured while searching cover for " "'%s' by '%s' from '%s': %s %s" % (album, artist, path, exception.__class__.__qualname__, exception)) else: if status: if args.filename == EMBEDDED_ALBUM_ART_SYMBOL: try: embed_album_art(cover_filepath, path) except Exception as exception: stats["errors"] += 1 logging.getLogger("sacad_r").error("Error occured while embedding cover for " "'%s' by '%s' from '%s': %s %s" % (album, artist, path, exception.__class__.__qualname__, exception)) else: stats["ok"] += 1 finally: os.remove(cover_filepath) else: stats["ok"] += 1 else: stats["no result found"] += 1 logging.getLogger("sacad_r").warning("Unable to find cover for '%s' by '%s' from '%s'" % (album, artist, path)) progress.set_postfix(stats, refresh=False) progress.update(1) # post work async_loop = asyncio.get_event_loop() i = 0 # default event loop on Windows has a 512 fd limit, see https://docs.python.org/3/library/asyncio-eventloops.html#windows # also on Linux default max open fd limit is 1024 (ulimit -n) # so work in smaller chunks to avoid hitting fd limit # this also updates the progress faster (instead of working on all searches, work on finishing the chunk before # getting to the next one) work_chunk_length = 16 for work_chunk in ichunk(work.items(), work_chunk_length): futures = {} for i, (path, (artist, album)) in enumerate(work_chunk, i): if args.filename == EMBEDDED_ALBUM_ART_SYMBOL: cover_filepath = os.path.join(tmp_dir, "%00u.%s" % (i, args.format.name.lower())) else: cover_filepath = os.path.join(path, args.filename) coroutine = sacad.search_and_download(album, artist, args.format, args.size, cover_filepath, size_tolerance_prct=args.size_tolerance_prct, amazon_tlds=args.amazon_tlds, no_lq_sources=args.no_lq_sources, async_loop=async_loop) future = asyncio.ensure_future(coroutine, loop=async_loop) futures[future] = (path, cover_filepath, artist, album) for future in futures: future.add_done_callback(update_progress) # wait for end of work root_future = asyncio.gather(*futures.keys(), loop=async_loop) async_loop.run_until_complete(root_future)
def get_covers(work, args)
Get missing covers.
3.207132
3.203401
1.001165
fd, filename = tempfile.mkstemp(*args, **kwargs) os.close(fd) try: yield filename finally: os.remove(filename)
def mkstemp(*args, **kwargs)
Context manager similar to tempfile.NamedTemporaryFile except the file is not deleted on close, and only the filepath is returned. .. warning:: Unlike tempfile.mkstemp, this is not secure
2.213367
2.741998
0.807209
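A hedged usage sketch, assuming the body above is wrapped with @contextlib.contextmanager (the yield suggests it): the temporary file path stays valid inside the block and the file is removed on exit.

with mkstemp(suffix=".jpg") as tmp_filepath:
    with open(tmp_filepath, "wb") as tmp_file:
        tmp_file.write(b"...")  # work with the file here; it is deleted afterwards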
# remove current handler assert(len(logger.handlers) == 1) prev_handler = logger.handlers[0] logger.removeHandler(prev_handler) # add tqdm handler tqdm_handler = TqdmLoggingHandler(tqdm_obj) if prev_handler.formatter is not None: tqdm_handler.setFormatter(prev_handler.formatter) logger.addHandler(tqdm_handler) try: yield finally: # restore handler logger.removeHandler(tqdm_handler) logger.addHandler(prev_handler)
def redirect_logging(tqdm_obj, logger=logging.getLogger())
Context manager to redirect logging to a TqdmLoggingHandler object and then restore the original.
2.179578
2.035889
1.070578
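A hedged usage sketch (assuming redirect_logging is itself a context manager, as its use with enter_context elsewhere in this data suggests): log records are routed through the tqdm progress bar while it is alive, so they do not break the bar's rendering.

import logging
import tqdm

with tqdm.tqdm(total=100, desc="Working") as progress, redirect_logging(progress):
    logging.getLogger().warning("this message goes through the tqdm handler")
    progress.update(100)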
self.logger.debug("Searching with source '%s'..." % (self.__class__.__name__)) album = self.processAlbumString(album) artist = self.processArtistString(artist) url_data = self.getSearchUrl(album, artist) if isinstance(url_data, tuple): url, post_data = url_data else: url = url_data post_data = None try: store_in_cache_callback, api_data = await self.fetchResults(url, post_data) results = await self.parseResults(api_data) except Exception as e: # raise self.logger.warning("Search with source '%s' failed: %s %s" % (self.__class__.__name__, e.__class__.__qualname__, e)) return () else: if results: # only store in cache if parsing succeeds and we have results await store_in_cache_callback() # get metadata futures = [] for result in filter(operator.methodcaller("needMetadataUpdate"), results): coroutine = result.updateImageMetadata() future = asyncio.ensure_future(coroutine) futures.append(future) if futures: await asyncio.wait(futures) for future in futures: future.result() # raise pending exception if any # filter results_excluded_count = 0 reference_only_count = 0 results_kept = [] for result in results: if ((result.size[0] + (self.size_tolerance_prct * self.target_size / 100) < self.target_size) or # skip too small images (result.size[1] + (self.size_tolerance_prct * self.target_size / 100) < self.target_size) or (result.format is None) or # unknown format result.needMetadataUpdate()): # if still true, it means we failed to grab metadata, so exclude it if result.source_quality is CoverSourceQuality.REFERENCE: # we keep this result just for the reference, it will be excluded from the results result.is_only_reference = True results_kept.append(result) reference_only_count += 1 else: results_excluded_count += 1 else: results_kept.append(result) result_kept_count = len(results_kept) - reference_only_count # log self.logger.info("Got %u relevant (%u excluded) results from source '%s'" % (result_kept_count, results_excluded_count + reference_only_count, self.__class__.__name__)) for result in itertools.filterfalse(operator.attrgetter("is_only_reference"), results_kept): self.logger.debug("%s %s%s %4dx%4d %s%s" % (result.__class__.__name__, ("(%02d) " % (result.rank)) if result.rank is not None else "", result.format.name, result.size[0], result.size[1], result.urls[0], " [x%u]" % (len(result.urls)) if len(result.urls) > 1 else "")) return results_kept
async def search(self, album, artist)
Search for a given album/artist and return an iterable of CoverSourceResult.
3.453185
3.357123
1.028615
if post_data is not None: self.logger.debug("Querying URL '%s' %s..." % (url, dict(post_data))) else: self.logger.debug("Querying URL '%s'..." % (url)) headers = {} self.updateHttpHeaders(headers) return await self.http.query(url, post_data=post_data, headers=headers, cache=__class__.api_cache)
async def fetchResults(self, url, post_data=None)
Get a (store-in-cache callback, search results) tuple from a URL.
3.963827
3.667739
1.080727
self.logger.debug("Probing URL '%s'..." % (url)) headers = {} self.updateHttpHeaders(headers) resp_headers = {} resp_ok = await self.http.isReachable(url, headers=headers, response_headers=resp_headers, cache=__class__.probe_cache) if response_headers is not None: response_headers.update(resp_headers) return resp_ok
async def probeUrl(self, url, response_headers=None)
Probe URL reachability from cache or HEAD request.
4.75603
4.211877
1.129195
return "".join(c for c in unicodedata.normalize("NFKD", s) if not unicodedata.combining(c))
def unaccentuate(s)
Replace accented characters in a string with their non-accented equivalents.
2.242958
2.384171
0.940771
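A short example of the NFKD-based stripping above (characters without a decomposed form are left untouched):

>>> unaccentuate("Mötley Crüe")
'Motley Crue'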
# remove punctuation s = "".join(c for c in s if c not in char_blacklist) # remove consecutive spaces return " ".join(filter(None, s.split(" ")))
def unpunctuate(s, *, char_blacklist=string.punctuation)
Remove punctuation from string s.
3.177681
3.048123
1.042504
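A short example with the default blacklist (string.punctuation); consecutive spaces left by the removal are collapsed:

>>> unpunctuate("AC/DC: Back in Black!")
'ACDC Back in Black'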
params = collections.OrderedDict() params["search-alias"] = "popular" params["field-artist"] = artist params["field-title"] = album params["sort"] = "relevancerank" return __class__.assembleUrl(self.base_url, params)
def getSearchUrl(self, album, artist)
See CoverSource.getSearchUrl.
6.435432
5.966608
1.078575
results = [] # parse page parser = lxml.etree.HTMLParser() html = lxml.etree.XML(api_data.decode("utf-8", "ignore"), parser) for page_struct_version, result_selector in enumerate(__class__.RESULTS_SELECTORS): result_nodes = result_selector(html) if result_nodes: break for rank, result_node in enumerate(result_nodes, 1): try: img_node = __class__.IMG_SELECTORS[page_struct_version](result_node)[0] except IndexError: # no image for that product continue # get thumbnail & full image url thumbnail_url = img_node.get("src") url_parts = thumbnail_url.rsplit(".", 2) img_url = ".".join((url_parts[0], url_parts[2])) # assume size is fixed size = (500, 500) check_metadata = CoverImageMetadata.SIZE # try to get higher res image... if ((self.target_size > size[0]) and # ...only if needed (rank <= 3)): # and only for first 3 results because this is time # consuming (1 more GET request per result) product_url = __class__.PRODUCT_LINK_SELECTORS[page_struct_version](result_node)[0].get("href") product_url_split = urllib.parse.urlsplit(product_url) if not product_url_split.scheme: # relative redirect url product_url_query = urllib.parse.parse_qsl(product_url_split.query) product_url_query = collections.OrderedDict(product_url_query) try: # needed if page_struct_version == 1 product_url = product_url_query["url"] except KeyError: # page_struct_version == 0, make url absolute product_url = urllib.parse.urljoin(self.base_url, product_url) product_url_split = urllib.parse.urlsplit(product_url) product_url_query = urllib.parse.parse_qsl(product_url_split.query) product_url_query = collections.OrderedDict(product_url_query) try: # remove timestamp from url to improve future cache hit rate del product_url_query["qid"] except KeyError: pass product_url_query = urllib.parse.urlencode(product_url_query) product_url_no_ts = urllib.parse.urlunsplit(product_url_split[:3] + (product_url_query,) + product_url_split[4:]) store_in_cache_callback, product_page_data = await self.fetchResults(product_url_no_ts) product_page_html = lxml.etree.XML(product_page_data.decode("latin-1"), parser) try: img_node = __class__.PRODUCT_PAGE_IMG_SELECTOR(product_page_html)[0] except IndexError: # unable to get better image pass else: better_img_url = img_node.get("data-old-hires") # img_node.get("data-a-dynamic-image") contains json with image urls too, but they are not larger than # previous 500px image and are often covered by autorip badges (can be removed by cleaning url though) if better_img_url: img_url = better_img_url size_url_hint = img_url.rsplit(".", 2)[1].strip("_") assert(size_url_hint.startswith("SL")) size_url_hint = int(size_url_hint[2:]) size = (size_url_hint, size_url_hint) check_metadata = CoverImageMetadata.NONE await store_in_cache_callback() # assume format is always jpg format = CoverImageFormat.JPEG # add result results.append(AmazonCdCoverSourceResult(img_url, size, format, thumbnail_url=thumbnail_url, source=self, rank=rank, check_metadata=check_metadata)) return results
async def parseResults(self, api_data)
See CoverSource.parseResults.
3.903577
3.826011
1.020273
'''Calls `delete()` on all members of `obj` that are recognized as instances of `pg` objects.''' types = tuple([ Shader, Mesh, VertexBuffer, IndexBuffer, Texture, Program, Context, ]) for name in dir(obj): child = getattr(obj, name) if isinstance(child, types): child.delete()
def delete_all(obj)
Calls `delete()` on all members of `obj` that are recognized as instances of `pg` objects.
6.931458
3.79261
1.827622
''' Queries and returns the library version tuple or None by using a subprocess. ''' version_checker_source = args = [sys.executable, '-c', textwrap.dedent(version_checker_source)] process = subprocess.Popen(args, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE) out = process.communicate(_to_char_p(filename))[0] out = out.strip() if out: return eval(out) else: return None
def _glfw_get_version(filename)
Queries and returns the library version tuple or None by using a subprocess.
5.737463
3.721128
1.541861
''' Sets the error callback. Wrapper for: GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun); ''' global _error_callback previous_callback = _error_callback if cbfun is None: cbfun = 0 c_cbfun = _GLFWerrorfun(cbfun) _error_callback = (cbfun, c_cbfun) cbfun = c_cbfun _glfw.glfwSetErrorCallback(cbfun) if previous_callback is not None and previous_callback[0] != 0: return previous_callback[0]
def set_error_callback(cbfun)
Sets the error callback. Wrapper for: GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun);
2.994275
2.364038
1.266593
''' Destroys the specified window and its context. Wrapper for: void glfwDestroyWindow(GLFWwindow* window); ''' _glfw.glfwDestroyWindow(window) window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_ulong)).contents.value for callback_repository in _callback_repositories: if window_addr in callback_repository: del callback_repository[window_addr]
def destroy_window(window)
Destroys the specified window and its context. Wrapper for: void glfwDestroyWindow(GLFWwindow* window);
4.351564
3.36476
1.293276
''' Returns a nested python sequence. ''' size = self.width, self.height bits = self.red_bits, self.green_bits, self.blue_bits return size, bits, self.refresh_rate
def unwrap(self)
Returns a nested python sequence.
10.053178
6.0543
1.660502
''' Wraps a nested python sequence. ''' red, green, blue = gammaramp size = min(len(red), len(green), len(blue)) array_type = ctypes.c_ushort*size self.size = ctypes.c_uint(size) self.red_array = array_type() self.green_array = array_type() self.blue_array = array_type() for i in range(self.size): self.red_array[i] = int(red[i]*65535) self.green_array[i] = int(green[i]*65535) self.blue_array[i] = int(blue[i]*65535) pointer_type = ctypes.POINTER(ctypes.c_ushort) self.red = ctypes.cast(self.red_array, pointer_type) self.green = ctypes.cast(self.green_array, pointer_type) self.blue = ctypes.cast(self.blue_array, pointer_type)
def wrap(self, gammaramp)
Wraps a nested python sequence.
2.002753
1.778411
1.126148
''' Returns a nested python sequence. ''' red = [self.red[i]/65535.0 for i in range(self.size)] green = [self.green[i]/65535.0 for i in range(self.size)] blue = [self.blue[i]/65535.0 for i in range(self.size)] return red, green, blue
def unwrap(self)
Returns a nested python sequence.
3.063938
2.284728
1.341052
'''Accepts a hexadecimal color `value` in the format ``0xrrggbb`` and returns an (r, g, b) tuple where 0.0 <= r, g, b <= 1.0. ''' r = ((value >> (8 * 2)) & 255) / 255.0 g = ((value >> (8 * 1)) & 255) / 255.0 b = ((value >> (8 * 0)) & 255) / 255.0 return (r, g, b)
def hex_color(value)
Accepts a hexadecimal color `value` in the format ``0xrrggbb`` and returns an (r, g, b) tuple where 0.0 <= r, g, b <= 1.0.
2.339453
1.592646
1.468909
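A short example of the 0xrrggbb unpacking above:

>>> hex_color(0x336699)
(0.2, 0.4, 0.6)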
'''Normalizes the `vector` so that its length is 1. `vector` can have any number of components. ''' d = sum(x * x for x in vector) ** 0.5 return tuple(x / d for x in vector)
def normalize(vector)
Normalizes the `vector` so that its length is 1. `vector` can have any number of components.
4.154996
2.633309
1.577861
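A short example with a 2-component vector (the classic 3-4-5 triangle):

>>> normalize((3, 4))
(0.6, 0.8)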
'''Computes and returns the distance between two points, `p1` and `p2`. The points can have any number of components. ''' return sum((a - b) ** 2 for a, b in zip(p1, p2)) ** 0.5
def distance(p1, p2)
Computes and returns the distance between two points, `p1` and `p2`. The points can have any number of components.
3.568531
2.13982
1.667678
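A short example with 3-component points:

>>> distance((0, 0, 0), (1, 2, 2))
3.0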
'''Computes the cross product of two vectors. ''' return ( v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0], )
def cross(v1, v2)
Computes the cross product of two vectors.
1.553676
1.641903
0.946266
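A short example: the cross product of the x and y unit vectors is the z unit vector.

>>> cross((1, 0, 0), (0, 1, 0))
(0, 0, 1)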
'''Computes the dot product of two vectors. ''' x1, y1, z1 = v1 x2, y2, z2 = v2 return x1 * x2 + y1 * y2 + z1 * z2
def dot(v1, v2)
Computes the dot product of two vectors.
1.978347
2.183605
0.906
'''Adds two vectors. ''' return tuple(a + b for a, b in zip(v1, v2))
def add(v1, v2)
Adds two vectors.
4.158019
4.746074
0.876097
'''Subtracts two vectors. ''' return tuple(a - b for a, b in zip(v1, v2))
def sub(v1, v2)
Subtracts two vectors.
3.675864
4.820034
0.762622
'''Interpolate from one vector to another. ''' return add(v1, mul(sub(v2, v1), t))
def interpolate(v1, v2, t)
Interpolate from one vector to another.
4.065472
5.11086
0.795457
'''Computes a normal vector given three points. ''' x1, y1, z1 = a x2, y2, z2 = b x3, y3, z3 = c ab = (x2 - x1, y2 - y1, z2 - z1) ac = (x3 - x1, y3 - y1, z3 - z1) x, y, z = cross(ab, ac) d = (x * x + y * y + z * z) ** 0.5 return (x / d, y / d, z / d)
def normal_from_points(a, b, c)
Computes a normal vector given three points.
1.766028
1.752927
1.007474
'''Assigns an averaged normal to each position based on all of the normals originally used for the position. ''' lookup = defaultdict(list) for position, normal in zip(positions, normals): lookup[position].append(normal) result = [] for position in positions: tx = ty = tz = 0 for x, y, z in lookup[position]: tx += x ty += y tz += z d = (tx * tx + ty * ty + tz * tz) ** 0.5 result.append((tx / d, ty / d, tz / d)) return result
def smooth_normals(positions, normals)
Assigns an averaged normal to each position based on all of the normals originally used for the position.
3.059401
2.195813
1.393289
'''Computes the bounding box for a list of 3-dimensional points. ''' (x0, y0, z0) = (x1, y1, z1) = positions[0] for x, y, z in positions: x0 = min(x0, x) y0 = min(y0, y) z0 = min(z0, z) x1 = max(x1, x) y1 = max(y1, y) z1 = max(z1, z) return (x0, y0, z0), (x1, y1, z1)
def bounding_box(positions)
Computes the bounding box for a list of 3-dimensional points.
1.71303
1.625291
1.053983
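A short example returning the (min corner, max corner) pair:

>>> bounding_box([(1, 2, 3), (-1, 5, 0), (2, 2, 2)])
((-1, 2, 0), (2, 5, 3))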
'''Returns a list of new positions centered around the origin. ''' (x0, y0, z0), (x1, y1, z1) = bounding_box(positions) dx = x1 - (x1 - x0) / 2.0 dy = y1 - (y1 - y0) / 2.0 dz = z1 - (z1 - z0) / 2.0 result = [] for x, y, z in positions: result.append((x - dx, y - dy, z - dz)) return result
def recenter(positions)
Returns a list of new positions centered around the origin.
2.034868
1.843662
1.10371
'''Interleaves the elements of the provided arrays. >>> a = [(0, 0), (1, 0), (2, 0), (3, 0)] >>> b = [(0, 0), (0, 1), (0, 2), (0, 3)] >>> interleave(a, b) [(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)] This is useful for combining multiple vertex attributes into a single vertex buffer. The shader attributes can be assigned a slice of the vertex buffer. ''' result = [] for array in zip(*args): result.append(tuple(flatten(array))) return result
def interleave(*args)
Interleaves the elements of the provided arrays. >>> a = [(0, 0), (1, 0), (2, 0), (3, 0)] >>> b = [(0, 0), (0, 1), (0, 2), (0, 3)] >>> interleave(a, b) [(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)] This is useful for combining multiple vertex attributes into a single vertex buffer. The shader attributes can be assigned a slice of the vertex buffer.
2.659945
1.505589
1.766714
'''Yields distinct items from `iterable` in the order that they appear. '''
seen = set()
for item in iterable:
    key = item if keyfunc is None else keyfunc(item)
    if key not in seen:
        seen.add(key)
        yield item
def distinct(iterable, keyfunc=None)
Yields distinct items from `iterable` in the order that they appear.
2.737202
2.209101
1.239057
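Two small checks for distinct(), one with the default identity key and one with a keyfunc; both follow directly from the generator above.

assert list(distinct([3, 1, 3, 2, 1])) == [3, 1, 2]
# with a key function, later items whose key was already seen are dropped
assert list(distinct(['a', 'A', 'b'], keyfunc=str.lower)) == ['a', 'b']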
'''Computes the distance from a point to a triangle given a ray. '''
eps = 1e-6
e1 = sub(v2, v1)
e2 = sub(v3, v1)
p = cross(d, e2)
det = dot(e1, p)
if abs(det) < eps:
    return None
inv = 1.0 / det
t = sub(o, v1)
u = dot(t, p) * inv
if u < 0 or u > 1:
    return None
q = cross(t, e1)
v = dot(d, q) * inv
# the hit lies inside the triangle only if u >= 0, v >= 0 and u + v <= 1
if v < 0 or u + v > 1:
    return None
t = dot(e2, q) * inv
if t > eps:
    return t
return None
def ray_triangle_intersection(v1, v2, v3, o, d)
Computes the distance from a point to a triangle given a ray.
2.107877
1.965891
1.072225
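A usage sketch for ray_triangle_intersection() (a Möller–Trumbore-style test): a unit ray pointing straight down at a triangle in the z = 0 plane hits it after travelling one unit. It assumes the barycentric containment check shown above.

v1, v2, v3 = (0, 0, 0), (1, 0, 0), (0, 1, 0)
origin, direction = (0.25, 0.25, 1.0), (0.0, 0.0, -1.0)   # unit-length direction
hit = ray_triangle_intersection(v1, v2, v3, origin, direction)
assert abs(hit - 1.0) < 1e-9    # distance along the ray to the hit point (0.25, 0.25, 0)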
'''Convert a Python list into a ctypes buffer. This appears to be faster
than the typical method of creating a ctypes array, e.g.
(c_float * len(data))(*data) '''
func = struct.Struct(fmt).pack
# struct.pack returns bytes, so join with b'' (works on Python 2 and 3)
return create_string_buffer(b''.join([func(x) for x in data]))
def pack_list(fmt, data)
Convert a Python list into a ctypes buffer. This appears to be faster than the typical method of creating a ctypes array, e.g. (c_float * len(data))(*data)
6.010242
2.394659
2.509853
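A usage sketch for pack_list(), assuming the b''.join form above so it also runs on Python 3; the resulting buffer can be handed to a C API (for example an OpenGL vertex-buffer upload) that expects a contiguous float array.

import struct

buf = pack_list('f', [1.0, 2.0, 3.0])
# three packed 32-bit floats, plus the trailing NUL byte create_string_buffer appends
assert len(buf) == 3 * struct.calcsize('f') + 1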
if jquery:
    e = JQuery(self)
    e.click()
else:
    super(Clickable, self).click()
def click(self, jquery=False)
Clicks the element. When `jquery` is True the click is dispatched through JQuery; otherwise the regular WebElement click is used.
5.000114
4.750831
1.052471
cookie_dict = dict()
for k in keys_map.keys():
    key = _to_unicode_if_str(keys_map[k])
    value = _to_unicode_if_str(getattr(cookie, k))
    cookie_dict[key] = value
return cookie_dict
def convert_cookie_to_dict(cookie, keys_map=WEB_DRIVER_COOKIE_KEYS_MAP)
Converts an instance of the Cookie class from cookielib to a dict. The attribute names can be remapped according to keys_map; for example, this method can be used to create a cookie that is compatible with the WebDriver format. :param cookie: Cookie instance received from requests/sessions using the urllib2 or requests libraries. :param keys_map: The dict that maps cookie attributes between schemas. The WebDriver format is used by default. :return: A dict representation of the cookie.
2.701835
3.04462
0.887413
for cookie in cookies:
    driver.add_cookie(convert_cookie_to_dict(cookie))
return driver
def add_cookies_to_web_driver(driver, cookies)
Sets cookies in an existing WebDriver session.
2.893091
3.066068
0.943583
self.conf = conf
self.when = options.browser_closer_when
def configure(self, options, conf)
Configure plugin. Plugin is enabled by default.
18.189978
17.743834
1.025144
basename = os.path.basename(root)
if os.path.splitext(basename)[0] != '__init__' and basename.startswith('_'):
    return
location = self._determine_location_for(root)
if os.path.isfile(root):
    self._index_module(root, location)
elif os.path.isdir(root) and os.path.exists(os.path.join(root, '__init__.py')):
    self._index_package(root, location)
def index_path(self, root)
Index a path. :param root: Either a package directory, a .so or a .py module.
2.687639
2.783634
0.965515
if not paths:
    paths = sys.path
if not name:
    name = 'default'
self._name = name
idx_dir = get_cache_dir()
idx_file = os.path.join(idx_dir, name + '.json')
if os.path.exists(idx_file) and not refresh:
    with open(idx_file) as fd:
        self.deserialize(fd)
else:
    self.build_index(paths)
    with open(idx_file, 'w') as fd:
        self.serialize(fd)
return self
def get_or_create_index(self, paths=None, name=None, refresh=False)
Get the index with the given name from the cache, creating it if it doesn't exist.
2.352176
2.252864
1.044082
scores = []
path = []

# sys.path          sys path            -> import sys
# os.path.basename  os.path basename    -> import os.path
# basename          os.path basename    -> from os.path import basename
# path.basename     os.path basename    -> from os import path

def fixup(module, variable):
    prefix = module.split('.')
    if variable is not None:
        prefix.append(variable)
    seeking = symbol.split('.')
    new_module = []
    while prefix and seeking[0] != prefix[0]:
        new_module.append(prefix.pop(0))
    if new_module:
        module, variable = '.'.join(new_module), prefix[0]
    else:
        variable = None
    return module, variable

def score_walk(scope, scale):
    sub_path, score = self._score_key(scope, full_key)
    if score > 0.1:
        try:
            i = sub_path.index(None)
            sub_path, from_symbol = sub_path[:i], '.'.join(sub_path[i + 1:])
        except ValueError:
            from_symbol = None
        package_path = '.'.join(path + sub_path)
        package_path, from_symbol = fixup(package_path, from_symbol)
        scores.append((score * scale, package_path, from_symbol))
    for key, subscope in scope._tree.items():
        if type(subscope) is not float:
            path.append(key)
            score_walk(subscope, subscope.score * scale - 0.1)
            path.pop()

full_key = symbol.split('.')
score_walk(self, 1.0)
scores.sort(reverse=True)
return scores
def symbol_scores(self, symbol)
Find matches for symbol. :param symbol: A . separated symbol. eg. 'os.path.basename' :returns: A list of tuples of (score, package, reference|None), ordered by score from highest to lowest.
3.864868
3.786006
1.02083
path = path.split('.')
node = self
while node._parent:
    node = node._parent
for name in path:
    node = node._tree.get(name, None)
    if node is None or type(node) is float:
        return None
return node
def find(self, path)
Return the node for a path, or None.
3.654473
3.320769
1.10049
path = path.split('.')
node = self
while node._parent:
    node = node._parent
location = node.location
for name in path:
    tree = node._tree.get(name, None)
    if tree is None or type(tree) is float:
        return location
    location = tree.location
return location
def location_for(self, path)
Return the location code for a path.
4.199203
3.858886
1.088191
items_list = self.get_options()
for item in items_list:
    if item.get_attribute("value") == option:
        item.click()
        break
def select_option(self, option)
Performs selection of the provided item from a web list. @params option - string item name
3.083205
3.344587
0.921849
items_list = self.get_options()
return next(iter([item.get_attribute(attribute) for item in items_list if item.is_selected()]), None)
def get_attribute_selected(self, attribute)
Searches the web list for the selected item and returns the given attribute of that item. @params attribute - string attribute name
4.791818
4.750484
1.008701
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
    self._set_selected(opt)
    if not self.is_multiple:
        return
    matched = True

# in case the target option isn't found by xpath, attempt to find it by direct
# comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
    sub_string_without_space = self._get_longest_token(text)
    if sub_string_without_space == "":
        candidates = self.get_options()
    else:
        xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
        candidates = self.find_elements_by_xpath(xpath)
    for candidate in candidates:
        if text == candidate.text:
            self._set_selected(candidate)
            if not self.is_multiple:
                return
            matched = True

if not matched:
    raise NoSuchElementException("Could not locate element with visible text: " + str(text))
def select_by_visible_text(self, text)
Selects the option whose visible text matches the given string. @params text - string visible text
3.350204
3.508837
0.95479
kwargs.setdefault('sleep_seconds', (1, None))
kwargs.setdefault('expected_exceptions', WebDriverException)
kwargs.setdefault('timeout_seconds', webium.settings.wait_timeout)
return wait_lib(*args, **kwargs)
def wait(*args, **kwargs)
Wraps the 'wait()' function of the 'waiting' library with default parameter values. WebDriverException is included in the expected (ignored) exceptions by default.
6.809405
4.869012
1.398519
if isinstance(source, text_type) and sys.version_info[0] == 2:
    # ast.parse() on Python 2 doesn't like encoding declarations
    # in Unicode strings
    source = CODING_COOKIE_RE.sub(r'\1', source, 1)
return ast.parse(source, filename or '<unknown>')
def parse_ast(source, filename=None)
Parse source into a Python AST, taking care of encoding.
4.726437
4.303702
1.098226
unresolved = set()
unreferenced = self._definitions.copy()
self._collect_unresolved_and_unreferenced(
    set(), set(), unresolved, unreferenced,
    frozenset(self._definitions), start=True)
return unresolved, unreferenced - Scope.ALL_BUILTINS
def find_unresolved_and_unreferenced_symbols(self)
Find any unresolved symbols, and unreferenced symbols from this scope. :returns: ({unresolved}, {unreferenced})
7.406662
8.511763
0.870168
def get_item(key):
    CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
    try:
        return json.loads(open(CACHED_KEY_FILE, "rb").read().decode('UTF-8'))["_"]
    except (IOError, ValueError):
        return None
Return content in cached file in JSON format
null
null
null
def set_item(key, value):
    CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
    open(CACHED_KEY_FILE, "wb").write(json.dumps({"_": value}).encode('UTF-8'))
    return value
Write JSON content from the value argument to the cached file and return the value
null
null
null
def delete_item(key):
    CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
    if os.path.isfile(CACHED_KEY_FILE):
        os.remove(CACHED_KEY_FILE)
Delete cached file if present
null
null
null
if isinstance(data, dict) or isinstance(data, list):
    self._raw_data = data
    self._json_data = copy.deepcopy(self._raw_data)
else:
    raise TypeError("Provided Data is not json")
def __parse_json_data(self, data)
Process Json data :@param data :@type data: json/dict :throws TypeError
3.836312
3.847749
0.997028
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
    raise IOError('Invalid Json file')
with open(file_path) as json_file:
    self._raw_data = json.load(json_file)
    self._json_data = copy.deepcopy(self._raw_data)
def __parse_json_file(self, file_path)
Process Json file data :@param file_path :@type file_path: string :@throws IOError
2.670006
2.707599
0.986116
if key.isdigit():
    return data[int(key)]
if key not in data:
    raise KeyError("Key not exists")
return data.get(key)
def __get_value_from_data(self, key, data)
Find value from json data :@param key :@type: string :@param data :@type data: dict :@return object :@throws KeyError
4.851429
4.911089
0.987852
leafs = root.strip(" ").split('.') for leaf in leafs: if leaf: self._json_data = self.__get_value_from_data(leaf, self._json_data) return self
def at(self, root)
Sets the root from which PyJsonq starts preparing the data. :@param root :@type root: string :@return self :@throws KeyError
7.047566
6.899011
1.021533
if data and (isinstance(data, dict) or isinstance(data, list)):
    self._json_data = data
else:
    self._json_data = copy.deepcopy(self._raw_data)
self.__reset_queries()
return self
def reset(self, data={})
The JsonQuery object can be reset to new data according to the given data or the previously given raw Json data. :@param data: {} :@type data: json/dict :@return self
4.158183
4.444146
0.935654
temp_index = self._current_query_index
if len(self._queries) - 1 < temp_index:
    self._queries.append([])
self._queries[temp_index].append(query_items)
def __store_query(self, query_items)
Stores a where-clause entry in the current query group. :@param query_items :@type query_items: dict
3.679477
3.835203
0.959396
def func(item):
    or_check = False
    for queries in self._queries:
        and_check = True
        for query in queries:
            and_check &= self._matcher._match(
                item.get(query.get('key'), None),
                query.get('operator'),
                query.get('value')
            )
        or_check |= and_check
    return or_check

self._json_data = list(filter(lambda item: func(item), self._json_data))
def __execute_queries(self)
Executes all stored conditions and filters the result data
3.861117
3.548559
1.08808
self.__store_query({"key": key, "operator": operator, "value": value}) return self
def where(self, key, operator, value)
Make where clause :@param key :@param operator :@param value :@type key,operator,value: string :@return self
6.051506
8.880305
0.681452
if len(self._queries) > 0:
    self._current_query_index += 1
self.__store_query({"key": key, "operator": operator, "value": value})
return self
def or_where(self, key, operator, value)
Make or_where clause :@param key :@param operator :@param value :@type key, operator, value: string :@return self
4.506608
5.22787
0.862035
self.__prepare()
return None if self.count() < math.fabs(index) else self._json_data[index]
def nth(self, index)
Getting the nth element of the collection :@param index :@type index: int :@return object
12.392902
15.365479
0.806542
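A chained usage sketch tying the query-builder methods above together. The class name JsonQuery, its constructor, and the supported operator strings are assumptions inferred from the docstrings in this dump, not confirmed by it.

# hypothetical usage; JsonQuery(...) and the '>' / '=' operators are assumed
data = {"users": [
    {"id": 1, "name": "ann", "age": 30},
    {"id": 2, "name": "bob", "age": 20},
    {"id": 3, "name": "kim", "age": 35},
]}
q = JsonQuery(data)
first_match = (
    q.at("users")
     .where("age", ">", 25)
     .or_where("name", "=", "bob")
     .nth(0)
)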