code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
try:
    if self._data_from_search:
        agent = self._data_from_search.find('ul', {'class': 'links'}).text
        return agent.split(':')[1].strip()
    else:
        return self._ad_page_content.find('a', {'id': 'smi-link-branded'}).text.strip()
except Exception as e:
    if self._debug:
        logging.error("Error getting agent. Error message: " + e.args[0])
    return
def agent(self)
This method returns the agent name. :return:
5.602186
5.424238
1.032806
try:
    if self._data_from_search:
        agent = self._data_from_search.find('ul', {'class': 'links'})
        links = agent.find_all('a')
        return links[1]['href']
    else:
        return self._ad_page_content.find('a', {'id': 'smi-link-branded'})['href']
except Exception as e:
    if self._debug:
        logging.error("Error getting agent_url. Error message: " + e.args[0])
    return
def agent_url(self)
This method returns the agent's url. :return:
4.507099
4.489753
1.003863
try:
    number = self._ad_page_content.find('button', {'class': 'phone-number'})
    return (base64.b64decode(number.attrs['data-p'])).decode('ascii')
except Exception as e:
    if self._debug:
        logging.error("Error getting contact_number. Error message: " + e.args[0])
    return 'N/A'
def contact_number(self)
This method returns the contact phone number. :return:
5.268614
5.183738
1.016374
try:
    if self._data_from_search:
        link = self._data_from_search.find('a', href=True)
        return 'http://www.daft.ie' + link['href']
    else:
        return self._ad_page_content.find('link', {'rel': 'canonical'})['href']
except Exception as e:
    if self._debug:
        logging.error("Error getting daft_link. Error message: " + e.args[0])
    return
def daft_link(self)
This method returns the url of the listing. :return:
3.517113
3.340982
1.052718
try:
    div = self._ad_page_content.find('div', {'class': 'description_extras'})
    index = [i for i, s in enumerate(div.contents) if 'Shortcode' in str(s)][0] + 1
    return div.contents[index]['href']
except Exception as e:
    if self._debug:
        logging.error("Error getting shortcode. Error message: " + e.args[0])
    return 'N/A'
def shortcode(self)
This method returns the shortcode url of the listing. :return:
4.69468
4.474502
1.049207
try:
    div = self._ad_page_content.find('div', {'class': 'description_extras'})
    index = [i for i, s in enumerate(div.contents) if 'Property Views' in str(s)][0] + 1
    return int(''.join(filter(str.isdigit, div.contents[index])))
except Exception as e:
    if self._debug:
        logging.error("Error getting views. Error message: " + e.args[0])
    return 'N/A'
def views(self)
This method returns the "Property Views" from the listing. :return:
4.68138
4.264647
1.097718
try:
    if self._data_from_search:
        info = self._data_from_search.find('ul', {"class": "info"}).text
        s = info.split('|')
        return s[0].strip()
    else:
        return self._ad_page_content.find(
            'div', {'id': 'smi-summary-items'}
        ).find('span', {'class': 'header_text'}).text
except Exception as e:
    if self._debug:
        logging.error("Error getting dwelling_type. Error message: " + e.args[0])
    return
def dwelling_type(self)
This method returns the dwelling type. :return:
5.216168
5.170949
1.008745
try:
    if self._data_from_search:
        info = self._data_from_search.find('div', {"class": "date_entered"}).text
        s = info.split(':')
        return s[-1].strip()
    else:
        div = self._ad_page_content.find('div', {'class': 'description_extras'})
        index = [i for i, s in enumerate(div.contents) if 'Entered/Renewed' in str(s)][0] + 1
        return re.search("([0-9]{1,2}/[0-9]{1,2}/[0-9]{4})", str(div.contents[index]))[0]
except Exception as e:
    if self._debug:
        logging.error("Error getting posted_since. Error message: " + e.args[0])
    return
def posted_since(self)
This method returns the date the listing was entered. :return:
4.214539
3.964803
1.062988
try:
    if self._data_from_search:
        info = self._data_from_search.find('ul', {"class": "info"}).text
        s = info.split('|')
        nb = s[1].strip()
        return int(nb.split()[0])
    else:
        div = self._ad_page_content.find('div', {'id': 'smi-summary-items'})
        spans = div.find_all('span', {'class': 'header_text'})
        for span in spans:
            if 'bed' in span.text.lower():
                return int(''.join([n for n in span.text if n.isdigit()]))
        return
except Exception as e:
    if self._debug:
        logging.error("Error getting bedrooms. Error message: " + e.args[0])
    return 'N/A'
def bedrooms(self)
This method gets the number of bedrooms. :return:
4.066585
3.996431
1.017554
try:
    infos = self._ad_page_content.find_all('div', {"class": "map_info_box"})
    for info in infos:
        if 'Distance to City Centre' in info.text:
            distance_list = re.findall('Distance to City Centre: (.*) km', info.text)
            return distance_list[0]
    return None
except Exception as e:
    if self._debug:
        logging.error(e.args[0])
    print(e)
    return 'N/A'
def city_center_distance(self)
This method gets the distance to city center, in km. :return:
4.133668
3.900542
1.059768
routes = {}
try:
    big_div = self._ad_page_content.find('div', {"class": "half_area_box_right"})
    uls = big_div.find("ul")
    if uls is None:
        return None
    for li in uls.find_all('li'):
        route_li = li.text.split(':')
        routes[route_li[0]] = [x.strip() for x in route_li[1].split(',')]
    return routes
except Exception as e:
    if self._debug:
        logging.error(e.args[0])
    return 'N/A'
def transport_routes(self)
This method gets a dict of routes listed in Daft. :return:
4.164271
3.969705
1.049013
try:
    scripts = self._ad_page_content.find_all('script')
    for script in scripts:
        if 'longitude' in script.text:
            find_list = re.findall(r'"longitude":"([\-]?[0-9.]*[0-9]+)"', script.text)
            if len(find_list) >= 1:
                return find_list[0]
    return None
except Exception as e:
    if self._debug:
        logging.error("Error getting longitude. Error message: " + e.args[0])
    return None
def longitude(self)
This method gets the longitude listed in Daft. :return:
3.629207
3.578692
1.014116
try:
    alt_text = self._ad_page_content.find('span', {'class': 'ber-hover'}).find('img')['alt']
    if 'exempt' in alt_text:
        return 'exempt'
    else:
        alt_arr = alt_text.split()
        if 'ber' in alt_arr[0].lower():
            return alt_arr[1].lower()
        else:
            return None
except Exception as e:
    if self._debug:
        logging.error("Error getting the Ber Code. Error message: " + e.args[0])
    return None
def ber_code(self)
This method gets ber code listed in Daft. :return:
4.512335
4.449023
1.01423
req = Request(debug=self._debug)
ad_search_type = self.search_type
agent_id = self.agent_id
ad_id = self.id
response = req.post('https://www.daft.ie/ajax_endpoint.php?', params={
    'action': 'daft_contact_advertiser',
    'from': name,
    'email': email,
    'message': message,
    'contact_number': contact_number,
    'type': ad_search_type,
    'agent_id': agent_id,
    'id': ad_id
})
if self._debug:
    logging.info("Status code: %d" % response.status_code)
    logging.info("Response: %s" % response.content)
if response.status_code != 200:
    logging.error("Status code: %d" % response.status_code)
    logging.error("Response: %s" % response.content)
return response.status_code == 200
def contact_advertiser(self, name, email, contact_number, message)
This method allows you to contact the advertiser of a listing.

:param name: Your name
:param email: Your email address.
:param contact_number: Your contact number.
:param message: Your message.
:return:
2.661849
2.726186
0.9764
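A hypothetical call for the method above (here `listing` is an assumed, already-fetched Listing instance and the contact details are placeholders):

    ok = listing.contact_advertiser(
        name='Jane Doe',
        email='jane@example.com',
        contact_number='0871234567',
        message='Is this property still available?',
    )
    if not ok:
        logging.error('Failed to contact the advertiser')

The method returns True only when the POST to the ajax endpoint comes back with status 200.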
return {
    'search_type': self.search_type,
    'agent_id': self.agent_id,
    'id': self.id,
    'price': self.price,
    'price_change': self.price_change,
    'viewings': self.upcoming_viewings,
    'facilities': self.facilities,
    'overviews': self.overviews,
    'formalised_address': self.formalised_address,
    'address_line_1': self.address_line_1,
    'county': self.county,
    'listing_image': self.images,
    'listing_hires_image': self.hires_images,
    'agent': self.agent,
    'agent_url': self.agent_url,
    'contact_number': self.contact_number,
    'daft_link': self.daft_link,
    'shortcode': self.shortcode,
    'date_insert_update': self.date_insert_update,
    'views': self.views,
    'description': self.description,
    'dwelling_type': self.dwelling_type,
    'posted_since': self.posted_since,
    'num_bedrooms': self.bedrooms,
    'num_bathrooms': self.bathrooms,
    'city_center_distance': self.city_center_distance,
    'transport_routes': self.transport_routes,
    'latitude': self.latitude,
    'longitude': self.longitude,
    'ber_code': self.ber_code,
    'commercial_area_size': self.commercial_area_size
}
def as_dict(self)
Return a Listing object as Dictionary :return: dict
2.961597
2.913855
1.016384
"Fetch the variables and functions" #print("Here is the config:", config) # fetch variables from YAML file: self._variables = config.get(YAML_SUBSET) # add variables and functions from the module: module_reader.load_variables(self._variables, config) print("Variables:", self.variables)
def on_config(self, config)
Fetch the variables and functions
13.235082
9.758271
1.356294
"Provide a hook for defining functions from an external module" # the site_navigation argument has been made optional # (deleted in post 1.0 mkdocs, but maintained here # for backward compatibility) if not self.variables: return markdown else: # Create templae and get the variables md_template = Template(markdown) # Execute the jinja2 template and return return md_template.render(**self.variables)
def on_page_markdown(self, markdown, page, config, site_navigation=None, **kwargs)
Provide a hook for defining functions from an external module
13.862021
9.558552
1.450222
def macro(v, name=''):
    name = name or v.__name__
    variables[name] = v
    return v

# determine the package name, from the filename:
python_module = config.get('python_module') or DEFAULT_MODULE_NAME
# get the directory of the yaml file:
config_file = config['config_file_path']
yaml_dir = os.path.dirname(config_file)
# that's the directory of the package:
repackage.add(yaml_dir)
try:
    module = importlib.import_module(python_module)
    print("Found module '%s'" % python_module)
    # execute the hook, passing the template decorator function
    module.declare_variables(variables, macro)
except ModuleNotFoundError:
    print("No module found.")
def load_variables(variables, config)
Add the template functions, via the python module located in the same
directory as the Yaml config file. The python module must contain the
following hook:

    declare_variables(variables, macro):
        variables['a'] = 5

        @macro
        def bar(x):
            ....

        @macro
        def baz(x):
            ....
6.099211
4.968483
1.22758
if version_info < (3, 0) or validate:
    if validate and len(s) % 4 != 0:
        raise BinAsciiError('Incorrect padding')
    s = _get_bytes(s)
    if altchars is not None:
        altchars = _get_bytes(altchars)
        assert len(altchars) == 2, repr(altchars)
        if version_info < (3, 0):
            map = maketrans(altchars, b'+/')
        else:
            map = bytes.maketrans(altchars, b'+/')
        s = s.translate(map)
    try:
        result = builtin_decode(s, altchars)
    except TypeError as e:
        raise BinAsciiError(str(e))
    if validate:
        # check length of result vs length of input
        padding = 0
        if len(s) > 1 and s[-2] in (b'=', 61):
            padding = padding + 1
        if len(s) > 0 and s[-1] in (b'=', 61):
            padding = padding + 1
        if 3 * (len(s) / 4) - padding != len(result):
            raise BinAsciiError('Non-base64 digit found')
    return result
return builtin_decode(s, altchars)
def b64decode(s, altchars=None, validate=False)
Decode bytes encoded with the standard Base64 alphabet.

Argument ``s`` is a :term:`bytes-like object` or ASCII string to decode.

Optional ``altchars`` must be a :term:`bytes-like object` or ASCII string
of length 2 which specifies the alternative alphabet used instead of the
'+' and '/' characters.

If ``validate`` is ``False`` (the default), characters that are neither in
the normal base-64 alphabet nor the alternative alphabet are discarded
prior to the padding check. If ``validate`` is ``True``, these non-alphabet
characters in the input result in a :exc:`binascii.Error`.

The result is returned as a :class:`bytes` object. A :exc:`binascii.Error`
is raised if ``s`` is incorrectly padded.
2.877239
2.817064
1.021361
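A small usage sketch for a decoder with this signature (the expected values follow from the standard Base64 alphabet; importing the function from its module is assumed):

    encoded = b'aGVsbG8sIHdvcmxk'
    print(b64decode(encoded))                          # b'hello, world'
    # URL-safe alphabet via altchars:
    print(b64decode(b'aGVsbG8_IQ==', altchars=b'-_'))  # b'hello?!'
    # validate=True rejects input whose length is not a multiple of 4:
    b64decode(b'abc', validate=True)                   # raises BinAsciiError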
if altchars is not None:
    altchars = _get_bytes(altchars)
    assert len(altchars) == 2, repr(altchars)
if version_info < (3, 0):
    if isinstance(s, text_type):
        raise TypeError('a bytes-like object is required, not \''
                        + type(s).__name__ + '\'')
return builtin_encode(s, altchars)
def b64encode(s, altchars=None)
Encode bytes using the standard Base64 alphabet. Argument ``s`` is a :term:`bytes-like object` to encode. Optional ``altchars`` must be a byte string of length 2 which specifies an alternative alphabet for the '+' and '/' characters. This allows an application to e.g. generate url or filesystem safe Base64 strings. The result is returned as a :class:`bytes` object.
3.482126
3.505211
0.993414
for ext in ['*.so', '*.pyd']:
    for file in glob.glob('./pybase64/' + ext):
        log.info("removing '%s'", file)
        if self.dry_run:
            continue
        os.remove(file)
def run(self)
Run command.
5.814494
5.799055
1.002662
try:
    song_name = os.path.splitext(song_name)[0]
except IndexError:
    pass

song_name = song_name.partition('ft')[0]

# Replace characters to filter with spaces
song_name = ''.join(map(lambda c: " " if c in chars_filter else c, song_name))

# Remove crap words
song_name = re.sub('|'.join(re.escape(key) for key in words_filter),
                   "", song_name, flags=re.IGNORECASE)

# Remove duplicate spaces
song_name = re.sub(' +', ' ', song_name)

return song_name.strip()
def improve_name(song_name)
Improves the file name by removing words such as HD, Official, etc. e.g. 'Hey Jude (Official HD) lyrics' -> 'Hey Jude'. This helps in better searching of metadata, since a Spotify search of 'Hey Jude (Official HD) lyrics' fetches 0 results.
3.265969
3.275213
0.997178
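An illustrative call, assuming module-level filter globals along the lines of chars_filter = "()[]{}" and words_filter = ('official', 'hd', 'lyrics'):

    print(improve_name('Hey Jude (Official HD) lyrics.mp3'))  # -> 'Hey Jude'

The parentheses become spaces, the filtered words are stripped case-insensitively, and duplicate spaces are collapsed before the final strip.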
YOUTUBECLASS = 'spf-prefetch'

html = requests.get("https://www.youtube.com/results",
                    params={'search_query': song_input})
soup = BeautifulSoup(html.text, 'html.parser')

soup_section = soup.findAll('a', {'rel': YOUTUBECLASS})

# Use generator over list, since storage isn't important
song_urls = ('https://www.youtube.com' + i.get('href') for i in soup_section)
song_titles = (i.get('title') for i in soup_section)

youtube_list = list(zip(song_urls, song_titles))

del song_urls
del song_titles

return youtube_list
def get_song_urls(song_input)
Gather all urls, titles for a search query from youtube
3.825119
3.609012
1.05988
outtmpl = song_title + '.%(ext)s'
ydl_opts = {
    'format': 'bestaudio/best',
    'outtmpl': outtmpl,
    'postprocessors': [
        {'key': 'FFmpegExtractAudio',
         'preferredcodec': 'mp3',
         'preferredquality': '192'},
        {'key': 'FFmpegMetadata'},
    ],
}

with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    info_dict = ydl.extract_info(song_url, download=True)
def download_song(song_url, song_title)
Download a song using youtube url and song title
1.710229
1.722234
0.993029
song_name = improve_name(file_name)  # Remove useless words from title

client_credentials_manager = SpotifyClientCredentials(client_id, client_secret)
spotify = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
results = spotify.search(song_name, limit=1)

results = results['tracks']['items'][0]  # Find top result
album = results['album']['name']  # Parse json dictionary
artist = results['album']['artists'][0]['name']
song_title = results['name']
album_art = results['album']['images'][0]['url']

return artist, album, song_title, album_art
def get_metadata(file_name, client_id, client_secret)
Tries finding metadata through Spotify
2.882999
2.711549
1.06323
img = requests.get(album_art, stream=True)  # Gets album art from url
img = img.raw

audio = EasyMP3(file_name, ID3=ID3)
try:
    audio.add_tags()
except _util.error:
    pass

audio.tags.add(
    APIC(
        encoding=3,      # UTF-8
        mime='image/png',
        type=3,          # 3 is for album art
        desc='Cover',
        data=img.read()  # Reads and adds album art
    )
)
audio.save()

return album_art
def add_album_art(file_name, album_art)
Add album_art in .mp3's tags
3.007632
2.824157
1.064966
tags = EasyMP3(file_name)
if title:
    tags["title"] = title
if artist:
    tags["artist"] = artist
if album:
    tags["album"] = album
tags.save()

return file_name
def add_metadata(file_name, title, artist, album)
Adds title, artist and album tags to the mp3 file.
2.982908
3.35984
0.887812
for file_path in files:
    tags = EasyMP3(file_path)
    tags.delete()
    tags.save()
def revert_metadata(files)
Removes all tags from the given mp3 files.
7.314019
4.289663
1.705033
qset = self.filter(user=user)
if not qset:
    return None
if qset.count() > 1:
    raise Exception('This app does not currently support multiple vault ids')
return qset.get()
def get_user_vault_instance_or_none(self, user)
Returns a UserVault instance or None
4.773189
4.470996
1.06759
assert self.is_in_vault(user)
if vault_id:
    user_vault = self.get(user=user, vault_id=vault_id)
else:
    user_vault = self.get(user=user)
def charge(self, user, vault_id=None)
If vault_id is not passed, this will assume that there is only one instance of user and vault_id in the db.
2.806076
2.619694
1.071146
try:
    result = Transaction.sale({
        'amount': amount.quantize(Decimal('.01')),
        'customer_id': self.vault_id,
        "options": {
            "submit_for_settlement": True
        }
    })

    if result.is_success:
        # create a payment log
        payment_log = PaymentLog.objects.create(user=self.user, amount=amount,
                                                transaction_id=result.transaction.id)
        return payment_log
    else:
        raise Exception('Logical error in CC transaction')
except Exception:
    logging.error('Failed to charge $%s to user: %s with vault_id: %s'
                  % (amount, self.user, self.vault_id))
    return None
def charge(self, amount)
Charges the user's credit card with the passed $amount, if they are in the vault. Returns the payment_log instance or None (if the charge fails etc.).
3.478983
2.914896
1.193519
assert self.is_valid()

cc_details_map = {  # cc details
    'number': self.cleaned_data['cc_number'],
    'cardholder_name': self.cleaned_data['name'],
    'expiration_date': '%s/%s' % (self.cleaned_data['expiration_month'],
                                  self.cleaned_data['expiration_year']),
    'cvv': self.cleaned_data['cvv'],
    'billing_address': {
        'postal_code': self.cleaned_data['zip_code'],
    }
}

if self.__user_vault:
    try:
        # get customer info, its credit card and then update that credit card
        response = Customer.find(self.__user_vault.vault_id)
        cc_info = response.credit_cards[0]
        return CreditCard.update(cc_info.token, params=cc_details_map)
    except Exception as e:
        logging.error('Was not able to get customer from vault. %s' % e)
        self.__user_vault.delete()  # delete the stale instance from our db

# in case the above updating fails or user was never in the vault
new_customer_vault_id = '%s%s' % (prepend_vault_id, md5_hash()[:24])
# creating a customer, but we really just want to store their CC details
response = Customer.create({
    'id': new_customer_vault_id,  # vault id, uniquely identifies customer.
                                  # We're not caring about tokens (used for
                                  # storing multiple CC's per user)
    'credit_card': cc_details_map
})

if response.is_success:
    # save a new UserVault instance
    UserVault.objects.create(user=self.__user, vault_id=new_customer_vault_id)

return response
def save(self, prepend_vault_id='')
Adds or updates a user's CC to the vault. @prepend_vault_id: any string to prepend all vault ids with, in case the same braintree account is used by multiple projects/apps.
4.960135
4.626248
1.072172
d = {}
if request.method == 'POST':
    # Credit Card is being changed/updated by the user
    form = UserCCDetailsForm(request.user, True, request.POST)
    if form.is_valid():
        response = form.save()
        if response.is_success:
            messages.add_message(request, messages.SUCCESS,
                                 'Your credit card information has been securely saved.')
            return JsonResponse()
        else:
            return JsonResponse(success=False, errors=[BAD_CC_ERROR_MSG])
    return JsonResponse(success=False, data={'form': form_errors_serialize(form)})
else:
    if UserVault.objects.is_in_vault(request.user):
        try:
            response = Customer.find(
                UserVault.objects.get_user_vault_instance_or_none(request.user).vault_id)
            d['current_cc_info'] = response.credit_cards[0]
        except Exception as e:
            logging.error('Unable to get vault information for user from braintree. %s' % e)

    d['cc_form'] = UserCCDetailsForm(request.user)
    return render(request, template, d)
def payments_billing(request, template='django_braintree/payments_billing.html')
Renders both the past payments that have occurred on the user's credit card and their CC information on file (if any)
4.022186
3.900055
1.031315
path = '{0}/{1}/{2}'.format(self.collection.name, self.id, 'snooze')
data = {"duration": duration}
extra_headers = {"From": requester}
return self.pagerduty.request('POST', path, data=_json_dumper(data),
                              extra_headers=extra_headers)
def snooze(self, requester, duration)
Snooze incident.

:param requester: The email address of the individual requesting snooze.
4.38978
4.889166
0.897858
path = '{0}'.format(self.collection.name)
assignments = []
if not user_ids:
    raise Error('Must pass at least one user id')
for user_id in user_ids:
    ref = {
        "assignee": {
            "id": user_id,
            "type": "user_reference"
        }
    }
    assignments.append(ref)
data = {
    "incidents": [
        {
            "id": self.id,
            "type": "incident_reference",
            "assignments": assignments
        }
    ]
}
extra_headers = {"From": requester}
return self.pagerduty.request('PUT', path, data=_json_dumper(data),
                              extra_headers=extra_headers)
def reassign(self, user_ids, requester)
Reassign this incident to a user or list of users

:param user_ids: A non-empty list of user ids
:param requester: The email address of individual requesting reassign
3.359226
3.426088
0.980484
return self.create_event(description, "resolve", details, incident_key)
def resolve_incident(self, incident_key, description=None, details=None)
Causes the referenced incident to enter resolved state. Send a resolve event when the problem that caused the initial trigger has been fixed.
11.142553
13.119367
0.849321
'''Recurse through dictionary and replace any keys "self" with "self_"'''
if type(response) is list:
    for elem in response:
        clean_response(elem)
elif type(response) is dict:
    # iterate over a copy of the items, since the dict is mutated below
    for key, val in list(response.items()):
        if key == 'self':
            val = response.pop('self')
            response['self_'] = val
            clean_response(val)
        else:
            clean_response(response[key])
return response
def clean_response(response)
Recurse through dictionary and replace any keys "self" with "self_"
3.427783
2.137256
1.603824
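A quick demonstration of the recursion (the dict is modified in place and also returned):

    resp = {'self': 'https://api.example.com/x',
            'items': [{'self': 'a', 'name': 'b'}]}
    clean_response(resp)
    # resp is now:
    # {'items': [{'name': 'b', 'self_': 'a'}], 'self_': 'https://api.example.com/x'}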
if not string:
    return ""
new_string = [string[0].lower()]
for char in string[1:]:
    if char.isupper():
        new_string.append("_")
    new_string.append(char.lower())
return "".join(new_string)
def _lower(string)
Custom lower string function. Examples: FooBar -> foo_bar
2.08638
2.154066
0.968578
if not user_ids:
    raise Error('Must pass at least one user id')
self._do_action('reassign', requester_id=requester_id,
                assigned_to_user=','.join(user_ids))
def reassign(self, user_ids, requester_id)
Reassign this incident to a user or list of users

:param user_ids: A non-empty list of user ids
4.934003
5.579332
0.884336
return self.create_event(service_key, description, "acknowledge", details, incident_key)
def acknowledge_incident(self, service_key, incident_key, description=None, details=None)
Causes the referenced incident to enter the acknowledged state. Send an acknowledge event when someone is presently working on the incident.
6.27924
8.349441
0.752055
return self.create_event(service_key, description, "trigger", details, incident_key, client=client, client_url=client_url, contexts=contexts)
def trigger_incident(self, service_key, description, incident_key=None, details=None, client=None, client_url=None, contexts=None)
Report a new or ongoing problem. When PagerDuty receives a trigger, it will either open a new incident, or add a new log entry to an existing incident.
3.064194
4.10394
0.746647
if isinstance(dataset, numpy.ndarray) and not len(dataset.shape) == 4:
    check_dataset_shape(dataset)
    check_dataset_range(dataset)
else:  # must be a list of arrays or a 4D NumPy array
    for i, d in enumerate(dataset):
        if not isinstance(d, numpy.ndarray):
            raise ValueError(
                'Requires a NumPy array (rgb x rows x cols) '
                'with integer values in the range [0, 255].'
            )
        try:
            check_dataset_shape(d)
            check_dataset_range(d)
        except ValueError as err:
            raise ValueError(
                '{}\nAt position {} in the list of arrays.'.format(err, i)
            )
def check_dataset(dataset)
Confirm shape (3 colors x rows x cols) and values [0 to 255] are OK.
3.364534
3.085645
1.090383
if isinstance(dataset, numpy.ndarray):
    if len(dataset.shape) == 3:  # NumPy 3D
        if dataset.shape[-1] == 3:
            return dataset.transpose((2, 0, 1))
    elif len(dataset.shape) == 4:  # NumPy 4D
        if dataset.shape[-1] == 3:
            return dataset.transpose((0, 3, 1, 2))
    # Otherwise couldn't fix it.
    return dataset

# List of Numpy 3D arrays.
for i, d in enumerate(dataset):
    if not isinstance(d, numpy.ndarray):
        return dataset
    if not (len(d.shape) == 3 and d.shape[-1] == 3):
        return dataset
    dataset[i] = d.transpose()
return dataset
def try_fix_dataset(dataset)
Transpose the image data if it's in PIL format.
2.213011
2.079236
1.064339
dim, nrow, ncol = dataset.shape
uint8_dataset = dataset.astype('uint8')
if not (uint8_dataset == dataset).all():
    message = (
        "\nYour image was cast to a `uint8` (`<img>.astype(uint8)`), "
        "but some information was lost.\nPlease check your gif and "
        "convert to uint8 beforehand if the gif looks wrong."
    )
    warnings.warn(message)
image = [[
    struct.pack(
        'BBB',
        uint8_dataset[0, i, j],
        uint8_dataset[1, i, j],
        uint8_dataset[2, i, j]
    )
    for j in range(ncol)]
    for i in range(nrow)]
return image
def get_image(dataset)
Convert the NumPy array to two nested lists with r,g,b tuples.
4.482162
4.283247
1.04644
nbits = max(math.ceil(math.log(num_colors, 2)), 2)
return '{:03b}'.format(int(nbits - 1))
def get_color_table_size(num_colors)
Total values in the color table is 2**(1 + int(result, base=2)). The result is a three-bit value (represented as a string with ones or zeros) that will become part of a packed byte encoding various details about the color table, used in the Logical Screen Descriptor block.
3.889806
4.51002
0.862481
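A worked example of the encoding: for 5 distinct colors, ceil(log2(5)) = 3 bits are needed, which is stored as 3 - 1 = 2, i.e. the bit string '010', and the full table then holds 2**(1 + 0b010) = 8 entries:

    print(get_color_table_size(5))    # '010' -> table of 8 entries
    print(get_color_table_size(256))  # '111' -> table of 256 entries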
colors = Counter(pixel for row in image for pixel in row)
if len(colors) > 256:
    msg = (
        "The maximum number of distinct colors in a GIF is 256 but "
        "this image has {} colors and can't be encoded properly."
    )
    raise RuntimeError(msg.format(len(colors)))
return colors
def get_colors(image)
Return a Counter containing each color and how often it appears.
4.909129
4.272678
1.148958
global_color_table = b''.join(c[0] for c in colors.most_common())
full_table_size = 2 ** (1 + int(get_color_table_size(len(colors)), 2))
repeats = 3 * (full_table_size - len(colors))
zeros = struct.pack('<{}x'.format(repeats))
return global_color_table + zeros
def _get_global_color_table(colors)
Return a color table sorted in descending order of count.
5.546288
5.265185
1.053389
lzw_code_size, coded_bits = _lzw_encode(image, colors)
coded_bytes = ''.join(
    '{{:0{}b}}'.format(nbits).format(val) for val, nbits in coded_bits)
coded_bytes = '0' * ((8 - len(coded_bytes)) % 8) + coded_bytes
coded_data = list(
    reversed([
        int(coded_bytes[8 * i:8 * (i + 1)], 2)
        for i in range(len(coded_bytes) // 8)
    ])
)
output = [struct.pack('<B', lzw_code_size)]

# Must output the data in blocks of length 255
block_length = min(255, len(coded_data))
while block_length > 0:
    block = struct.pack(
        '<{}B'.format(block_length + 1),
        block_length,
        *coded_data[:block_length]
    )
    output.append(block)
    coded_data = coded_data[block_length:]
    block_length = min(255, len(coded_data))

return b''.join(output)
def _get_image_data(image, colors)
Performs the LZW compression as described by Matthew Flickinger. This isn't fast, but it works. http://www.matthewflickinger.com/lab/whatsinagif/lzw_image_data.asp
2.623998
2.575447
1.018851
try:
    check_dataset(dataset)
except ValueError as e:
    dataset = try_fix_dataset(dataset)
    check_dataset(dataset)

delay_time = 100 // int(fps)

def encode(d):
    four_d = isinstance(dataset, numpy.ndarray) and len(dataset.shape) == 4
    if four_d or not isinstance(dataset, numpy.ndarray):
        return _make_animated_gif(d, delay_time=delay_time)
    else:
        return _make_gif(d)

with open(filename, 'wb') as outfile:
    outfile.write(HEADER)
    for block in encode(dataset):
        outfile.write(block)
    outfile.write(TRAILER)
def write_gif(dataset, filename, fps=10)
Write a NumPy array to GIF 89a format. Or write a list of NumPy arrays
to an animation (GIF 89a format).

- Positional arguments::

    :param dataset: A NumPy array or list of arrays with shape
        rgb x rows x cols and integer values in [0, 255].
    :param filename: The output file that will contain the GIF image.
    :param fps: The (integer) frames/second of the animation (default 10).
    :type dataset: a NumPy array or list of NumPy arrays.
    :return: None

- Example: a minimal array, with one red pixel, would look like this::

    import numpy as np
    one_red_pixel = np.array([[[255]], [[0]], [[0]]])
    write_gif(one_red_pixel, 'red_pixel.gif')

..raises:: ValueError
3.852461
4.031632
0.955559
logging.basicConfig(level=logging.DEBUG)
# create the application and the main window
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()

# setup ui
ui = example_ui.Ui_MainWindow()
ui.setupUi(window)
ui.bt_delay_popup.addActions([
    ui.actionAction,
    ui.actionAction_C
])
ui.bt_instant_popup.addActions([
    ui.actionAction,
    ui.actionAction_C
])
ui.bt_menu_button_popup.addActions([
    ui.actionAction,
    ui.actionAction_C
])
window.setWindowTitle('QDarkGrayStyle example')

# tabify dock widgets to show bug #6
window.tabifyDockWidget(ui.dockWidget1, ui.dockWidget2)

# setup stylesheet
app.setStyleSheet(qdarkgraystyle.load_stylesheet())

# auto quit after 2s when testing on travis-ci
if '--travis' in sys.argv:
    QtCore.QTimer.singleShot(2000, app.exit)

# run
window.show()
app.exec_()
def main()
Application entry point
3.729223
3.61806
1.030725
# Smart import of the rc file
f = QtCore.QFile(':qdarkgraystyle/style.qss')
if not f.exists():
    _logger().error('Unable to load stylesheet, file not found in resources')
    return ''
else:
    f.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text)
    ts = QtCore.QTextStream(f)
    stylesheet = ts.readAll()
    if platform.system().lower() == 'darwin':  # see issue #12 on github
        mac_fix = '''
        QDockWidget::title
        {
            background-color: #31363b;
            text-align: center;
            height: 12px;
        }
        '''
        stylesheet += mac_fix
    return stylesheet
def load_stylesheet()
Loads the stylesheet for use in a PyQt5 application. :return: the stylesheet string
4.442508
4.30756
1.031328
# If input is not flow, then create from image sequence
try:
    assert image_sequence_or_flow.ndim == 1
    flow_org = image_sequence_or_flow
except AssertionError:
    flow_org = tracking.optical_flow_magnitude(image_sequence_or_flow)

# Gyro from gyro data
gyro_mag = np.sum(gyro_data ** 2, axis=0)

flow_timestamps = image_timestamps[:-2]

# Resample to match highest
rate = lambda ts: len(ts) / (ts[-1] - ts[0])
freq_gyro = rate(gyro_timestamps)
freq_image = rate(flow_timestamps)

if freq_gyro > freq_image:
    rel_rate = freq_gyro / freq_image
    flow_mag = znccpyr.upsample(flow_org, rel_rate)
else:
    flow_mag = flow_org
    rel_rate = freq_image / freq_gyro
    gyro_mag = znccpyr.upsample(gyro_mag, rel_rate)

ishift = znccpyr.find_shift_pyr(flow_mag, gyro_mag, levels)

if freq_gyro > freq_image:
    flow_shift = int(-ishift / rel_rate)
else:
    flow_shift = int(-ishift)

time_offset = flow_timestamps[flow_shift]

if full_output:
    return time_offset, flow_org  # Return the original flow, not the upsampled version
else:
    return time_offset
def sync_camera_gyro(image_sequence_or_flow, image_timestamps, gyro_data, gyro_timestamps, levels=6, full_output=False)
Get time offset that aligns image timestamps with gyro timestamps.

Given an image sequence, and gyroscope data, with their respective timestamps,
calculate the offset that aligns the image data with the gyro data.
The timestamps must only differ by an offset, not a scale factor.

This function finds an approximation of the offset *d* that makes this transformation

    t_gyro = t_camera + d

i.e. your new image timestamps should be

    image_timestamps_aligned = image_timestamps + d

The offset is calculated using zero-mean cross correlation of the gyroscope data
magnitude and the optical flow magnitude, calculated from the image sequence.
ZNCC is performed using pyramids to make it quick.

The offset is accurate up to about +/- 2 frames, so you should run
*refine_time_offset* if you need better accuracy.

Parameters
---------------
image_sequence_or_flow : sequence of image data, or ndarray
    This must be either a list or generator that provides a stream of
    images that are used for optical flow calculations.
image_timestamps : ndarray
    Timestamps of the images in image_sequence
gyro_data : (3, N) ndarray
    Gyroscope measurements (angular velocity)
gyro_timestamps : ndarray
    Timestamps of data in gyro_data
levels : int
    Number of pyramid levels
full_output : bool
    If False, only return the offset, otherwise return extra data

Returns
--------------
time_offset : float
    The time offset to add to image_timestamps to align the image data
    with the gyroscope data
flow : ndarray
    (Only if full_output=True) The calculated optical flow magnitude
4.092644
3.953516
1.035191
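A usage sketch following the docstring above (all inputs are assumed to be prepared as described there):

    time_offset = sync_camera_gyro(frames, frame_timestamps,
                                   gyro_data, gyro_timestamps, levels=6)
    frame_timestamps_aligned = frame_timestamps + time_offset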
flow = tracking.optical_flow_magnitude(image_sequence)
flow_timestamps = image_timestamps[:-2]

# Let user select points in both pieces of data
(frame_pair, gyro_idx) = manual_sync_pick(flow, gyro_timestamps, gyro_data)

# Normalize data
gyro_abs_max = np.max(np.abs(gyro_data), axis=0)
gyro_normalized = (gyro_abs_max / np.max(gyro_abs_max)).flatten()
flow_normalized = (flow / np.max(flow)).flatten()

rate = lambda ts: len(ts) / (ts[-1] - ts[0])

# Resample to match highest
freq_gyro = rate(gyro_timestamps)
freq_image = rate(flow_timestamps)
logger.debug("Gyro sampling frequency: %.2f Hz, Image sampling frequency: %.2f Hz",
             freq_gyro, freq_image)

gyro_part = gyro_normalized[gyro_idx[0]:gyro_idx[1] + 1]  # only largest
flow_part = flow_normalized[frame_pair[0]:frame_pair[1] + 1]

N = int(flow_part.size * freq_gyro / freq_image)  # sample count must be an integer
flow_part_resampled = ssig.resample(flow_part, N).flatten()

# Cross correlate the two signals and find time diff
corr = ssig.correlate(gyro_part, flow_part_resampled, 'full')

# Find the flow in gyro data
i = np.argmax(corr)

t_0_f = flow_timestamps[frame_pair[0]]
t_1_f = flow_timestamps[frame_pair[1]]

t_off_g = gyro_timestamps[gyro_idx[0] + i]
t_off_f = t_1_f
time_offset = t_off_g - t_off_f

if full_output:
    return time_offset, flow, frame_pair
else:
    return time_offset
def sync_camera_gyro_manual(image_sequence, image_timestamps, gyro_data, gyro_timestamps, full_output=False)
Get time offset that aligns image timestamps with gyro timestamps.

Given an image sequence, and gyroscope data, with their respective timestamps,
calculate the offset that aligns the image data with the gyro data.
The timestamps must only differ by an offset, not a scale factor.

This function finds an approximation of the offset *d* that makes this transformation

    t_gyro = t_camera + d

i.e. your new image timestamps should be

    image_timestamps_aligned = image_timestamps + d

The offset is calculated using correlation. The parts of the signals to use are
chosen by the user by picking points in a plot window.

The offset is accurate up to about +/- 2 frames, so you should run
*refine_time_offset* if you need better accuracy.

Parameters
---------------
image_sequence : sequence of image data
    This must be either a list or generator that provides a stream of
    images that are used for optical flow calculations.
image_timestamps : ndarray
    Timestamps of the images in image_sequence
gyro_data : (3, N) ndarray
    Gyroscope measurements (angular velocity)
gyro_timestamps : ndarray
    Timestamps of data in gyro_data
full_output : bool
    If False, only return the offset, otherwise return extra data

Returns
--------------
time_offset : float
    The time offset to add to image_timestamps to align the image data
    with the gyroscope data
flow : ndarray
    (Only if full_output=True) The calculated optical flow magnitude
frame_pair : (int, int)
    The frame pair that was picked for synchronization
4.257293
4.069656
1.046106
endpoints = []
in_low = False
for i, val in enumerate(flow):
    if val < motion_threshold:
        if not in_low:
            endpoints.append(i)
            in_low = True
    else:
        if in_low:
            endpoints.append(i - 1)  # Previous was last in a low spot
            in_low = False

def mean_score_func(m):
    mu = 15
    sigma = 8
    top_val = normpdf(mu, mu, sigma)
    return normpdf(m, mu, sigma) / top_val

def max_score_func(m):
    mu = 40
    sigma = 8
    if m <= mu:
        return 1.
    else:
        top_val = normpdf(mu, mu, sigma)
        return normpdf(m, mu, sigma) / top_val

def length_score_func(l):
    mu = 30
    sigma = 10
    top_val = normpdf(mu, mu, sigma)
    return normpdf(l, mu, sigma) / top_val

min_length = 5  # frames
sequences = []
for k, i in enumerate(endpoints[:-1]):
    for j in endpoints[k + 1:]:
        length = j - i
        if length < min_length:
            continue
        seq = flow[i:j + 1]
        m_score = mean_score_func(np.mean(seq))
        mx_score = max_score_func(np.max(seq))
        l_score = length_score_func(length)
        logger.debug("%d, %d scores: (mean=%.5f, max=%.5f, length=%.5f)"
                     % (i, j, m_score, mx_score, l_score))
        if min(m_score, mx_score, l_score) < 0.2:
            continue
        score = m_score + mx_score + l_score
        sequences.append((i, j, score))

return sorted(sequences, key=lambda x: x[2], reverse=True)
def good_sequences_to_track(flow, motion_threshold=1.0)
Get list of good frames to do tracking in.

Looking at the optical flow, this function chooses a span of frames
that fulfill certain criteria. These include

    * not being too short or too long
    * not too low or too high mean flow magnitude
    * a low max value (avoids motion blur)

Currently, the cost function for a sequence is hard coded. Sorry about that.

Parameters
-------------
flow : ndarray
    The optical flow magnitude
motion_threshold : float
    The maximum amount of motion to consider for sequence endpoints.

Returns
------------
sequences : list
    Sorted list of (a, b, score) elements (highest score first) of sequences
    where a sequence is frames with frame indices in the span [a, b].
2.494828
2.509679
0.994082
self.params['user']['gyro_rate'] = gyro_rate

for p in ('gbias_x', 'gbias_y', 'gbias_z'):
    self.params['initialized'][p] = 0.0

if slices is not None:
    self.slices = slices

if self.slices is None:
    self.slices = videoslice.Slice.from_stream_randomly(self.video)
    logger.debug("Number of slices: {:d}".format(len(self.slices)))

if len(self.slices) < 2:
    logger.error("Calibration requires at least 2 video slices to proceed, got %d",
                 len(self.slices))
    raise InitializationError(
        "Calibration requires at least 2 video slices to proceed, got {:d}".format(
            len(self.slices)))

if not skip_estimation:
    time_offset = self.find_initial_offset()
    # TODO: Detect when time offset initialization fails, and raise InitializationError
    R = self.find_initial_rotation()
    if R is None:
        raise InitializationError("Failed to calculate initial rotation")
def initialize(self, gyro_rate, slices=None, skip_estimation=False)
Prepare calibrator for calibration

This method does three things:

    1. Create slices from the video stream, if not already provided
    2. Estimate time offset
    3. Estimate rotation between camera and gyroscope

Parameters
------------------
gyro_rate : float
    Estimated gyroscope sample rate
slices : list of Slice, optional
    Slices to use for optimization
skip_estimation : bool
    Do not estimate initial time offset and rotation.

Raises
--------------------
InitializationError
    If the initialization fails
4.047801
3.544798
1.141899
f_g = self.parameter['gyro_rate']
d_c = self.parameter['time_offset']
n = f_g * (t + d_c)
n0 = int(np.floor(n))
tau = n - n0
return n0, tau
def video_time_to_gyro_sample(self, t)
Convert video time to gyroscope sample index and interpolation factor

Parameters
-------------------
t : float
    Video timestamp

Returns
--------------------
n : int
    Sample index that precedes t
tau : float
    Interpolation factor [0.0-1.0]. If tau=0, then t falls on exactly n.
    If tau=1 then t falls exactly on n+1
5.200774
5.176226
1.004742
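A worked example with assumed parameter values:

    # Assuming gyro_rate = 100.0 Hz and time_offset = 0.015 s:
    #   t = 1.0 s  ->  n = 100.0 * (1.0 + 0.015) = 101.5
    #   n0 = 101, tau = 0.5  (halfway between gyro samples 101 and 102)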
D = {}
for source in PARAM_SOURCE_ORDER:
    D.update(self.params[source])
return D
def parameter(self)
Return the current best values of all parameters, as a dict
9.573488
10.792624
0.88704
x0 = np.array([self.parameter[param] for param in PARAM_ORDER])

available_tracks = np.sum([len(s.inliers) for s in self.slices])
if available_tracks < max_tracks:
    warnings.warn("Could not use the requested {} tracks, since only {} were "
                  "available in the slice data.".format(max_tracks, available_tracks))
    max_tracks = available_tracks

# Get subset of available tracks such that all slices are still used
slice_sample_idxs = videoslice.fill_sampling(self.slices, max_tracks)
func_args = (self.slices, slice_sample_idxs, self.video.camera_model, self.gyro, norm_c)
self.slice_sample_idxs = slice_sample_idxs
logger.debug("Starting optimization on {:d} slices and {:d} tracks".format(
    len(self.slices), max_tracks))

start_time = time.time()
# TODO: Check what values of ftol and xtol are required for good results.
# The current setting is probably pessimistic.
leastsq_result = scipy.optimize.leastsq(optimization_func, x0, args=func_args,
                                        full_output=True,
                                        ftol=1e-10, xtol=1e-10, maxfev=max_eval)
elapsed = time.time() - start_time
x, covx, infodict, mesg, ier = leastsq_result
self.__debug_leastsq = leastsq_result
logger.debug("Optimization completed in {:.1f} seconds and {:d} function "
             "evaluations. ier={}, mesg='{}'".format(elapsed, infodict['nfev'],
                                                     ier, mesg))
if ier in (1, 2, 3, 4):
    for pname, val in zip(PARAM_ORDER, x):
        self.params['calibrated'][pname] = val
    return self.parameter
else:
    raise CalibrationError(mesg)
def calibrate(self, max_tracks=MAX_OPTIMIZATION_TRACKS, max_eval=MAX_OPTIMIZATION_FEV, norm_c=DEFAULT_NORM_C)
Perform calibration

Parameters
----------------------
max_eval : int
    Maximum number of function evaluations

Returns
---------------------
dict
    Optimization result

Raises
-----------------------
CalibrationError
    If calibration fails
3.935332
4.162555
0.945413
flow = self.video.flow
gyro_rate = self.parameter['gyro_rate']
frame_times = np.arange(len(flow)) / self.video.frame_rate
gyro_times = np.arange(self.gyro.num_samples) / gyro_rate
time_offset = timesync.sync_camera_gyro(flow, frame_times, self.gyro.data.T,
                                        gyro_times, levels=pyramids)
logger.debug("Initial time offset: {:.4f}".format(time_offset))
self.params['initialized']['time_offset'] = time_offset
return time_offset
def find_initial_offset(self, pyramids=6)
Estimate time offset

This sets and returns the initial time offset estimation.

Parameters
---------------
pyramids : int
    Number of pyramids to use for ZNCC calculations.
    If initial estimation of time offset fails, try lowering this value.

Returns
---------------
float
    Estimated time offset
5.458827
6.024276
0.906138
if 'time_offset' not in self.parameter:
    raise InitializationError("Can not estimate rotation without an estimate of "
                              "time offset. Please estimate the offset and try again.")

dt = float(1.0 / self.parameter['gyro_rate'])  # Must be python float for fastintegrate
q = self.gyro.integrate(dt)

video_axes = []
gyro_axes = []

for _slice in self.slices:
    # Estimate rotation here
    _slice.estimate_rotation(self.video.camera_model,
                             ransac_threshold=7.0)  # sets .axis and .angle members
    if _slice.axis is None:
        continue
    assert _slice.angle > 0

    t1 = _slice.start / self.video.frame_rate
    n1, _ = self.video_time_to_gyro_sample(t1)
    t2 = _slice.end / self.video.frame_rate
    n2, _ = self.video_time_to_gyro_sample(t2)

    try:
        qx = q[n1]
        qy = q[n2]
    except IndexError:
        continue  # No gyro data -> nothing to do with this slice

    Rx = rotations.quat_to_rotation_matrix(qx)
    Ry = rotations.quat_to_rotation_matrix(qy)
    R = np.dot(Rx.T, Ry)
    v, theta = rotations.rotation_matrix_to_axis_angle(R)
    if theta < 0:
        v = -v

    gyro_axes.append(v)
    video_axes.append(_slice.axis)

if len(gyro_axes) < 2:
    logger.warning("Rotation estimation requires at least 2 rotation axes, "
                   "got {}".format(len(gyro_axes)))
    return None

logger.debug("Using {:d} slices (from initial {:d}) for rotation estimation".format(
    len(gyro_axes), len(self.slices)))

model_func = lambda data: rotations.procrustes(data[:3], data[3:6],
                                               remove_mean=False)[0]

def eval_func(model, data):
    X = data[:3].reshape(3, -1)
    Y = data[3:6].reshape(3, -1)
    R = model
    Xhat = np.dot(R, Y)
    costheta = np.sum(Xhat * X, axis=0)
    theta = np.arccos(costheta)
    return theta

inlier_selection_prob = 0.99999
model_points = 2  # Set to 3 to use non-minimal case
inlier_ratio = 0.5
threshold = np.deg2rad(10.0)
ransac_iterations = int(np.log(1 - inlier_selection_prob) /
                        np.log(1 - inlier_ratio ** model_points))

data = np.vstack((np.array(video_axes).T, np.array(gyro_axes).T))
assert data.shape == (6, len(gyro_axes))

R, ransac_consensus_idx = ransac.RANSAC(model_func, eval_func, data,
                                        model_points, ransac_iterations,
                                        threshold, recalculate=True)

n, theta = rotations.rotation_matrix_to_axis_angle(R)
logger.debug("Found rotation: n={} theta={}; r={}".format(n, theta, n * theta))
logger.debug(R)

rx, ry, rz = theta * n
self.params['initialized']['rot_x'] = rx
self.params['initialized']['rot_y'] = ry
self.params['initialized']['rot_z'] = rz

return R
def find_initial_rotation(self)
Estimate rotation between camera and gyroscope

This sets and returns the initial rotation estimate.
Note that the initial time offset must have been estimated before
calling this function!

Returns
--------------------
(3,3) ndarray
    Estimated rotation between camera and gyroscope
4.248617
4.198574
1.011919
print("Parameters") print("--------------------") for param in PARAM_ORDER: print(' {:>11s} = {}'.format(param, self.parameter[param]))
def print_params(self)
Print the current best set of parameters
6.482729
6.339329
1.022621
gf1 = cv2.getGaussianKernel(ksize, gstd1)
gf2 = cv2.getGaussianKernel(ksize, gstd2)
gf3 = cv2.getGaussianKernel(ksize, gstd3)
sqrtimg = cv2.sqrt(img)
p1 = cv2.sepFilter2D(sqrtimg, -1, gf1, gf1)
p2 = cv2.sepFilter2D(sqrtimg, -1, gf2, gf2)
maxarr = np.maximum(0, (p1 - p2) / p2)
minarr = np.minimum(w * maxarr, 1)
p = 1 - minarr
nc = cv2.sepFilter2D(p, -1, gf3, gf3) + EPS
output = cv2.sepFilter2D(p * sqrtimg, -1, gf3, gf3)
output = (output / nc) ** 2  # Since input is sqrted
return output
def remove_slp(img, gstd1=GSTD1, gstd2=GSTD2, gstd3=GSTD3, ksize=KSIZE, w=W)
Remove the SLP from kinect IR image

The input image should be a float32 numpy array, and should NOT be
a square root image.

Parameters
------------------
img : (M, N) float ndarray
    Kinect NIR image with SLP pattern
gstd1 : float
    Standard deviation of gaussian kernel 1
gstd2 : float
    Standard deviation of gaussian kernel 2
gstd3 : float
    Standard deviation of gaussian kernel 3
ksize : int
    Size of kernel (should be odd)
w : float
    Weighting factor

Returns
------------------
img_noslp : (M, N) float ndarray
    Input image with SLP removed
2.952372
3.010046
0.980839
import h5py
with h5py.File(filename, 'r') as f:
    wc = f["wc"].value
    lgamma = f["lgamma"].value
    K = f["K"].value
    readout = f["readout"].value
    image_size = f["size"].value
    fps = f["fps"].value
    instance = cls(image_size, fps, readout, K, wc, lgamma)
return instance
def from_hdf(cls, filename)
Load camera model params from a HDF5 file

The HDF5 file should contain the following datasets:

    wc : (2,) float with distortion center
    lgamma : float distortion parameter
    readout : float readout value
    size : (2,) int image size
    fps : float frame rate
    K : (3, 3) float camera matrix

Parameters
--------------------
filename : str
    Path to file with parameters

Returns
---------------------
AtanCameraModel
    Camera model instance
3.812725
2.035504
1.873111
X = points if not points.ndim == 1 else points.reshape((points.size, 1))
wx, wy = self.wc

# Switch to polar coordinates
rn = np.sqrt((X[0, :] - wx) ** 2 + (X[1, :] - wy) ** 2)
phi = np.arctan2(X[1, :] - wy, X[0, :] - wx)

# 'atan' method
r = np.tan(rn * self.lgamma) / self.lgamma

# Switch back to rectangular coordinates
Y = np.ones(X.shape)
Y[0, :] = wx + r * np.cos(phi)
Y[1, :] = wy + r * np.sin(phi)
return Y
def invert(self, points)
Invert the distortion

Parameters
------------------
points : ndarray
    Input image points

Returns
-----------------
ndarray
    Undistorted points
4.245648
4.732121
0.897198
K = self.camera_matrix
XU = points
XU = XU / np.tile(XU[2], (3, 1))
X = self.apply(XU)
x2d = np.dot(K, X)
return from_homogeneous(x2d)
def project(self, points)
Project 3D points to image coordinates.

This projects 3D points expressed in the camera coordinate system to image points.

Parameters
--------------------
points : (3, N) ndarray
    3D points

Returns
--------------------
image_points : (2, N) ndarray
    The world points projected to the image plane
5.754476
6.549926
0.878556
Ki = self.inv_camera_matrix
X = np.dot(Ki, to_homogeneous(image_points))
X = X / X[2]
XU = self.invert(X)
return XU
def unproject(self, image_points)
Find (up to scale) 3D coordinate of an image point

This is the inverse of the `project` function.
The resulting 3D points are only valid up to an unknown scale.

Parameters
----------------------
image_points : (2, N) ndarray
    Image points

Returns
----------------------
points : (3, N) ndarray
    3D coordinates (valid up to scale)
6.2766
7.369689
0.851678
rvec = tvec = np.zeros(3)
image_points, jac = cv2.projectPoints(points.T.reshape(-1, 1, 3), rvec, tvec,
                                      self.camera_matrix, self.dist_coefs)
return image_points.reshape(-1, 2).T
def project(self, points)
Project 3D points to image coordinates.

This projects 3D points expressed in the camera coordinate system to image points.

Parameters
--------------------
points : (3, N) ndarray
    3D points

Returns
--------------------
image_points : (2, N) ndarray
    The world points projected to the image plane
2.899207
3.16976
0.914646
undist_image_points = cv2.undistortPoints(image_points.T.reshape(1, -1, 2),
                                          self.camera_matrix, self.dist_coefs,
                                          P=self.camera_matrix)
world_points = np.dot(self.inv_camera_matrix,
                      to_homogeneous(undist_image_points.reshape(-1, 2).T))
return world_points
def unproject(self, image_points)
Find (up to scale) 3D coordinate of an image point

This is the inverse of the `project` function.
The resulting 3D points are only valid up to an unknown scale.

Parameters
----------------------
image_points : (2, N) ndarray
    Image points

Returns
----------------------
points : (3, N) ndarray
    3D coordinates (valid up to scale)
2.847396
3.274031
0.869691
"Take list of Kinect filenames (without path) and extracts timestamps while accounting for timestamp overflow (returns linear timestamps)." timestamps = np.array([Kinect.timestamp_from_filename(fname) for fname in file_list]) # Handle overflow diff = np.diff(timestamps) idxs = np.flatnonzero(diff < 0) ITEM_SIZE = 2**32 for i in idxs: timestamps[i+1:] += ITEM_SIZE return timestamps.flatten()
def timestamps_from_file_list(file_list)
Take a list of Kinect filenames (without path) and extract timestamps, accounting for timestamp overflow (returns linear timestamps).
8.074514
3.83679
2.104497
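The overflow handling can be checked in isolation with a toy timestamp array that wraps past 2**32 (same logic as above, without the filename parsing):

    import numpy as np
    ts = np.array([4294967200, 4294967290, 50], dtype=np.int64)
    for i in np.flatnonzero(np.diff(ts) < 0):
        ts[i + 1:] += 2 ** 32
    print(ts)  # [4294967200 4294967290 4294967346]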
"Given a list of image files, find bad frames, remove them and modify file_list" MAX_INITIAL_BAD_FRAMES = 15 bad_ts = Kinect.detect_bad_timestamps(Kinect.timestamps_from_file_list(file_list)) # Trivial case if not bad_ts: return file_list # No bad frames after the initial allowed last_bad = max(bad_ts) if last_bad >= MAX_INITIAL_BAD_FRAMES: raise Exception('Only 15 initial bad frames are allowed, but last bad frame is %d' % last_bad) # Remove all frames up to the last bad frame for i in range(last_bad + 1): os.remove(file_list[i]) # Purge from the list file_list = file_list[last_bad+1:] return file_list
def purge_bad_timestamp_files(file_list)
Given a list of image files, find bad frames, remove them and modify file_list
4.698282
3.904018
1.203448
(root, filename) = os.path.split(video_filename)
needle_ts = int(filename.split('-')[2].split('.')[0])
haystack_ts_list = np.array(Kinect.timestamps_from_file_list(depth_file_list))
haystack_idx = np.flatnonzero(haystack_ts_list == needle_ts)[0]
depth_filename = depth_file_list[haystack_idx]
return depth_filename
def depth_file_for_nir_file(video_filename, depth_file_list)
Returns the corresponding depth filename given a NIR filename
3.254307
3.157841
1.030548
(root, filename) = os.path.split(rgb_filename)
rgb_timestamps = np.array(Kinect.timestamps_from_file_list(rgb_file_list))
depth_timestamps = np.array(Kinect.timestamps_from_file_list(depth_file_list))
needle_ts = rgb_timestamps[rgb_file_list.index(rgb_filename)]
haystack_idx = np.argmin(np.abs(depth_timestamps - needle_ts))
depth_filename = depth_file_list[haystack_idx]
return depth_filename
def depth_file_for_rgb_file(rgb_filename, rgb_file_list, depth_file_list)
Returns the *closest* depth file from an RGB filename
2.679957
2.571733
1.042082
"Remove all files without its own counterpart. Returns new lists of files" new_video_list = [] new_depth_list = [] for fname in video_file_list: try: depth_file = Kinect.depth_file_for_nir_file(fname, depth_file_list) new_video_list.append(fname) new_depth_list.append(depth_file) except IndexError: # Missing file pass # Purge bad files bad_nir = [f for f in video_file_list if f not in new_video_list] bad_depth = [f for f in depth_file_list if f not in new_depth_list] return (new_video_list, new_depth_list, bad_nir, bad_depth)
def find_nir_file_with_missing_depth(video_file_list, depth_file_list)
Remove all files without a counterpart. Returns new lists of files.
3.25508
2.486346
1.309182
"Convert image of Kinect disparity values to distance (linear method)" dist_img = dval_img / 2048.0 dist_img = 1 / (self.opars[0]*dist_img + self.opars[1]) return dist_img
def disparity_image_to_distance(self, dval_img)
Convert image of Kinect disparity values to distance (linear method)
8.453995
4.948099
1.708534
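For scale, a commonly cited OpenKinect calibration is distance = 1 / (raw * -0.00307 + 3.33) on raw disparity values; with the raw/2048 normalization used above, that corresponds to opars of roughly (-6.29, 3.33). These numbers are per-device approximations, not constants of the method:

    # Assumed calibration, for illustration only:
    # opars = (-6.29, 3.33)
    # raw value 600 -> 600/2048 ≈ 0.293 -> 1 / (-6.29*0.293 + 3.33) ≈ 0.67 m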
A = [len(s.inliers) for s in slice_list]
N_max = np.sum(A)
if N > N_max:
    raise ValueError("Tried to draw {:d} samples from a pool of only "
                     "{:d} items".format(N, N_max))

samples_from = np.zeros((len(A),), dtype='int')  # Number of samples to draw from each group

remaining = N
while remaining > 0:
    remaining_groups = np.flatnonzero(samples_from - np.array(A))
    if remaining < len(remaining_groups):
        np.random.shuffle(remaining_groups)
        for g in remaining_groups[:remaining]:
            samples_from[g] += 1
    else:
        # Give each group the allowed number of samples. Constrain to their max size.
        to_each = max(1, int(remaining / len(remaining_groups)))
        samples_from = np.min(np.vstack((samples_from + to_each, A)), axis=0)
    # Update remaining count
    remaining = int(N - np.sum(samples_from))

if not remaining == 0:
    raise ValueError("Still {:d} samples left! This is an error "
                     "in the selection.".format(remaining))

# Construct index list of selected samples
samples = []
for s, a, n in zip(slice_list, A, samples_from):
    if a == n:
        samples.append(np.array(s.inliers))  # all
    elif a == 0:
        samples.append(np.array([], dtype='int'))
    else:
        chosen = np.random.choice(s.inliers, n, replace=False)
        samples.append(np.array(chosen))

return samples
def fill_sampling(slice_list, N)
Given a list of slices, draw N samples such that each slice contributes
as much as possible

Parameters
--------------------------
slice_list : list of Slice
    List of slices
N : int
    Number of samples to draw
3.684188
3.763504
0.978925
if self.axis is None:
    x = self.points[:, 0, :].T
    y = self.points[:, -1, :].T

    inlier_ratio = 0.5
    R, t, dist, idx = rotations.estimate_rotation_procrustes_ransac(
        x, y, camera, ransac_threshold,
        inlier_ratio=inlier_ratio, do_translation=False)

    if R is not None:
        self.axis, self.angle = rotations.rotation_matrix_to_axis_angle(R)
        if self.angle < 0:  # Constrain to positive angles
            self.angle = -self.angle
            self.axis = -self.axis
        self.inliers = idx

return self.axis is not None
def estimate_rotation(self, camera, ransac_threshold=7.0)
Estimate the rotation between first and last frame

It uses RANSAC where the error metric is the reprojection error of the points
from the last frame to the first frame.

Parameters
-----------------
camera : CameraModel
    Camera model
ransac_threshold : float
    Distance threshold (in pixels) for a reprojected point to count as an inlier
4.225238
4.518031
0.935195
new_step = lambda: int(np.random.uniform(low=step_bounds[0], high=step_bounds[1]))
new_length = lambda: int(np.random.uniform(low=length_bounds[0], high=length_bounds[1]))

seq_frames = []
slices = []
seq_start_points = None
next_seq_start = new_step() if max_start is None else min(new_step(), max_start)
next_seq_length = new_length()

for i, im in enumerate(video_stream):
    if next_seq_start <= i < next_seq_start + next_seq_length:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        seq_frames.append(im)

        if len(seq_frames) == 1:
            max_corners = 400
            quality_level = 0.07
            seq_start_points = cv2.goodFeaturesToTrack(im, max_corners,
                                                       quality_level, min_distance)
        elif len(seq_frames) == next_seq_length:
            points, status = tracking.track_retrack(seq_frames, seq_start_points)
            if points.shape[0] >= min_slice_points:
                s = Slice(next_seq_start, i, points)
                slices.append(s)
                logger.debug('{0:4d} {1:3d} {2:5d} {3:>5d}-{4:<5d}'.format(
                    len(slices) - 1, points.shape[1], points.shape[0],
                    next_seq_start, i))
            seq_frames = []
            next_seq_start = i + new_step()
            next_seq_length = new_length()

return slices
def from_stream_randomly(video_stream, step_bounds=(5, 15), length_bounds=(2, 15), max_start=None, min_distance=10, min_slice_points=10)
Create slices from a video stream using random sampling

Parameters
-----------------
video_stream : VideoStream
    A video stream
step_bounds : tuple
    Range bounds (inclusive) of possible step lengths
length_bounds : tuple
    Range bounds (inclusive) of possible slice lengths
max_start : int
    Maximum frame number to start from
min_distance : float
    Minimum (initial) distance between tracked points
min_slice_points : int
    Minimum number of points to keep a slice

Returns
-------------------
list of Slice
    List of slices
2.564723
2.656633
0.965403
assert X.shape == Y.shape
assert X.shape[1] > 1 # At least two points are required
# Minimal case, create third point using cross product
if X.shape[1] == 2:
    X3 = np.cross(X[:,0], X[:,1], axis=0)
    X = np.hstack((X, (X3 / np.linalg.norm(X3)).reshape(3,1)))
    Y3 = np.cross(Y[:,0], Y[:,1], axis=0)
    Y = np.hstack((Y, (Y3 / np.linalg.norm(Y3)).reshape(3,1)))
D, N = X.shape[:2]
if remove_mean:
    mx = np.mean(X, axis=1).reshape(D, 1)
    my = np.mean(Y, axis=1).reshape(D, 1)
    Xhat = X - mx
    Yhat = Y - my
else:
    Xhat = X
    Yhat = Y
(U, S, V) = np.linalg.svd((Xhat).dot(Yhat.T))
Dtmp = np.eye(Xhat.shape[0])
Dtmp[-1,-1] = np.linalg.det(U.dot(V))
R_est = U.dot(Dtmp).dot(V)
# Now X=R_est*(Y-my)+mx=R_est*Y+t_est
if remove_mean:
    t_est = mx - R_est.dot(my)
else:
    t_est = None
return (R_est, t_est)
def procrustes(X, Y, remove_mean=False)
Orthogonal Procrustes problem solver. The Procrustes problem finds the best rotation R and translation t such that X = R*Y + t. The number of points in X and Y must be at least 2. For the minimal case of two points, a third point is temporarily created and used for the estimation. Parameters ----------------- X : (3, N) ndarray First set of points Y : (3, N) ndarray Second set of points remove_mean : bool If True, the mean is removed from X and Y before solving the Procrustes problem. Can yield better results in some applications. Returns ----------------- R : (3,3) ndarray Rotation component t : (3,) ndarray Translation component (None if remove_mean is False)
2.611223
2.453054
1.064478
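A quick sanity check (a sketch, not part of the library): points related by a known rotation should give that rotation back.

import numpy as np

np.random.seed(0)
theta = np.pi / 5
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.],
                   [np.sin(theta),  np.cos(theta), 0.],
                   [0.,             0.,            1.]])
Y = np.random.randn(3, 4) # Four generic 3D points
X = R_true.dot(Y)         # X = R*Y, no translation
R_est, t_est = procrustes(X, Y, remove_mean=False)
assert np.allclose(R_est, R_true) and t_est is None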
assert R.shape == (3,3) assert_almost_equal(np.linalg.det(R), 1.0, err_msg="Not a rotation matrix: determinant was not 1") S, V = np.linalg.eig(R) k = np.argmin(np.abs(S - 1.)) s = S[k] assert_almost_equal(s, 1.0, err_msg="Not a rotation matrix: No eigen value s=1") v = np.real(V[:, k]) # Result is generally complex vhat = np.array([R[2,1] - R[1,2], R[0,2] - R[2,0], R[1,0] - R[0,1]]) sintheta = 0.5 * np.dot(v, vhat) costheta = 0.5 * (np.trace(R) - 1) theta = np.arctan2(sintheta, costheta) return (v, theta)
def rotation_matrix_to_axis_angle(R)
Convert a 3D rotation matrix to a 3D axis-angle representation Parameters --------------- R : (3,3) array Rotation matrix Returns ---------------- v : (3,) array Unit rotation axis theta : float Angle of rotation, in radians Note -------------- This uses the algorithm as described in Multiple View Geometry, p. 584
2.606009
2.565317
1.015862
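For example, a 90 degree rotation about the z-axis (the eigendecomposition may flip the sign of both the axis and the angle; the pair still describes the same rotation):

import numpy as np

Rz = np.array([[0., -1., 0.],
               [1.,  0., 0.],
               [0.,  0., 1.]])
v, theta = rotation_matrix_to_axis_angle(Rz)
print(v, np.degrees(theta)) # Approximately [0, 0, 1] and 90.0, up to a joint sign flip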
if np.abs(theta) < np.spacing(1):
    return np.eye(3)
else:
    v = v.flatten() # Use a flat (3,) axis so the matrices below are built from scalars
    np.testing.assert_almost_equal(np.linalg.norm(v), 1.)
    vx = np.array([[0, -v[2], v[1]],
                   [v[2], 0, -v[0]],
                   [-v[1], v[0], 0]])
    vvt = np.outer(v, v)
    R = np.eye(3)*np.cos(theta) + (1 - np.cos(theta))*vvt + vx * np.sin(theta)
    return R
def axis_angle_to_rotation_matrix(v, theta)
Convert rotation from axis-angle to rotation matrix Parameters --------------- v : (3,) ndarray Rotation axis (normalized) theta : float Rotation angle (radians) Returns ---------------- R : (3,3) ndarray Rotation matrix
2.229653
2.382759
0.935745
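A round-trip check against rotation_matrix_to_axis_angle (comparing reconstructed matrices rather than axis/angle pairs, since the pair is only defined up to a joint sign flip):

import numpy as np

v = np.array([1., 2., 2.])
v /= np.linalg.norm(v) # The axis must be normalized
R = axis_angle_to_rotation_matrix(v, 0.7)
v2, theta2 = rotation_matrix_to_axis_angle(R)
assert np.allclose(axis_angle_to_rotation_matrix(v2, theta2), R)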
q = q.flatten() assert q.size == 4 assert_almost_equal(np.linalg.norm(q), 1.0, err_msg="Not a unit quaternion!") qq = q ** 2 R = np.array([[qq[0] + qq[1] - qq[2] - qq[3], 2*q[1]*q[2] - 2*q[0]*q[3], 2*q[1]*q[3] + 2*q[0]*q[2]], [2*q[1]*q[2] + 2*q[0]*q[3], qq[0] - qq[1] + qq[2] - qq[3], 2*q[2]*q[3] - 2*q[0]*q[1]], [2*q[1]*q[3] - 2*q[0]*q[2], 2*q[2]*q[3] + 2*q[0]*q[1], qq[0] - qq[1] - qq[2] + qq[3]]]) return R
def quat_to_rotation_matrix(q)
Convert unit quaternion to rotation matrix Parameters ------------- q : (4,) ndarray Unit quaternion, scalar as first element Returns ---------------- R : (3,3) ndarray Rotation matrix
1.545731
1.592733
0.97049
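With the scalar-first convention, the unit quaternion [cos(t/2), 0, 0, sin(t/2)] is a rotation by t about the z-axis, which can be checked against the axis-angle conversion above:

import numpy as np

t = 0.9
q = np.array([np.cos(t/2), 0., 0., np.sin(t/2)])
R = quat_to_rotation_matrix(q)
assert np.allclose(R, axis_angle_to_rotation_matrix(np.array([0., 0., 1.]), t))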
#NB: Quaternion q = [a, n1, n2, n3], scalar first q_list = np.zeros((gyro_ts.shape[0], 4)) # Nx4 quaternion list q_list[0,:] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation) # Iterate over all (except first) for i in range(1, gyro_ts.size): w = gyro_data[i] dt = gyro_ts[i] - gyro_ts[i - 1] qprev = q_list[i - 1] A = np.array([[0, -w[0], -w[1], -w[2]], [w[0], 0, w[2], -w[1]], [w[1], -w[2], 0, w[0]], [w[2], w[1], -w[0], 0]]) qnew = (np.eye(4) + (dt/2.0) * A).dot(qprev) qnorm = np.sqrt(np.sum(qnew ** 2)) qnew /= qnorm q_list[i] = qnew return q_list
def integrate_gyro_quaternion(gyro_ts, gyro_data)
Integrate angular velocities to rotations Parameters --------------- gyro_ts : ndarray Timestamps gyro_data : (N, 3) ndarray Angular velocity measurements Returns --------------- rotations : (N, 4) ndarray Rotation sequence as unit quaternions (first element scalar)
2.751794
2.726628
1.009229
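A small integration sketch, using the (N, 3) data layout the code indexes: a constant angular velocity of 0.1 rad/s about z, integrated for 10 seconds, should give roughly a 1 rad rotation (first-order integration, so a small time step matters):

import numpy as np

ts = np.linspace(0, 10, 1001)
w = np.tile([0., 0., 0.1], (ts.size, 1)) # (N, 3) angular velocities
q = integrate_gyro_quaternion(ts, w)
print(2 * np.arccos(q[-1, 0])) # Approximately 1.0 rad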
q1 = q1.flatten() q2 = q2.flatten() assert q1.shape == q2.shape assert q1.size == 4 costheta = np.dot(q1, q2) if np.isclose(u, 0.): return q1 elif np.isclose(u, 1.): return q2 elif u > 1 or u < 0: raise ValueError("u must be in range [0, 1]") # Shortest path if costheta < 0: costheta = -costheta q2 = -q2 # Almost the same, we can return any of them? if np.isclose(costheta, 1.0): return q1 theta = np.arccos(costheta) f1 = np.sin((1.0 - u)*theta) / np.sin(theta) f2 = np.sin(u*theta) / np.sin(theta) q = f1*q1 + f2*q2 q = q / np.sqrt(np.sum(q**2)) # Normalize return q
def slerp(q1, q2, u)
SLERP: Spherical linear interpolation between two unit quaternions. Parameters ------------ q1 : (4, ) ndarray Unit quaternion (first element scalar) q2 : (4, ) ndarray Unit quaternion (first element scalar) u : float Interpolation factor in range [0,1] where 0 is first quaternion and 1 is second quaternion. Returns ----------- q : (4,) ndarray The interpolated unit quaternion
2.268945
2.349572
0.965684
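For example, interpolating halfway between the identity and a 90 degree z-rotation gives a 45 degree z-rotation:

import numpy as np

q1 = np.array([1., 0., 0., 0.])                           # Identity
q2 = np.array([np.cos(np.pi/4), 0., 0., np.sin(np.pi/4)]) # 90 degrees about z
q = slerp(q1, q2, 0.5)
print(np.degrees(2 * np.arccos(q[0]))) # Approximately 45.0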
assert x.shape == y.shape assert x.shape[0] == 2 X = camera.unproject(x) Y = camera.unproject(y) data = np.vstack((X, Y, x)) assert data.shape[0] == 8 model_func = lambda data: procrustes(data[:3], data[3:6], remove_mean=do_translation) def eval_func(model, data): Y = data[3:6].reshape(3,-1) x = data[6:].reshape(2,-1) R, t = model Xhat = np.dot(R, Y) if t is None else np.dot(R, Y) + t xhat = camera.project(Xhat) dist = np.sqrt(np.sum((x-xhat)**2, axis=0)) return dist inlier_selection_prob = 0.99999 model_points = 2 ransac_iterations = int(np.log(1 - inlier_selection_prob) / np.log(1-inlier_ratio**model_points)) model_est, ransac_consensus_idx = ransac.RANSAC(model_func, eval_func, data, model_points, ransac_iterations, threshold, recalculate=True) if model_est is not None: (R, t) = model_est dist = eval_func((R, t), data) else: dist = None R, t = None, None ransac_consensus_idx = [] return R, t, dist, ransac_consensus_idx
def estimate_rotation_procrustes_ransac(x, y, camera, threshold, inlier_ratio=0.75, do_translation=False)
Calculate the rotation between two sets of image coordinates using RANSAC. The inlier criterion is the reprojection error of y into image 1. Parameters ------------------------- x : array 2xN image coordinates in image 1 y : array 2xN image coordinates in image 2 camera : CameraModel Camera model threshold : float Pixel distance threshold to accept as inlier inlier_ratio : float Expected inlier ratio, used to choose the number of RANSAC iterations do_translation : bool Try to estimate the translation as well Returns ------------------------ R : array 3x3 The rotation that best fulfills X = RY t : array 3x1 Translation (None if do_translation is False) residual : array Pixel distances ||x - xhat|| where xhat ~ KRY (including lens distortion) inliers : array Indices of the points (in X and Y) that are RANSAC inliers
3.174117
3.225483
0.984075
M = None max_consensus = 0 all_idx = list(range(data.shape[1])) final_consensus = [] for k in range(num_iter): np.random.shuffle(all_idx) model_set = all_idx[:num_points] x = data[:, model_set] m = model_func(x) model_error = eval_func(m, data) assert model_error.ndim == 1 assert model_error.size == data.shape[1] consensus_idx = np.flatnonzero(model_error < threshold) if len(consensus_idx) > max_consensus: M = m max_consensus = len(consensus_idx) final_consensus = consensus_idx # Recalculate using current consensus set? if recalculate and len(final_consensus) > 0: final_consensus_set = data[:, final_consensus] M = model_func(final_consensus_set) return (M, final_consensus)
def RANSAC(model_func, eval_func, data, num_points, num_iter, threshold, recalculate=False)
Apply RANSAC. This RANSAC implementation chooses the best model based on the number of points in the consensus set: a candidate model is created from num_points randomly drawn points, and if recalculate is True the winning model is recalculated from its full consensus set. Parameters ------------ model_func : callable Takes a data parameter of size DxK, where K is the number of points needed to construct the model, and returns the model eval_func : callable Takes a model and one or more data points (DxC, C>=1) and returns the per-point error relative to the model data : (D, N) ndarray Data matrix, where D is the dimensionality and N the number of samples num_points : int Number of points needed to construct a candidate model num_iter : int Number of RANSAC iterations threshold : float Error threshold for a point to count as an inlier recalculate : bool If True, recalculate the best model from its full consensus set Returns ------------ M : The best model found final_consensus : ndarray Indices of the points in the consensus set
2.693147
2.973257
0.90579
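A self-contained sketch of the expected call pattern, using a toy 2D line model (the helper functions below are hypothetical, written just for this example):

import numpy as np

def line_from_two(points):
    # points is (2, 2): two (x, y) columns; return the normalized line [a, b, c]
    (x1, y1), (x2, y2) = points.T
    a, b = y2 - y1, x1 - x2
    c = -(a * x1 + b * y1)
    n = np.hypot(a, b)
    return np.array([a, b, c]) / n

def point_line_dist(model, data):
    a, b, c = model
    return np.abs(a * data[0] + b * data[1] + c)

x = np.linspace(0, 10, 100)
data = np.vstack((x, 2 * x + 1)) # Points on the line y = 2x + 1
data[1, ::10] += 20              # Inject ten gross outliers
model, inliers = RANSAC(line_from_two, point_line_dist, data,
                        num_points=2, num_iter=100, threshold=0.5)
print(len(inliers)) # Approximately 90 (recalculate left False, since line_from_two only handles two points)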
params = dict(GFTT_DEFAULTS) # Copy, so the module-level defaults are not mutated
if gftt_params:
    params.update(gftt_params)
if initial_points is None:
    initial_points = cv2.goodFeaturesToTrack(img1, params['max_corners'], params['quality_level'], params['min_distance'])
[_points, status, err] = cv2.calcOpticalFlowPyrLK(img1, img2, initial_points, None)
# Filter out valid points only
points = _points[np.nonzero(status)]
initial_points = initial_points[np.nonzero(status)]
return (points, initial_points)
def track_points(img1, img2, initial_points=None, gftt_params={})
Track points between two images Parameters ----------------- img1 : (M, N) ndarray First image img2 : (M, N) ndarray Second image initial_points : ndarray Initial points. If None, initial points will be calculated from img1 using OpenCV's goodFeaturesToTrack gftt_params : dict Keyword arguments for goodFeaturesToTrack Returns ----------------- points : ndarray Tracked points initial_points : ndarray Initial points used
2.553009
2.980609
0.856539
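A hedged usage sketch (the file names are placeholders; the gftt_params key follows the params lookups in the code above):

import cv2

img1 = cv2.imread('frame0.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('frame1.png', cv2.IMREAD_GRAYSCALE)
points, initial = track_points(img1, img2, gftt_params={'max_corners': 200})
print(points.shape, initial.shape)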
flow = [] prev_img = None for img in image_sequence: if img.ndim == 3 and img.shape[2] == 3: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if prev_img is None: prev_img = img continue (next_points, prev_points) = track_points(prev_img, img, gftt_params=gftt_options) distance = np.sqrt(np.sum((next_points - prev_points)**2, 1)) distance2 = distance[np.nonzero(distance < max_diff)] # Crude outlier rejection dm = np.mean(distance2) if np.isnan(dm): dm = 0 flow.append(dm) prev_img = img return np.array(flow)
def optical_flow_magnitude(image_sequence, max_diff=60, gftt_options={})
Return optical flow magnitude for the given image sequence. The flow magnitude is the mean value of the total (sparse) optical flow between two images. Crude outlier rejection is performed using the max_diff parameter. Parameters ---------------- image_sequence : sequence Sequence of image data (ndarrays) to calculate flow magnitude from max_diff : float Distance threshold for outlier rejection gftt_options : dict Keyword arguments to the OpenCV goodFeaturesToTrack function Returns ---------------- flow : ndarray The optical flow magnitude
2.576895
2.572788
1.001596
# Precreate track array
tracks = np.zeros((initial_points.shape[0], len(image_list), 2), dtype='float32') # NxMx2
tracks[:,0,:] = np.reshape(np.array(initial_points), [-1,2])
track_status = np.ones([np.size(initial_points,0),1]) # All initial points are OK
window_size = (5,5)
for i in range(1, len(image_list)):
    img1 = image_list[i-1]
    img2 = image_list[i]
    prev_ok_track = np.flatnonzero(track_status)
    prev_points = tracks[prev_ok_track,i-1,:]
    [points, status, err] = cv2.calcOpticalFlowPyrLK(img1, img2, prev_points, None, None, None, window_size)
    if status is None:
        track_status[:] = 0 # All tracks are bad
        break
    valid_set = np.flatnonzero(status)
    now_ok_tracks = prev_ok_track[valid_set] # Remap
    tracks[now_ok_tracks,i,:] = points[valid_set]
    track_status[prev_ok_track] = status
if remove_bad:
    final_ok = np.flatnonzero(track_status)
    tracks = tracks[final_ok] # Only rows/tracks with nonzero status
    track_status = track_status[final_ok]
return (tracks, track_status)
def track(image_list, initial_points, remove_bad=True)
Track points in image list Parameters ---------------- image_list : list List of images to track in initial_points : ndarray Initial points to use (in first image in image_list) remove_bad : bool If True, then the resulting list of tracks will only contain successfully tracked points. Else, it will contain all points present in initial_points. Returns ----------------- tracks : (N, M, 2) ndarray N tracks over M images with (x,y) coordinates of points status : (N,) ndarray The status of each track. 1 means ok, while 0 means tracking failure
3.449874
3.390661
1.017463
(forward_track, forward_status) = track(image_list, initial_points, remove_bad=False) # Reverse the order (backward_track, backward_status) = track(image_list[::-1], forward_track[:,-1,:], remove_bad=False) # Prune bad tracks ok_track = np.flatnonzero(forward_status * backward_status) # Only good if good in both forward_first = forward_track[ok_track,0,:] backward_last = backward_track[ok_track,-1,:] # Distance retrack_distance = np.sqrt(np.sum((forward_first - backward_last)**2, 1)) # Allowed retracked_ok = np.flatnonzero(retrack_distance <= max_retrack_distance) final_ok = ok_track[retracked_ok] if keep_bad: # Let caller check status status = np.zeros(forward_status.shape) status[final_ok] = 1 return (forward_track, status) else: # Remove tracks with faulty retrack return (forward_track[final_ok], forward_status[final_ok])
def track_retrack(image_list, initial_points, max_retrack_distance=0.5, keep_bad=False)
Track-retrack points in an image list. Using track-retrack helps keep only point tracks of high quality. Each point is tracked forward, and then backwards, through the image sequence. Points that end up further than max_retrack_distance from their starting point are marked as bad. Parameters ---------------- image_list : list List of images to track in initial_points : ndarray Initial points to use (in first image in image_list) max_retrack_distance : float The maximum distance of the retracked point from its starting point to still count as a successful retrack. keep_bad : bool If True, all tracks are returned together with per-track status flags. If False (default), only successfully retracked points are returned. Returns ----------------- tracks : (N, M, 2) ndarray N tracks over M images with (x,y) coordinates of points. Note that M is the number of images in the input, and the returned track comes from the forward tracking step. status : (N,) ndarray The status of each track. 1 means ok, while 0 means tracking failure
3.332981
3.288329
1.013579
M = scipy.io.loadmat(matfilename) instance = cls() instance.gyro_data = M['gyro'] instance.timestamps = M['timestamps'] return instance
def from_mat_file(cls, matfilename)
Load gyro data from a .mat file. The MAT file should contain the following two arrays: gyro : (3, N) float ndarray The angular velocity measurements. timestamps : (N, ) float ndarray Timestamps of the measurements. Parameters --------------- matfilename : string Name of the .mat file Returns ---------------- A new IMU class instance
4.401412
3.295808
1.335458
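A round-trip sketch using scipy.io.savemat to produce a compatible file (the class name IMU follows the docstring and is an assumption; note that loadmat returns 2D arrays, so timestamps comes back with shape (1, N)):

import numpy as np
import scipy.io

scipy.io.savemat('gyro.mat', {'gyro': np.zeros((3, 100)),
                              'timestamps': np.linspace(0, 1, 100)})
imu = IMU.from_mat_file('gyro.mat') # Class name assumed from the docstring
print(imu.gyro_data.shape, imu.timestamps.shape) # (3, 100) (1, 100)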
N = len(self.timestamps)
t = self.timestamps[-1] - self.timestamps[0]
rate = (N - 1) / t # N timestamps span N - 1 sample intervals
return rate
def rate(self)
Get the sample rate in Hz. Returns --------- rate : float The sample rate, in Hz, calculated from the timestamps
3.934084
3.387308
1.161419
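A quick check of the formula: 101 timestamps spanning exactly one second correspond to a 100 Hz sample rate.

import numpy as np

ts = np.linspace(0, 1.0, 101)
print((len(ts) - 1) / (ts[-1] - ts[0])) # 100.0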
t1 = t0 + duration indices = np.flatnonzero((self.timestamps >= t0) & (self.timestamps <= t1)) m = np.mean(self.gyro_data[:, indices], axis=1) self.gyro_data -= m.reshape(3,1) return self.gyro_data
def zero_level_calibrate(self, duration, t0=0.0)
Performs zero-level calibration from the chosen time interval. This changes the previously loaded data in-place. Parameters -------------------- duration : float Number of time units to use for calibration t0 : float Starting time for calibration Returns ---------------------- gyro_data : (3, N) float ndarray The calibrated data (note that it is also changed in-place!)
3.483032
3.253083
1.070686
if uniform:
    dt = float(self.timestamps[1]-self.timestamps[0]) # Must be python float for fastintegrate to work
    return fastintegrate.integrate_gyro_quaternion_uniform(self.gyro_data_corrected, dt)
else:
    N = len(self.timestamps)
    integrated = np.zeros((4, N))
    integrated[:,0] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation)
    # Iterate over all
    for i in range(1, len(self.timestamps)):
        w = pose_correction.dot(self.gyro_data[:, i]) # Change to correct coordinate frame
        dt = float(self.timestamps[i] - self.timestamps[i - 1])
        qprev = integrated[:, i - 1].flatten()
        A = np.array([[0, -w[0], -w[1], -w[2]],
                     [w[0], 0, w[2], -w[1]],
                     [w[1], -w[2], 0, w[0]],
                     [w[2], w[1], -w[0], 0]])
        qnew = (np.eye(4) + (dt/2.0) * A).dot(qprev)
        qnorm = np.sqrt(np.sum(qnew ** 2))
        qnew = qnew / qnorm if qnorm > 0 else qnew # Avoid division by zero; keep unnormalized rather than zeroing out
        integrated[:, i] = qnew
    return integrated
def integrate(self, pose_correction=np.eye(3), uniform=True)
Integrate angular velocity measurements to rotations. Parameters ------------- pose_correction : (3,3) ndarray, optional Rotation matrix that describes the relative pose between the IMU and something else (e.g. camera). uniform : bool If True (default), assume uniform sample rate. This will use a faster integration method. Returns ------------- rotations : (4, N) ndarray Rotations as unit quaternions with scalar as first element.
3.273345
3.134402
1.044328
idx = np.flatnonzero(timestamps >= (t - 0.0001))[0] t0 = timestamps[idx - 1] t1 = timestamps[idx] tau = (t - t0) / (t1 - t0) q1 = rotation_sequence[:, idx - 1] q2 = rotation_sequence[:, idx] q = rotations.slerp(q1, q2, tau) return q
def rotation_at_time(t, timestamps, rotation_sequence)
Get the gyro rotation at time t using SLERP. Parameters ----------- t : float The query timestamp. timestamps : array_like float List of all timestamps rotation_sequence : (4, N) ndarray Rotation sequence as unit quaternions with scalar part as first element. Returns ----------- q : (4,) ndarray Unit quaternion representing the rotation at time t.
2.776367
2.65033
1.047555
instance = cls() instance.data = np.loadtxt(filename, delimiter=',') return instance
def from_csv(cls, filename)
Create gyro stream from CSV data. Load data from a CSV file. The data must be formatted with three values per line: (x, y, z), where x, y, z are the measured angular velocities (in rad/s) around the respective axes. Parameters ------------------- filename : str Path to the CSV file Returns --------------------- GyroStream A gyroscope stream
4.355601
7.018912
0.620552
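A usage sketch (the class name GyroStream follows the docstring; treat it as an assumption):

import numpy as np

np.savetxt('gyro.csv', np.zeros((100, 3)), delimiter=',')
stream = GyroStream.from_csv('gyro.csv')
print(stream.data.shape) # (100, 3)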
if not data.shape[1] == 3: raise ValueError("Gyroscope data must have shape (N, 3)") instance = cls() instance.data = data return instance
def from_data(cls, data)
Create gyroscope stream from data array Parameters ------------------- data : (N, 3) ndarray Data array of angular velocities (rad/s) Returns ------------------- GyroStream Stream object
4.638785
3.972841
1.167624
if not dt == self.__last_dt: self.__last_q = fastintegrate.integrate_gyro_quaternion_uniform(self.data, dt) self.__last_dt = dt return self.__last_q
def integrate(self, dt)
Integrate gyro measurements to orientation using a uniform sample rate. Parameters ------------------- dt : float Sample distance in seconds Returns ---------------- orientation : (4, N) ndarray Gyroscope orientation in quaternion form (s, q1, q2, q3)
8.208923
6.915275
1.187071
Nc = int(np.ceil(gstd*3)*2+1) # Odd kernel size with cutoff at 3 sigma
x = np.linspace(-(Nc-1)/2,(Nc-1)/2,Nc,endpoint=True)
g = np.exp(-.5*((x/gstd)**2))
g = g/np.sum(g) # Normalize to sum to 1
return g
def gaussian_kernel(gstd)
Generate an odd-sized, truncated Gaussian kernel. The generated filter kernel has a cutoff at $3\sigma$ and is normalized to sum to 1 Parameters ------------- gstd : float Standard deviation of filter Returns ------------- g : ndarray Array with kernel coefficients
3.069142
3.024447
1.014778
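The kernel is odd-length, symmetric, and sums to one; for example, with $\sigma = 2$:

import numpy as np

g = gaussian_kernel(2.0)
print(len(g))                     # 13 taps: ceil(3*2)*2 + 1
assert np.isclose(np.sum(g), 1.0) # Normalized
assert np.allclose(g, g[::-1])    # Symmetric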
Ns = int(np.floor(np.size(time_series)/downsample_factor))
g = gaussian_kernel(0.5*downsample_factor)
ts_blur = np.convolve(time_series,g,'same')
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
    cpos = (k+.5)*downsample_factor-.5 # Center of the k:th output sample, in input coordinates
    cfrac = cpos-np.floor(cpos)
    cind = int(np.floor(cpos))
    if cfrac>0:
        ts_out[k]=ts_blur[cind]*(1-cfrac)+ts_blur[cind+1]*cfrac
    else:
        ts_out[k]=ts_blur[cind]
return ts_out
def subsample(time_series, downsample_factor)
Subsample with a Gaussian prefilter. The prefilter has standard deviation $\sigma_g = 0.5 \cdot \mathrm{downsample\_factor}$ Parameters -------------- time_series : ndarray Input signal downsample_factor : float Downsampling factor Returns -------------- ts_out : ndarray The downsampled signal
2.928866
2.882844
1.015964
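For example, downsampling a 100-sample ramp by a factor of 4 gives a quarter-length column vector:

import numpy as np

out = subsample(np.linspace(0, 1, 100), 4)
print(out.shape) # (25, 1)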
Ns0 = np.size(time_series)
Ns = int(np.floor(np.size(time_series)*scaling_factor))
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
    # Source position, clamped to the valid input range (edge replication)
    cpos = np.min([Ns0-1, np.max([0., (k+0.5)/scaling_factor-0.5])])
    cfrac = cpos-np.floor(cpos)
    cind = int(np.floor(cpos))
    if cfrac>0:
        ts_out[k]=time_series[cind]*(1-cfrac)+time_series[cind+1]*cfrac
    else:
        ts_out[k]=time_series[cind]
return ts_out
def upsample(time_series, scaling_factor)
Upsample using linear interpolation. Values outside the input range are replicated from the edges. Parameters -------------- time_series : ndarray Input signal scaling_factor : float The factor to upsample with Returns -------------- ts_out : ndarray The upsampled signal
2.768652
2.706974
1.022785
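For example, upsampling a short ramp by a factor of 2 (interior samples are linearly interpolated, while the first and last samples are clamped to the edges):

import numpy as np

out = upsample(np.arange(5, dtype='float64'), 2.0)
print(out.ravel()) # [0. 0.25 0.75 1.25 1.75 2.25 2.75 3.25 3.75 4.]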