code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if context.readDataFile('senaite.lims.txt') is None: return logger.info("SENAITE setup handler [BEGIN]") portal = context.getSite() # noqa # Custom setup handlers setup_html_filter(portal) logger.info("SENAITE setup handler [DONE]")
def setup_handler(context)
Generic setup handler
14.861157
14.389626
1.032769
logger.info("*** Setup HTML Filter ***") # bypass the broken API from portal_transforms adapter = IFilterSchema(portal) style_whitelist = adapter.style_whitelist for style in ALLOWED_STYLES: logger.info("Allow style '{}'".format(style)) if style not in style_whitelist: style_whitelist.append(style) adapter.style_whitelist = style_whitelist
def setup_html_filter(portal)
Setup HTML filtering for results interpretations
7.214129
7.423172
0.971839
logger.info("SENAITE LIMS pre-install handler [BEGIN]") # https://docs.plone.org/develop/addons/components/genericsetup.html#custom-installer-code-setuphandlers-py profile_id = "profile-senaite.lims:default" context = portal_setup._getImportContext(profile_id) portal = context.getSite() # noqa # Only install the core once! qi = portal.portal_quickinstaller if not qi.isProductInstalled("bika.lims"): portal_setup.runAllImportStepsFromProfile("profile-bika.lims:default") logger.info("SENAITE LIMS pre-install handler [DONE]")
def pre_install(portal_setup)
Runs before the first import step of the *default* profile. This handler is registered as a *pre_handler* in the generic setup profile. :param portal_setup: SetupTool
9.387812
9.203812
1.019992
logger.info("Run all import steps from SENAITE LIMS ...") context = portal_setup._getImportContext(PROFILE_ID) portal = context.getSite() setup_html_filter(portal) portal_setup.runAllImportStepsFromProfile(PROFILE_ID) logger.info("Run all import steps from SENAITE LIMS [DONE]")
def to_1000(portal_setup)
Initial version to 1000 :param portal_setup: The portal_setup tool
11.158612
11.323108
0.985473
catalogs = [ CATALOG_ANALYSIS_REQUEST_LISTING, "portal_catalog", "bika_setup_catalog", "bika_catalog", "bika_catalog_worksheet_listing" ] search_results = [] for catalog in catalogs: search_results.extend(search(catalog=catalog)) # extract the data from all the brains items = map(get_brain_info, search_results) return { "count": len(items), "items": sorted(items, key=itemgetter("title")), }
def spotlight_search_route(context, request)
The spotlight search route
5.659213
5.714409
0.990341
icon = api.get_icon(brain) # avoid 404 errors with these guys if "document_icon.gif" in icon: icon = "" id = api.get_id(brain) url = api.get_url(brain) title = api.get_title(brain) description = api.get_description(brain) parent = api.get_parent(brain) parent_title = api.get_title(parent) parent_url = api.get_url(parent) return { "id": id, "title": title, "title_or_id": title or id, "description": description, "url": url, "parent_title": parent_title, "parent_url": parent_url, "icon": icon, }
def get_brain_info(brain)
Extract the brain info
2.568847
2.536528
1.012742
if query is None: query = make_query(catalog) if query is None: return [] return api.search(query, catalog=catalog)
def search(query=None, catalog=None)
Search
3.847989
4.073045
0.944745
searchable_text_index = "SearchableText" listing_searchable_text_index = "listing_searchable_text" if catalog == CATALOG_ANALYSIS_REQUEST_LISTING: tool = api.get_tool(catalog) indexes = tool.indexes() if listing_searchable_text_index in indexes: return listing_searchable_text_index return searchable_text_index
def get_search_index_for(catalog)
Returns the search index to query
4.80531
4.781503
1.004979
query = {} request = api.get_request() index = get_search_index_for(catalog) limit = request.form.get("limit") q = request.form.get("q", "") if len(q) > 0: query[index] = q + "*" else: return None portal_type = request.form.get("portal_type") if portal_type: if not isinstance(portal_type, list): portal_type = [portal_type] query["portal_type"] = portal_type if limit and limit.isdigit(): query["sort_limit"] = int(limit) return query
def make_query(catalog)
A function to prepare a query
3.192886
3.132154
1.01939
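For illustration, a standalone sketch of the query dict make_query builds from a request; the form values and the listing_searchable_text index name are assumptions for this example, not taken from a live request.

form = {"q": "water", "portal_type": "Client", "limit": "5"}  # hypothetical request form
query = {}
q = form.get("q", "")
if q:
    query["listing_searchable_text"] = q + "*"  # right-hand wildcard search term
portal_type = form.get("portal_type")
if portal_type:
    if not isinstance(portal_type, list):
        portal_type = [portal_type]
    query["portal_type"] = portal_type
limit = form.get("limit")
if limit and limit.isdigit():
    query["sort_limit"] = int(limit)
print(query)
# {'listing_searchable_text': 'water*', 'portal_type': ['Client'], 'sort_limit': 5}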
url = api.get_url(brain_or_object) modified = api.get_modification_date(brain_or_object).millis() key = "{}?modified={}".format(url, modified) logger.debug("Generated Cache Key: {}".format(key)) return key
def icon_cache_key(method, self, brain_or_object)
Generates a cache key for the icon lookup Includes the virtual URL to handle multiple HTTP/HTTPS domains Example: http://senaite.local/clients?modified=1512033263370
4.5187
4.20082
1.075671
portal_types = api.get_tool("portal_types") fti = portal_types.getTypeInfo(api.get_portal_type(brain_or_object)) icon = fti.getIcon() if not icon: return "" # Always try to get the big icon for high-res displays icon_big = icon.replace(".png", "_big.png") # fall back to a default icon if the looked up icon does not exist if self.context.restrictedTraverse(icon_big, None) is None: icon_big = None portal_url = api.get_url(api.get_portal()) title = api.get_title(brain_or_object) html_tag = "<img title='{}' src='{}/{}' width='16' />".format( title, portal_url, icon_big or icon) logger.info("Generated Icon Tag for {}: {}".format( api.get_path(brain_or_object), html_tag)) return html_tag
def get_icon_for(self, brain_or_object)
Get the navigation portlet icon for the brain or object The cache key ensures that the lookup is done only once per domain name
4.346063
4.306958
1.009079
values = { 'width': 'device-width', 'initial-scale': '1.0', } return ','.join('%s=%s' % (k, v) for k, v in values.items())
def getViewportValues(self, view=None)
Determine the value of the viewport meta-tag
3.719366
3.059568
1.215651
plone_view = getMultiAdapter( (self.context, self.request), name=u'plone') portal_state = getMultiAdapter( (self.context, self.request), name=u'plone_portal_state') sl = plone_view.have_portlets('plone.leftcolumn', view=view) sr = plone_view.have_portlets('plone.rightcolumn', view=view) isRTL = portal_state.is_rtl() # pre-fill dictionary columns = dict(one="", content="", two="") if not sl and not sr: # we don't have columns, thus content takes the whole width columns['content'] = "col-md-12" elif sl and sr: # In case we have both columns, content takes 50% of the whole # width and the rest 50% is spread between the columns columns['one'] = "col-xs-12 col-md-2" columns['content'] = "col-xs-12 col-md-8" columns['two'] = "col-xs-12 col-md-2" elif (sr and not sl) and not isRTL: # We have right column and we are NOT in RTL language columns['content'] = "col-xs-12 col-md-10" columns['two'] = "col-xs-12 col-md-2" elif (sl and not sr) and isRTL: # We have left column and we are in RTL language columns['one'] = "col-xs-12 col-md-2" columns['content'] = "col-xs-12 col-md-10" elif (sl and not sr) and not isRTL: # We have left column and we are NOT in RTL language columns['one'] = "col-xs-12 col-md-2" columns['content'] = "col-xs-12 col-md-10" # # append cell to each css-string # for key, value in columns.items(): # columns[key] = "cell " + value return columns
def getColumnsClasses(self, view=None)
Determine whether a column should be shown. The left column is called plone.leftcolumn; the right column is called plone.rightcolumn.
2.77755
2.70924
1.025214
icon_url = api.get_icon(brain, html_tag=False) url, icon = icon_url.rsplit("/", 1) # strip the portal URL prefix (str.lstrip strips characters, not a prefix) relative_url = url.replace(self.portal.absolute_url(), "", 1) name, ext = os.path.splitext(icon) # big icons end with _big if not name.endswith("_big"): icon = "{}_big{}".format(name, ext) icon_big_url = "/".join([relative_url, icon]) # fall back to a default icon if the looked up icon does not exist if self.context.restrictedTraverse(icon_big_url, None) is None: icon_big_url = "++resource++senaite.lims.images/gears.png" return icon_big_url
def get_icon_url(self, brain)
Returns the (big) icon URL for the given catalog brain
5.688226
5.540761
1.026615
query = { "path": { "query": api.get_path(self.setup), "depth": 1, }, } items = api.search(query, "portal_catalog") # filter out items items = filter(lambda item: not item.exclude_from_nav, items) # sort by (translated) title def cmp_by_translated_title(brain1, brain2): title1 = t(api.get_title(brain1)) title2 = t(api.get_title(brain2)) return cmp(title1, title2) return sorted(items, cmp=cmp_by_translated_title)
def setupitems(self)
Lookup available setup items :returns: catalog brains
4.642097
4.190907
1.107659
if hasattr(self, '_content_type'): return self._content_type filename, extension = os.path.splitext(self._file_path) if extension == '.csv': self._content_type = 'text/csv' elif extension == '.tsv': self._content_type = 'text/tab-separated-values' else: self._content_type = 'text/plain' return self._content_type
def content_type(self)
Returns the content-type value determined by file extension.
1.992263
1.740264
1.144805
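As a quick sketch of the extension-to-MIME mapping the content_type property applies (a standalone rewrite, not the class itself; the file names are made up):

import os

def guess_content_type(file_path):
    # same mapping as the record above, with text/plain as the fallback
    _, extension = os.path.splitext(file_path)
    return {
        ".csv": "text/csv",
        ".tsv": "text/tab-separated-values",
    }.get(extension, "text/plain")

assert guess_content_type("audience.csv") == "text/csv"
assert guess_content_type("users.tsv") == "text/tab-separated-values"
assert guess_content_type("notes.txt") == "text/plain"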
if self._file_size < self._SINGLE_UPLOAD_MAX: resource = "{0}{1}".format(self._DEFAULT_RESOURCE, self.bucket) response = self.__upload(resource, open(self._file_path, 'rb').read()) return response.headers['location'] else: response = self.__init_chunked_upload() min_chunk_size = int(response.headers['x-ton-min-chunk-size']) chunk_size = min_chunk_size * self._DEFAULT_CHUNK_SIZE location = response.headers['location'] f = open(self._file_path, 'rb') bytes_read = 0 while True: bytes = f.read(chunk_size) if not bytes: break bytes_start = bytes_read bytes_read += len(bytes) response = self.__upload_chunk(location, chunk_size, bytes, bytes_start, bytes_read) response_time = int(response.headers['x-response-time']) chunk_size = min_chunk_size * size(self._DEFAULT_CHUNK_SIZE, self._RESPONSE_TIME_MAX, response_time) f.close() return location.split("?")[0]
def perform(self)
Executes the current TONUpload object.
3.346987
3.123806
1.071445
# note: string conversion required here due to open encoding bug in requests-oauthlib. headers = { 'x-ton-expires': http_time(self.options.get('x-ton-expires', self._DEFAULT_EXPIRE)), 'content-length': str(self._file_size), 'content-type': self.content_type } return Request(self._client, 'post', resource, domain=self._DEFAULT_DOMAIN, headers=headers, body=bytes).perform()
def __upload(self, resource, bytes)
Performs a single (non-chunked) upload.
9.060537
8.29646
1.092097
# note: string conversion required here due to open encoding bug in requests-oauthlib. headers = { 'x-ton-content-type': self.content_type, 'x-ton-content-length': str(self._file_size), 'x-ton-expires': http_time(self.options.get('x-ton-expires', self._DEFAULT_EXPIRE)), 'content-length': str(0), 'content-type': self.content_type } resource = "{0}{1}?resumable=true".format(self._DEFAULT_RESOURCE, self._DEFAULT_BUCKET) return Request(self._client, 'post', resource, domain=self._DEFAULT_DOMAIN, headers=headers).perform()
def __init_chunked_upload(self)
Initialization for a multi-chunk upload.
5.676779
5.475782
1.036707
# note: string conversion required here due to open encoding bug in requests-oauthlib. headers = { 'content-type': self.content_type, 'content-length': str(min([chunk_size, self._file_size - bytes_read])), 'content-range': "bytes {0}-{1}/{2}".format( bytes_start, bytes_read - 1, self._file_size) } return Request(self._client, 'put', resource, domain=self._DEFAULT_DOMAIN, headers=headers, body=bytes).perform()
def __upload_chunk(self, resource, chunk_size, bytes, bytes_start, bytes_read)
Uploads a single chunk of a multi-chunk upload.
4.973521
5.028262
0.989113
if self._current_index < len(self._collection): value = self._collection[self._current_index] self._current_index += 1 return value elif self._next_cursor: self.__fetch_next() return self.next() else: self._current_index = 0 raise StopIteration
def next(self)
Returns the next item in the cursor.
2.581895
2.392711
1.079067
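A minimal sketch of the paging pattern behind next: exhaust the local collection, then fetch the next page while a cursor token remains. Written for Python 3 (__next__) with fake in-memory pages standing in for API responses:

class Cursor(object):
    def __init__(self, pages):
        self._pages = iter(pages)
        self._collection, self._next_cursor = next(self._pages)
        self._current_index = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self._current_index < len(self._collection):
            value = self._collection[self._current_index]
            self._current_index += 1
            return value
        elif self._next_cursor:
            # stands in for __fetch_next in the record above
            self._collection, self._next_cursor = next(self._pages)
            self._current_index = 0
            return self.__next__()
        else:
            self._current_index = 0
            raise StopIteration

pages = [([1, 2], "cursor-token"), ([3], None)]
print(list(Cursor(pages)))  # [1, 2, 3]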
params = self.to_params() if 'tweet_id' in params: params['tweet_ids'] = [params['tweet_id']] del params['tweet_id'] if self.id: raise HTTPError("Method PUT not allowed.") resource = self.RESOURCE_COLLECTION.format(account_id=self.account.id) response = Request(self.account.client, 'post', resource, params=params).perform() return self.from_response(response.body['data'][0])
def save(self)
Saves or updates the current object instance depending on the presence of `object.id`.
3.927238
3.912968
1.003647
if self.id: resource = self.PREVIEW resource = resource.format(account_id=self.account.id, id=self.id) response = Request(self.account.client, 'get', resource).perform() return response.body['data']
def preview(self)
Returns an HTML preview for a Scheduled Tweet.
5.2021
4.802073
1.083303
resource = klass.RESOURCE.format(id=id) response = Request(client, 'get', resource, params=kwargs).perform() return klass(client).from_response(response.body['data'])
def load(klass, client, id, **kwargs)
Returns an object instance for a given resource.
4.960845
4.599257
1.078619
resource = klass.RESOURCE_COLLECTION request = Request(client, 'get', resource, params=kwargs) return Cursor(klass, request, init_with=[client])
def all(klass, client, **kwargs)
Returns a Cursor instance for a given resource.
8.677993
6.513203
1.33237
self._validate_loaded() resource = self.FEATURES.format(id=self.id) response = Request(self.client, 'get', resource).perform() return response.body['data']
def features(self)
Returns a collection of features available to the current account.
8.898976
7.174701
1.240327
self._validate_loaded() params = {'user_id': id} params.update(kwargs) resource = self.SCOPED_TIMELINE.format(id=self.id) response = Request(self.client, 'get', resource, params=params).perform() return response.body['data']
def scoped_timeline(self, *id, **kwargs)
Returns the most recent promotable Tweets created by the specified Twitter user.
5.134161
4.833315
1.062244
start = min([parse(d['activity_start_time']) for d in data]) end = max([parse(d['activity_end_time']) for d in data]) start = remove_hours(start) end = remove_hours(end) + timedelta(days=1) return start, end
def date_range(data)
Returns the minimum activity start time and the maximum activity end time from the active entities response. These dates are modified in the following way. The hours (and minutes and so on) are removed from the start and end times and a *day* is added to the end time. These are the dates that should be used in the subsequent analytics request.
2.929561
2.341412
1.251194
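A worked example of date_range's rounding rules, assuming remove_hours truncates a datetime to midnight (the input dates are made up):

from datetime import datetime, timedelta

def remove_hours(t):
    return t.replace(hour=0, minute=0, second=0, microsecond=0)

start = remove_hours(datetime(2018, 6, 1, 9, 30))
end = remove_hours(datetime(2018, 6, 3, 17, 5)) + timedelta(days=1)
print(start, end)  # 2018-06-01 00:00:00 2018-06-04 00:00:00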
if isinstance(VERSION[-1], str): return '.'.join(map(str, VERSION[:-1])) + VERSION[-1] return '.'.join(map(str, VERSION))
def get_version()
Returns a string representation of the current SDK version.
3.155571
3.066943
1.028898
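For example, assuming a VERSION tuple like the one the SDK might define, a trailing string suffix is appended without a separating dot:

VERSION = (1, 2, 3, "b1")  # hypothetical version tuple

def get_version():
    if isinstance(VERSION[-1], str):
        return ".".join(map(str, VERSION[:-1])) + VERSION[-1]
    return ".".join(map(str, VERSION))

print(get_version())  # 1.2.3b1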
if not granularity: if type(time) is datetime.date: return format_date(time) else: return format_time(time) if granularity == GRANULARITY.HOUR: return format_time(remove_minutes(time)) elif granularity == GRANULARITY.DAY: return format_date(remove_hours(time)) else: return format_time(time)
def to_time(time, granularity)
Returns a truncated and rounded time string based on the specified granularity.
2.429053
2.426227
1.001165
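A sketch of the truncation the two granularities imply, assuming remove_minutes and remove_hours zero out the smaller fields (the helpers here are stand-ins for the module's own):

import datetime

def remove_minutes(t):
    return t.replace(minute=0, second=0, microsecond=0)

def remove_hours(t):
    return t.replace(hour=0, minute=0, second=0, microsecond=0)

t = datetime.datetime(2018, 3, 5, 14, 42, 7)
print(remove_minutes(t).isoformat())  # 2018-03-05T14:00:00 (HOUR granularity)
print(remove_hours(t).isoformat())    # 2018-03-05T00:00:00 (DAY granularity)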
return formatdate(timeval=mktime(time.timetuple()), localtime=False, usegmt=True)
def http_time(time)
Formats a datetime as an RFC 1123 compliant string.
4.245875
3.246936
1.307656
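A runnable one-off of the same formatdate call; note that mktime interprets the timetuple as local time, so the printed hour shifts with the machine's timezone:

from datetime import datetime
from email.utils import formatdate
from time import mktime

dt = datetime(2017, 11, 30, 8, 30)  # arbitrary example datetime
print(formatdate(timeval=mktime(dt.timetuple()), localtime=False, usegmt=True))
# e.g. 'Thu, 30 Nov 2017 08:30:00 GMT' on a UTC machine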
if response_time_actual == 0: response_time_actual = 1 scale = 1 / (response_time_actual / response_time_max) size = int(default_chunk_size * scale) return min(max(size, 1), default_chunk_size)
def size(default_chunk_size, response_time_max, response_time_actual)
Determines the chunk size based on response times.
2.33564
2.322495
1.00566
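Worked numbers for the scaling above, under Python 3 true division (under Python 2 the inner division would truncate): a fast response leaves the multiplier clamped at the default, a slow one shrinks it.

def size(default_chunk_size, response_time_max, response_time_actual):
    if response_time_actual == 0:
        response_time_actual = 1
    scale = 1 / (response_time_actual / response_time_max)
    return min(max(int(default_chunk_size * scale), 1), default_chunk_size)

print(size(64, 5000, 1000))   # 64  (fast response: scale 5.0, clamped to the default)
print(size(64, 5000, 10000))  # 32  (slow response: scale 0.5 halves the multiplier)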
if response.code: return ERRORS[response.code](response) else: return Error(response)
def from_response(response)
Returns the correct error type from a :class:`Response` object.
8.724373
5.672925
1.537897
def fget(self): return self._options.get('sandbox', None) def fset(self, value): self._options['sandbox'] = value return locals()
def sandbox()
Enables and disables sandbox mode.
3.535611
3.106452
1.138151
def fget(self): return self._options.get('trace', None) def fset(self, value): self._options['trace'] = value return locals()
def trace()
Enables and disables request tracing.
3.395801
3.055882
1.111234
return Account.load(self, id) if id else Account.all(self)
def accounts(self, id=None)
Returns a collection of advertiser :class:`Accounts` available to the current access token.
6.362135
10.677545
0.595843
resource = klass.RESOURCE_OPTIONS + 'platform_versions' request = Request(account.client, 'get', resource, params=kwargs) return Cursor(None, request)
def platform_versions(klass, account, **kwargs)
Returns a list of supported platform versions
11.420634
10.152625
1.124895
self._validate_loaded() if id is None: return TargetingCriteria.all(self.account, self.id, **kwargs) else: return TargetingCriteria.load(self.account, id, **kwargs)
def targeting_criteria(self, id=None, **kwargs)
Returns a collection of targeting criteria available to the current line item.
3.493034
3.837227
0.910302
params = {} params.update(kwargs) # handles array to string conversion for media IDs if 'media_ids' in params and isinstance(params['media_ids'], list): params['media_ids'] = ','.join(map(str, params['media_ids'])) resource = klass.TWEET_ID_PREVIEW if params.get('id') else klass.TWEET_PREVIEW resource = resource.format(account_id=account.id, id=params.get('id')) response = Request(account.client, 'get', resource, params=params).perform() return response.body['data']
def preview(klass, account, **kwargs)
Returns an HTML preview of a tweet, either new or existing.
3.316408
3.027197
1.095538
params = {} params.update(kwargs) # handles array to string conversion for media IDs if 'media_ids' in params and isinstance(params['media_ids'], list): params['media_ids'] = ','.join(map(str, params['media_ids'])) resource = klass.TWEET_CREATE.format(account_id=account.id) response = Request(account.client, 'post', resource, params=params).perform() return response.body['data']
def create(klass, account, **kwargs)
Creates a "Promoted-Only" Tweet using the specialized Ads API end point.
3.616292
3.087183
1.171389
klass.PROPERTIES[name] = kwargs def getter(self): return getattr(self, '_%s' % name, kwargs.get('default', None)) if kwargs.get('readonly', False): setattr(klass, name, property(getter)) else: def setter(self, value): setattr(self, '_%s' % name, value) setattr(klass, name, property(getter, setter))
def resource_property(klass, name, **kwargs)
Builds a resource object property.
2.17367
2.212627
0.982393
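A self-contained sketch of how resource_property wires properties onto a class; the Resource class and the property names here are illustrative, not the SDK's:

def resource_property(klass, name, **kwargs):
    klass.PROPERTIES[name] = kwargs

    def getter(self):
        return getattr(self, '_%s' % name, kwargs.get('default', None))

    if kwargs.get('readonly', False):
        setattr(klass, name, property(getter))
    else:
        def setter(self, value):
            setattr(self, '_%s' % name, value)
        setattr(klass, name, property(getter, setter))

class Resource(object):
    PROPERTIES = {}

resource_property(Resource, 'id', readonly=True)       # read-only, backed by _id
resource_property(Resource, 'name', default='unnamed')  # read-write with a default

r = Resource()
r.name = 'my campaign'
print(r.id, r.name)  # None my campaign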
for name in self.PROPERTIES: attr = '_{0}'.format(name) transform = self.PROPERTIES[name].get('transform', None) value = response.get(name, None) if transform and transform == TRANSFORM.TIME and value: setattr(self, attr, dateutil.parser.parse(value)) if isinstance(value, int) and value == 0: continue # skip attribute else: setattr(self, attr, value) return self
def from_response(self, response)
Populates a given object's attributes from a parsed JSON API response. This helper handles all necessary type coercions as it assigns attribute values.
4.154754
4.077163
1.019031
params = {} for name in self.PROPERTIES: attr = '_{0}'.format(name) value = getattr(self, attr, None) or getattr(self, name, None) # skip attribute if value is None: continue if isinstance(value, datetime): params[name] = format_time(value) elif isinstance(value, list): params[name] = ','.join(map(str, value)) elif isinstance(value, bool): params[name] = str(value).lower() else: params[name] = value return params
def to_params(self)
Generates a dict of property values for the current object. This helper handles all necessary type coercions as it generates its output.
2.509379
2.415378
1.038918
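The coercions to_params applies, distilled into a standalone helper; format_time is replaced here by an assumed ISO-8601 strftime:

from datetime import datetime

def coerce(value):
    if isinstance(value, datetime):
        return value.strftime('%Y-%m-%dT%H:%M:%SZ')  # stand-in for format_time
    if isinstance(value, list):
        return ','.join(map(str, value))
    if isinstance(value, bool):
        return str(value).lower()
    return value

print(coerce(datetime(2018, 1, 1)))  # 2018-01-01T00:00:00Z
print(coerce([101, 102]))            # 101,102
print(coerce(True))                  # true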
json_body = [] for obj in objs: entity_type = klass._ENTITY_MAP[klass.__name__].lower() obj_json = {'params': obj.to_params()} if obj.id is None: obj_json['operation_type'] = 'Create' elif obj.to_delete is True: obj_json['operation_type'] = 'Delete' obj_json['params'][entity_type + '_id'] = obj.id else: obj_json['operation_type'] = 'Update' obj_json['params'][entity_type + '_id'] = obj.id json_body.append(obj_json) resource = klass.BATCH_RESOURCE_COLLECTION.format(account_id=account.id) response = Request(account.client, 'post', resource, body=json.dumps(json_body), headers={'Content-Type': 'application/json'}).perform() # persist each entity for obj, res_obj in zip(objs, response.body['data']): obj = obj.from_response(res_obj)
def batch_save(klass, account, objs)
Makes batch request(s) for a passed in list of objects
2.814915
2.772583
1.015268
def stats(self, metrics, **kwargs): # noqa return self.__class__.all_stats(self.account, [self.id], metrics, **kwargs)
Pulls a list of metrics for the current object instance.
null
null
null
end_time = kwargs.get('end_time', datetime.utcnow()) start_time = kwargs.get('start_time', end_time - timedelta(seconds=604800)) granularity = kwargs.get('granularity', GRANULARITY.HOUR) placement = kwargs.get('placement', PLACEMENT.ALL_ON_TWITTER) params = { 'metric_groups': ','.join(metric_groups), 'start_time': to_time(start_time, granularity), 'end_time': to_time(end_time, granularity), 'granularity': granularity.upper(), 'entity': klass.ANALYTICS_MAP[klass.__name__], 'placement': placement } params['entity_ids'] = ','.join(ids) return params
def _standard_params(klass, ids, metric_groups, **kwargs)
Sets the standard params for a stats request
2.879827
2.826528
1.018857
params = klass._standard_params(ids, metric_groups, **kwargs) resource = klass.RESOURCE_SYNC.format(account_id=account.id) response = Request(account.client, 'get', resource, params=params).perform() return response.body['data']
def all_stats(klass, account, ids, metric_groups, **kwargs)
Pulls a list of metrics for a specified set of object IDs.
4.790574
4.603015
1.040747
params = klass._standard_params(ids, metric_groups, **kwargs) params['platform'] = kwargs.get('platform', None) params['country'] = kwargs.get('country', None) params['segmentation_type'] = kwargs.get('segmentation_type', None) resource = klass.RESOURCE_ASYNC.format(account_id=account.id) response = Request(account.client, 'post', resource, params=params).perform() return response.body['data']
def queue_async_stats_job(klass, account, ids, metric_groups, **kwargs)
Queues a list of metrics for a specified set of object IDs asynchronously
3.60791
3.682081
0.979856
params = { 'job_ids': job_id } resource = klass.RESOURCE_ASYNC.format(account_id=account.id) response = Request(account.client, 'get', resource, params=params).perform() return response.body['data'][0]
def async_stats_job_result(klass, account, job_id, **kwargs)
Returns the results of the specified async job ID
4.790906
4.16679
1.149784
resource = urlparse(url) domain = '{0}://{1}'.format(resource.scheme, resource.netloc) response = Request(account.client, 'get', resource.path, domain=domain, raw_body=True, stream=True).perform() return response.body
def async_stats_job_data(klass, account, url, **kwargs)
Downloads and returns the results of a completed async job from the given URL
6.103134
5.984114
1.019889
audience = klass(account) getattr(audience, '__create_audience__')(name) try: return audience.reload() except BadRequest as e: audience.delete() raise e
def create(klass, account, name)
Creates a new tailored audience.
7.820993
6.61833
1.181717
resource = self.RESOURCE_USERS.format(account_id=self.account.id, id=self.id) headers = {'Content-Type': 'application/json'} response = Request(self.account.client, 'post', resource, headers=headers, body=json.dumps(params)).perform() success_count = response.body['data']['success_count'] total_count = response.body['data']['total_count'] return (success_count, total_count)
def users(self, params)
This is a private API and requires whitelisting from Twitter. This endpoint will allow partners to add, update and remove users from a given tailored_audience_id. The endpoint will also accept multiple user identifier types per user as well.
3.196924
3.171578
1.007992
self._validate_loaded() return TailoredAudiencePermission.all(self.account, self.id, **kwargs)
def permissions(self, **kwargs)
Returns a collection of permissions for the current tailored audience.
14.566651
6.721651
2.167124
resource = klass.RESOURCE_COLLECTION.format( account_id=account.id, tailored_audience_id=tailored_audience_id) request = Request(account.client, 'get', resource, params=kwargs) return Cursor(klass, request, init_with=[account])
def all(klass, account, tailored_audience_id, **kwargs)
Returns a Cursor instance for the given tailored audience permission resource.
3.645778
3.316385
1.099323
if self.id: method = 'put' resource = self.RESOURCE.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id, id=self.id) else: method = 'post' resource = self.RESOURCE_COLLECTION.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id) response = Request( self.account.client, method, resource, params=self.to_params()).perform() return self.from_response(response.body['data'])
def save(self)
Saves or updates the current tailored audience permission.
2.725255
2.293547
1.188227
resource = self.RESOURCE.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id, id=self.id) response = Request(self.account.client, 'delete', resource).perform() return self.from_response(response.body['data'])
def delete(self)
Deletes the current tailored audience permission.
4.249181
3.154251
1.347128
resource = klass.RESOURCE_CONVERSATIONS.format(account_id=account.id) request = Request( account.client, klass.METHOD, resource, headers=klass.HEADERS, body=params) return Cursor(klass, request, init_with=[account])
def __get(klass, account, client, params)
Helper function to get the conversation data. Returns a Cursor instance
7.771532
6.645175
1.1695
body = { "conversation_type": self.conversation_type, "audience_definition": self.audience_definition, "targeting_inputs": self.targeting_inputs } return self.__get(account=self.account, client=self.account.client, params=json.dumps(body))
def conversations(self)
Get the conversation topics for an input targeting criteria
5.898088
4.945632
1.192586
body = { "audience_definition": self.audience_definition, "targeting_inputs": self.targeting_inputs } resource = self.RESOURCE_DEMOGRAPHICS.format(account_id=self.account.id) response = Request( self.account.client, self.METHOD, resource, headers=self.HEADERS, body=json.dumps(body)).perform() return response.body['data']
def demographics(self)
Get the demographic breakdown for an input targeting criteria
5.328178
4.732966
1.125759
shell.ShellCommand.setupEnvironment(self, cmd) env = {} for k, v in self.build.getProperties().properties.items(): env[str(k)] = str(v[0]) if cmd.args['env'] is None: cmd.args['env'] = {} cmd.args['env'].update(env)
def setupEnvironment(self, cmd)
Turn all build properties into environment variables
3.543849
3.074055
1.152825
stdio = log.getText() total = passed = skipped = fails = warnings = errors = 0 hastests = False # Plone? That has lines starting "Ran" and "Total". Total is missing if there is only a single layer. # For this reason, we total ourselves which lets us work even if someone runs 2 batches of plone tests # from a single target # Example:: # Ran 24 tests with 0 failures and 0 errors in 0.009 seconds if not hastests: outputs = re.findall( "Ran (?P<count>[\d]+) tests with (?P<fail>[\d]+) failures and (?P<error>[\d]+) errors", stdio) for output in outputs: total += int(output[0]) fails += int(output[1]) errors += int(output[2]) hastests = True # Twisted # Example:: # FAILED (errors=5, successes=11) # PASSED (successes=16) if not hastests: for line in stdio.split("\n"): if line.startswith("FAILED (") or line.startswith("PASSED ("): hastests = True line = line[8:][:-1] stats = line.split(", ") data = {} for stat in stats: k, v = stat.split("=") data[k] = int(v) if "successes" not in data: total = 0 for number in re.findall( "Ran (?P<count>[\d]+) tests in ", stdio): total += int(number) data["successes"] = total - sum(data.values()) # This matches Nose and Django output # Example:: # Ran 424 tests in 152.927s # FAILED (failures=1) # FAILED (errors=3) if not hastests: fails += len(re.findall('FAIL:', stdio)) errors += len( re.findall( '======================================================================\nERROR:', stdio)) for number in re.findall("Ran (?P<count>[\d]+)", stdio): total += int(number) hastests = True # We work out passed at the end because most test runners don't tell us # and we can't distinguish between different test systems easily so we # might double count. passed = total - (skipped + fails + errors + warnings) # Update the step statistics with our shiny new totals if hastests: self.setStatistic('total', total) self.setStatistic('fails', fails) self.setStatistic('errors', errors) self.setStatistic('warnings', warnings) self.setStatistic('skipped', skipped) self.setStatistic('passed', passed)
def updateStats(self, log)
Parse test results out of common test harnesses. Currently supported are: * Plone * Nose * Trial * Something mitchell wrote in Java
4.747205
4.622036
1.027081
res = yield self.assertAllowed(request) if res: defer.returnValue(res) request.setHeader('Content-Type', 'application/json') if self._in_progress: defer.returnValue(json.dumps({'success': False, 'errors': ['reconfig already in progress']})) self._in_progress = True cfg = json.loads(request.content.read()) if cfg != self._cfg: try: err = yield self.saveCfg(cfg) except Exception as e: # noqa err = [repr(e)] if err is not None: self._in_progress = False yield self.saveCfg(self._cfg) defer.returnValue(json.dumps({'success': False, 'errors': err})) yield self.ep.master.reconfig() defer.returnValue(json.dumps({'success': True}))
def saveConfig(self, request)
I save the config and run check_config, potentially returning errors
3.331594
3.265591
1.020212
height, width = map_in.shape map_part = (2.0/11.0)*map_in # notice how [-1/sqrt(3), -1/sqrt(3), -1/sqrt(3)] * [-1/sqrt(3), -1/sqrt(3), -1/sqrt(3)]^T # equals [[1/3, 1/3, 1/3], [1/3, 1/3, 1/3], [1/3, 1/3, 1/3]] # multiply that by (3/11) and we have the 2d kernel from the example above # therefore the kernel is separable w = -1.0/numpy.sqrt(3.0) kernel = [w, w, w] def _anti_alias_step(original): # cf. comments above for the factor # this also makes a copy which might actually be superfluous result = original * (3.0/11.0) # we need to handle boundary conditions by hand, unfortunately # there might be a better way but this works (circular boundary) # notice how we'll need to add 2 to width and height later # because of this result = numpy.append(result, [result[0,:]], 0) result = numpy.append(result, numpy.transpose([result[:,0]]), 1) result = numpy.insert(result, [0], [result[-2,:]],0) result = numpy.insert(result, [0], numpy.transpose([result[:,-2]]), 1) # with a separable kernel we can convolve the rows first ... for y in range(height+2): result[y,1:-1] = numpy.convolve(result[y,:], kernel, 'valid') # ... and then the columns for x in range(width+2): result[1:-1,x] = numpy.convolve(result[:,x], kernel, 'valid') # throw away invalid values at the boundary result = result[1:-1,1:-1] result += map_part return result current = map_in for i in range(steps): current = _anti_alias_step(current) return current
def anti_alias(map_in, steps)
Execute the anti_alias operation steps times on the given map
4.185747
4.180669
1.001215
'''Count how many neighbours of a coordinate are set to one. This uses the same principles as anti_alias, compare comments there.''' height, width = mask.shape f = 2.0*radius+1.0 w = -1.0/numpy.sqrt(f) kernel = [w]*radius + [w] + [w]*radius result = mask * f for y in range(height): result[y,:] = numpy.convolve(result[y,:], kernel, 'same') for x in range(width): result[:,x] = numpy.convolve(result[:, x], kernel, 'same') return result - mask
def count_neighbours(mask, radius=1)
Count how many neighbours of a coordinate are set to one. This uses the same principles as anti_alias, compare comments there.
5.941465
3.422616
1.735943
min_dist = None nearest_hp_i = None for i, hp in enumerate(hot_points): dist = distance_f(p, hp) if min_dist is None or dist < min_dist: min_dist = dist nearest_hp_i = i return nearest_hp_i
def index_of_nearest(p, hot_points, distance_f=distance)
Given a point and a set of hot points, it finds the hot point nearest to the given point. An arbitrary distance function can be specified :return: the index of the nearest hot point, or None if the list of hot points is empty
1.760897
1.965635
0.895841
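Usage sketch with a plain Euclidean distance standing in for the module's distance function; the points are made up:

import math

def distance(pa, pb):
    return math.hypot(pa[0] - pb[0], pa[1] - pb[1])

def index_of_nearest(p, hot_points, distance_f=distance):
    min_dist = None
    nearest_hp_i = None
    for i, hp in enumerate(hot_points):
        dist = distance_f(p, hp)
        if min_dist is None or dist < min_dist:
            min_dist = dist
            nearest_hp_i = i
    return nearest_hp_i

print(index_of_nearest((0, 0), [(5, 5), (1, 1), (9, 0)]))  # 1
print(index_of_nearest((0, 0), []))                        # None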
try: data, thresholds = val except ValueError: raise ValueError("Pass an iterable: (data, thresholds)") else: if data.shape[0] != self.height: raise Exception("Setting data with wrong height") if data.shape[1] != self.width: raise Exception("Setting data with wrong width") self.layers['precipitation'] = LayerWithThresholds(data, thresholds)
def precipitation(self, val)
Precipitation is a value in [-1,1]
4.672082
4.912724
0.951017
rng = numpy.random.RandomState(seed) # create our own random generator base = rng.randint(0, 4096) curve_gamma = world.gamma_curve curve_bonus = world.curve_offset height = world.height width = world.width border = width / 4 precipitations = numpy.zeros((height, width), dtype=float) octaves = 6 freq = 64.0 * octaves n_scale = 1024 / float(height) #This is a variable I am adding. It exists #so that worlds sharing a common seed but #different sizes will have similar patterns for y in range(height):#TODO: numpy for x in range(width): n = snoise2((x * n_scale) / freq, (y * n_scale) / freq, octaves, base=base) # Added to allow noise pattern to wrap around right and left. if x < border: n = (snoise2( (x * n_scale) / freq, (y * n_scale) / freq, octaves, base=base) * x / border) + ( snoise2(( (x * n_scale) + width) / freq, (y * n_scale) / freq, octaves, base=base) * (border - x) / border) precipitations[y, x] = n #find ranges min_precip = precipitations.min() max_precip = precipitations.max() min_temp = world.layers['temperature'].min() max_temp = world.layers['temperature'].max() precip_delta = (max_precip - min_precip) temp_delta = (max_temp - min_temp) #normalize temperature and precipitation arrays t = (world.layers['temperature'].data - min_temp) / temp_delta p = (precipitations - min_precip) / precip_delta #modify precipitation based on temperature #-------------------------------------------------------------------------------- # # Ok, some explanation here because why the formula is doing this may be a # little confusing. We are going to generate a modified gamma curve based on # normalized temperature and multiply our precipitation amounts by it. # # numpy.power(t,curve_gamma) generates a standard gamma curve. However # we probably don't want to be multiplying precipitation by 0 at the far # side of the curve. To avoid this we multiply the curve by (1 - curve_bonus) # and then add back curve_bonus. Thus, if we have a curve bonus of .2 then # the range of our modified gamma curve goes from 0-1 to 0-.8 after we # multiply and then to .2-1 after we add back the curve_bonus. # # Because we renormalize there is not much point to offsetting the opposite end # of the curve so it is less than or more than 1. We are trying to avoid # setting the start of the curve to 0 because f(t) * p would equal 0 when t equals # 0. However f(t) * p does not automatically equal 1 when t equals 1 and if we # raise or lower the value for f(t) at 1 it would have negligible impact after # renormalizing. # #-------------------------------------------------------------------------------- curve = (numpy.power(t, curve_gamma) * (1-curve_bonus)) + curve_bonus precipitations = numpy.multiply(p, curve) #Renormalize precipitation because the precipitation #changes will probably not fully extend from -1 to 1. min_precip = precipitations.min() max_precip = precipitations.max() precip_delta = (max_precip - min_precip) precipitations = (((precipitations - min_precip) / precip_delta) * 2) - 1 return precipitations
def _calculate(seed, world)
Precipitation is a value in [-1,1]
5.009169
4.961231
1.009662
def from_dimensions(cls, width, height, channels, filename=None, grayscale=False, channel_bitdepth=8, has_alpha=False, palette=None): assert 1 <= channels <= 4, "PNG only supports 1 to 4 channels per pixel. Error writing %s." % filename dimensions = (height, width, channels) if channels == 1: dimensions = (height, width) # keep the array 2-dimensional when possible _array = numpy.zeros(dimensions, dtype=PNGWriter.get_dtype(channel_bitdepth)) return cls(_array, filename, grayscale=grayscale, channel_bitdepth=channel_bitdepth, has_alpha=has_alpha, palette=palette, channels=channels)
Creates an empty image according to width, height and channels. Channels must be 1 (grayscale/palette), 2 (LA), 3 (RGB) or 4 (RGBA). The image will be filled with black, transparent pixels.
null
null
null
def from_array(cls, array, filename=None, channels=3, scale_to_range=False, grayscale=False, channel_bitdepth=8, has_alpha=False, palette=None): if scale_to_range: amax = array.max() amin = array.min() _array = (2**channel_bitdepth - 1) * (array - amin) / (amax - amin) else: _array = array _array = numpy.rint(_array).astype(dtype=PNGWriter.get_dtype(channel_bitdepth)) # proper rounding return cls(_array, filename, channels=channels, grayscale=grayscale, channel_bitdepth=channel_bitdepth, has_alpha=has_alpha, palette=palette)
Creates an image by using a provided array. The array may be ready to be written or still need fine-tuning via set_pixel(). The array should not have more than 3 dimensions or the output might be unexpected.
null
null
null
def set_pixel(self, x, y, color): try: # these checks are for convenience, not for safety if len(color) < self.channels: # color is a tuple (length >= 1) if len(color) == 1: if self.channels == 2: color = [color[0], 255] elif self.channels == 3: color = [color[0], color[0], color[0]] elif self.channels == 4: color = [color[0], color[0], color[0], 255] elif len(color) == 2: if self.channels == 3: color = [color[0], color[1], 0] elif self.channels == 4: color = [color[0], color[1], 0, 255] elif len(color) == 3: if self.channels == 4: color = [color[0], color[1], color[2], 255] except TypeError: # color is not an iterable if self.channels > 1: if self.channels == 2: color = [color, 255] elif self.channels == 3: color = [color, color, color] else: # only values 1..4 are allowed color = [color, color, color, 255] self.array[y, x] = color
Color may be: value, tuple, list etc. If the image is set to contain more color-channels than len(color), the remaining channels will be filled automatically. Example (channels = 4, i.e. RGBA output): color = 17 -> color = [17,17,17,255] color = (17, 99) -> color = [17,99,0,255] Passing in shorthand color-tuples for larger images on a regular basis might result in a very noticeable performance penalty.
null
null
null
color_step = 1.5 if sea_level is None: sea_level = -1 if elevation < sea_level/2: elevation /= sea_level return 0.0, 0.0, 0.75 + 0.5 * elevation elif elevation < sea_level: elevation /= sea_level return 0.0, 2 * (elevation - 0.5), 1.0 else: elevation -= sea_level if elevation < 1.0 * color_step: return (0.0, 0.5 + 0.5 * elevation / color_step, 0.0) elif elevation < 1.5 * color_step: return 2 * (elevation - 1.0 * color_step) / color_step, 1.0, 0.0 elif elevation < 2.0 * color_step: return 1.0, 1.0 - (elevation - 1.5 * color_step) / color_step, 0 elif elevation < 3.0 * color_step: return (1.0 - 0.5 * (elevation - 2.0 * color_step) / color_step, 0.5 - 0.25 * (elevation - 2.0 * color_step) / color_step, 0) elif elevation < 5.0 * color_step: return (0.5 - 0.125 * (elevation - 3.0 * color_step) / (2 * color_step), 0.25 + 0.125 * (elevation - 3.0 * color_step) / (2 * color_step), 0.375 * (elevation - 3.0 * color_step) / (2 * color_step)) elif elevation < 8.0 * color_step: return (0.375 + 0.625 * (elevation - 5.0 * color_step) / (3 * color_step), 0.375 + 0.625 * (elevation - 5.0 * color_step) / (3 * color_step), 0.375 + 0.625 * (elevation - 5.0 * color_step) / (3 * color_step)) else: elevation -= 8.0 * color_step while elevation > 2.0 * color_step: elevation -= 2.0 * color_step return 1, 1 - elevation / 4.0, 1
def _elevation_color(elevation, sea_level=1.0)
Calculate color based on elevation :param elevation: :return:
1.694641
1.694759
0.999931
''' Do some *args magic to return a tuple, which has the sums of all tuples in *args ''' # Adapted from an answer here: http://stackoverflow.com/questions/14180866/sum-each-value-in-a-list-of-tuples added = [sum(x) for x in zip(*args)] return numpy.clip(added, 0, 255)
def add_colors(*args)
Do some *args magic to return a tuple, which has the sums of all tuples in *args
6.862995
3.92443
1.748788
''' Average the values of two colors together ''' r = int((c1[0] + c2[0])/2) g = int((c1[1] + c2[1])/2) b = int((c1[2] + c2[2])/2) return (r, g, b)
def average_colors(c1, c2)
Average the values of two colors together
1.992058
2.040415
0.9763
''' Convert raw elevation into normalized values between 0 and 255, and return a numpy array of these values ''' e = world.layers['elevation'].data ocean = world.layers['ocean'].data mask = numpy.ma.array(e, mask=ocean) # only land min_elev_land = mask.min() max_elev_land = mask.max() elev_delta_land = max_elev_land - min_elev_land mask = numpy.ma.array(e, mask=numpy.logical_not(ocean)) # only ocean min_elev_sea = mask.min() max_elev_sea = mask.max() elev_delta_sea = max_elev_sea - min_elev_sea c = numpy.empty(e.shape, dtype=numpy.float) c[numpy.invert(ocean)] = (e[numpy.invert(ocean)] - min_elev_land) * 127 / elev_delta_land + 128 c[ocean] = (e[ocean] - min_elev_sea) * 127 / elev_delta_sea c = numpy.rint(c).astype(dtype=numpy.int32) # proper rounding return c
def get_normalized_elevation_array(world)
Convert raw elevation into normalized values between 0 and 255, and return a numpy array of these values
2.544116
2.261184
1.125126
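A toy run of the land/sea split normalization above, with a made-up 2x2 map: ocean cells land in 0..127 and land cells in 128..255.

import numpy

e = numpy.array([[0.2, 0.9], [1.4, 2.0]])            # elevations (assumed)
ocean = numpy.array([[True, True], [False, False]])  # ocean mask (assumed)

c = numpy.empty(e.shape)
land = ~ocean
c[land] = (e[land] - e[land].min()) * 127 / (e[land].max() - e[land].min()) + 128
c[ocean] = (e[ocean] - e[ocean].min()) * 127 / (e[ocean].max() - e[ocean].min())
print(numpy.rint(c).astype(int))
# [[  0 127]
#  [128 255]]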
''' This is the "business logic" for determining the base biome color in satellite view. This includes generating some "noise" at each spot in a pixel's rgb value, potentially modifying the noise based on elevation, and finally incorporating this with the base biome color. The basic rules regarding noise generation are: - Oceans have no noise added - land tiles start with noise somewhere inside (-NOISE_RANGE, NOISE_RANGE) for each rgb value - land tiles with high elevations further modify the noise by set amounts (to drain some of the color and make the map look more like mountains) The biome's base color may be interpolated with a predefined mountain brown color if the elevation is high enough. Finally, the noise plus the biome color are added and returned. rng refers to an instance of a random number generator used to draw the random samples needed by this function. ''' v = world.biome_at((x, y)).name() biome_color = _biome_satellite_colors[v] # Default is no noise - will be overwritten if this tile is land noise = (0, 0, 0) if world.is_land((x, y)): ## Generate some random noise to apply to this pixel # There is noise for each element of the rgb value # This noise will be further modified by the height of this tile noise = rng.randint(-NOISE_RANGE, NOISE_RANGE, size=3) # draw three random numbers at once ####### Case 1 - elevation is very high ######## if elev > HIGH_MOUNTAIN_ELEV: # Modify the noise to make the area slightly brighter to simulate snow-topped mountains. noise = add_colors(noise, HIGH_MOUNTAIN_NOISE_MODIFIER) # Average the biome's color with the MOUNTAIN_COLOR to tint the terrain biome_color = average_colors(biome_color, MOUNTAIN_COLOR) ####### Case 2 - elevation is high ######## elif elev > MOUNTAIN_ELEV: # Modify the noise to make this tile slightly darker, especially draining the green noise = add_colors(noise, MOUNTAIN_NOISE_MODIFIER) # Average the biome's color with the MOUNTAIN_COLOR to tint the terrain biome_color = average_colors(biome_color, MOUNTAIN_COLOR) ####### Case 3 - elevation is somewhat high ######## elif elev > HIGH_HILL_ELEV: noise = add_colors(noise, HIGH_HILL_NOISE_MODIFIER) ####### Case 4 - elevation is a little bit high ######## elif elev > HILL_ELEV: noise = add_colors(noise, HILL_NOISE_MODIFIER) # There is also a minor base modifier to the pixel's rgb value based on height modification_amount = int(elev / BASE_ELEVATION_INTENSITY_MODIFIER) base_elevation_modifier = (modification_amount, modification_amount, modification_amount) this_tile_color = add_colors(biome_color, noise, base_elevation_modifier) return this_tile_color
def get_biome_color_based_on_elevation(world, elev, x, y, rng)
This is the "business logic" for determining the base biome color in satellite view. This includes generating some "noise" at each spot in a pixel's rgb value, potentially modifying the noise based on elevation, and finally incorporating this with the base biome color. The basic rules regarding noise generation are: - Oceans have no noise added - land tiles start with noise somewhere inside (-NOISE_RANGE, NOISE_RANGE) for each rgb value - land tiles with high elevations further modify the noise by set amounts (to drain some of the color and make the map look more like mountains) The biome's base color may be interpolated with a predefined mountain brown color if the elevation is high enough. Finally, the noise plus the biome color are added and returned. rng refers to an instance of a random number generator used to draw the random samples needed by this function.
6.033388
2.763002
2.183635
e = world.layers['elevation'].data c = numpy.empty(e.shape, dtype=numpy.float) has_ocean = not (sea_level is None or world.layers['ocean'].data is None or not world.layers['ocean'].data.any()) # or 'not any ocean' mask_land = numpy.ma.array(e, mask=world.layers['ocean'].data if has_ocean else False) # only land min_elev_land = mask_land.min() max_elev_land = mask_land.max() elev_delta_land = (max_elev_land - min_elev_land) / 11.0 if has_ocean: land = numpy.logical_not(world.layers['ocean'].data) mask_ocean = numpy.ma.array(e, mask=land) # only ocean min_elev_sea = mask_ocean.min() max_elev_sea = mask_ocean.max() elev_delta_sea = max_elev_sea - min_elev_sea c[world.layers['ocean'].data] = ((e[world.layers['ocean'].data] - min_elev_sea) / elev_delta_sea) c[land] = ((e[land] - min_elev_land) / elev_delta_land) + 1 else: c = ((e - min_elev_land) / elev_delta_land) + 1 for y in range(world.height): for x in range(world.width): r, g, b = elevation_color(c[y, x], sea_level) target.set_pixel(x, y, (int(r * 255), int(g * 255), int(b * 255), 255))
def draw_simple_elevation(world, sea_level, target)
This function can be used on a generic canvas (either an image to save on disk or a canvas part of a GUI)
2.347248
2.358481
0.995237
# iterate through each cell for x in range(world.width - 1): for y in range(world.height - 1): # search around cell for a direction path = self.find_quick_path([x, y], world) if path: tx, ty = path flow_dir = [tx - x, ty - y] key = 0 for direction in DIR_NEIGHBORS_CENTER: if direction == flow_dir: water_path[y, x] = key key += 1
def find_water_flow(self, world, water_path)
Find the flow direction for each cell in heightmap
4.690854
4.416118
1.062212
river_source_list = [] # Using the wind and rainfall data, create river 'seeds' by # flowing rainfall along paths until a 'flow' threshold is reached # and we have a beginning of a river... trickle->stream->river->sea # step one: Using flow direction, follow the path for each cell # adding the previous cell's flow to the current cell's flow. # step two: We loop through the water flow map looking for cells # above the water flow threshold. These are our river sources and # we mark them as rivers. While looking, the cells with no # out-going flow, above water flow threshold and are still # above sea level are marked as 'sources'. for y in range(0, world.height - 1): for x in range(0, world.width - 1): rain_fall = world.layers['precipitation'].data[y, x] water_flow[y, x] = rain_fall if water_path[y, x] == 0: continue # ignore cells without flow direction cx, cy = x, y # begin with starting location neighbour_seed_found = False # follow flow path to where it may lead while not neighbour_seed_found: # have we found a seed? if world.is_mountain((cx, cy)) and water_flow[cy, cx] >= RIVER_TH: # try not to create seeds around other seeds for seed in river_source_list: sx, sy = seed if in_circle(9, cx, cy, sx, sy): neighbour_seed_found = True if neighbour_seed_found: break # we do not want seeds for neighbors river_source_list.append([cx, cy]) # river seed break # no path means dead end... if water_path[cy, cx] == 0: break # break out of loop # follow path, add water flow from previous cell dx, dy = DIR_NEIGHBORS_CENTER[water_path[cy, cx]] nx, ny = cx + dx, cy + dy # calculate next cell water_flow[ny, nx] += rain_fall cx, cy = nx, ny # set current cell to next cell return river_source_list
def river_sources(world, water_flow, water_path)
Find places on the map where river sources can be found
6.415879
6.407528
1.001303
current_location = source path = [source] # start the flow while True: x, y = current_location # is there a river nearby, flow into it for dx, dy in DIR_NEIGHBORS: ax, ay = x + dx, y + dy if self.wrap: ax, ay = overflow(ax, world.width), overflow(ay, world.height) for river in river_list: if [ax, ay] in river: merge = False for rx, ry in river: if [ax, ay] == [rx, ry]: merge = True path.append([rx, ry]) elif merge: path.append([rx, ry]) return path # skip the rest, return path # found a sea? if world.is_ocean((x, y)): break # find our immediate lowest elevation and flow there quick_section = self.find_quick_path(current_location, world) if quick_section: path.append(quick_section) current_location = quick_section continue # stop here and enter back into loop is_wrapped, lower_elevation = self.findLowerElevation( current_location, world) if lower_elevation and not is_wrapped: lower_path = worldengine.astar.PathFinder().find( world.layers['elevation'].data, current_location, lower_elevation) if lower_path: path += lower_path current_location = path[-1] else: break elif lower_elevation and is_wrapped: # TODO: make this more natural max_radius = 40 cx, cy = current_location lx, ly = lower_elevation if x < 0 or y < 0 or x > world.width or y > world.height: raise Exception( "BUG: fix me... we shouldn't be here: %s %s" % ( current_location, lower_elevation)) if not in_circle(max_radius, cx, cy, lx, cy): # are we wrapping on x axis? if cx - lx < 0: lx = 0 # move to left edge nx = world.width - 1 # next step is wrapped around else: lx = world.width - 1 # move to right edge nx = 0 # next step is wrapped around ly = ny = int((cy + ly) / 2) # move halfway elif not in_circle(max_radius, cx, cy, cx, ly): # are we wrapping on y axis? if cy - ly < 0: ly = 0 # move to top edge ny = world.height - 1 # next step is wrapped around else: ly = world.height - 1 # move to bottom edge ny = 0 # next step is wrapped around lx = nx = int((cx + lx) / 2) # move halfway else: raise Exception( "BUG: fix me... we are not in circle: %s %s" % ( current_location, lower_elevation)) # find our way to the edge edge_path = worldengine.astar.PathFinder().find( world.layers['elevation'].data, [cx, cy], [lx, ly]) if not edge_path: # can't find any other path, make it a lake lake_list.append(current_location) break path += edge_path # add our newly found path path.append([nx, ny]) # finally add our overflow to other side current_location = path[-1] # find our way to the lowest position originally found lower_path = worldengine.astar.PathFinder().find( world.layers['elevation'].data, current_location, lower_elevation) path += lower_path current_location = path[-1] else: # can't find any other path, make it a lake lake_list.append(current_location) break # end of river if not world.contains(current_location): print("Why are we here:", current_location) return path
def river_flow(self, source, world, river_list, lake_list)
Simulate fluid dynamics by using a starting point and flowing to the lowest available point
3.399722
3.394985
1.001395
'''Validate that each point in the river is at an elevation equal to or lower than the previous one''' celevation = 1.0 for r in river: rx, ry = r relevation = world.layers['elevation'].data[ry, rx] if relevation <= celevation: celevation = relevation elif relevation > celevation: world.layers['elevation'].data[ry, rx] = celevation return river
def cleanUpFlow(self, river, world)
Validate that each point in the river is at an elevation equal to or lower than the previous one
6.894082
4.00628
1.720819
'''Try to find a lower elevation within the range of an increasing circle's radius, then find the best path and return it''' x, y = source currentRadius = 1 maxRadius = 40 lowestElevation = world.layers['elevation'].data[y, x] destination = [] notFound = True isWrapped = False wrapped = [] while notFound and currentRadius <= maxRadius: for cx in range(-currentRadius, currentRadius + 1): for cy in range(-currentRadius, currentRadius + 1): rx, ry = x + cx, y + cy # are we within bounds? if not self.wrap and not world.contains((rx, ry)): continue # are we within a circle? if not in_circle(currentRadius, x, y, rx, ry): continue rx, ry = overflow(rx, world.width), overflow(ry, world.height) # if utilities.outOfBounds([x+cx, y+cy], self.size): # print "Fixed:",x ,y, rx, ry elevation = world.layers['elevation'].data[ry, rx] # have we found a lower elevation? if elevation < lowestElevation: lowestElevation = elevation destination = [rx, ry] notFound = False if not world.contains((x + cx, y + cy)): wrapped.append(destination) currentRadius += 1 if destination in wrapped: isWrapped = True # print "Wrapped lower elevation found:", rx, ry, "!" return isWrapped, destination
def findLowerElevation(self, source, world)
Try to find a lower elevation within the range of an increasing circle's radius, then find the best path and return it
5.088179
4.027184
1.263458
# erosion around river, create river valley for r in river: rx, ry = r radius = 2 for x in range(rx - radius, rx + radius): for y in range(ry - radius, ry + radius): if not self.wrap and not world.contains( (x, y)): # ignore edges of map continue x, y = overflow(x, world.width), overflow(y, world.height) curve = 1.0 if [x, y] == [0, 0]: # ignore center continue if [x, y] in river: # ignore river itself continue if world.layers['elevation'].data[y, x] <= world.layers['elevation'].data[ry, rx]: # ignore areas lower than river itself continue if not in_circle(radius, rx, ry, x, y): # ignore things outside a circle continue adx, ady = math.fabs(rx - x), math.fabs(ry - y) if adx == 1 or ady == 1: curve = 0.2 elif adx == 2 or ady == 2: curve = 0.05 diff = world.layers['elevation'].data[ry, rx] - world.layers['elevation'].data[y, x] newElevation = world.layers['elevation'].data[y, x] + ( diff * curve) if newElevation <= world.layers['elevation'].data[ry, rx]: print('newElevation is <= than river, fix me...') newElevation = world.layers['elevation'].data[ry, rx] world.layers['elevation'].data[y, x] = newElevation return
def river_erosion(self, river, world)
Simulate erosion in heightmap based on river path. * current location must be equal to or less than previous location * riverbed is carved out by % of volume/flow * sides of river are also eroded to slope into riverbed.
3.372776
3.340351
1.009707
isSeed = True px, py = (0, 0) for x, y in river: if isSeed: rivermap[y, x] = water_flow[y, x] isSeed = False else: rivermap[y, x] = precipitations[y, x] + rivermap[py, px] px, py = x, y
def rivermap_update(self, river, water_flow, rivermap, precipitations)
Update the rivermap with the rainfall that is to become the waterflow
3.163618
3.417901
0.925603
elevation, plates = generate_plates_simulation(seed, width, height, num_plates=num_plates) world = World(world_name, Size(width, height), seed, GenerationParameters(num_plates, -1.0, "plates")) world.elevation = (numpy.array(elevation).reshape(height, width), None) world.plates = numpy.array(plates, dtype=numpy.uint16).reshape(height, width) # Generate images filename = '%s/plates_%s.png' % (output_dir, world_name) draw_simple_elevation_on_file(world, filename, None) print("+ plates image generated in '%s'" % filename) geo.center_land(world) filename = '%s/centered_plates_%s.png' % (output_dir, world_name) draw_simple_elevation_on_file(world, filename, None) print("+ centered plates image generated in '%s'" % filename)
def generate_plates(seed, world_name, output_dir, width, height, num_plates=10)
Eventually this method should be invoked when generation is called and asked to stop at step "plates"; it should not be a different operation :param seed: :param world_name: :param output_dir: :param width: :param height: :param num_plates: :return:
3.393499
3.583491
0.946981
for y in range(world.height): for x in range(world.width): if world.is_land((x, y)) and (world.layers['river_map'].data[y, x] > 0.0): for dx in range(factor): for dy in range(factor): target.set_pixel(x * factor + dx, y * factor + dy, (0, 0, 128, 255)) if world.is_land((x, y)) and (world.layers['lake_map'].data[y, x] != 0): for dx in range(factor): for dy in range(factor): target.set_pixel(x * factor + dx, y * factor + dy, (0, 100, 128, 255))
def draw_rivers_on_image(world, target, factor=1)
Draw only the rivers; it expects the background to be already in place
1.976769
1.951846
1.012769
y_sums = world.layers['elevation'].data.sum(1)  # 1 == sum along x-axis
y_with_min_sum = y_sums.argmin()
if get_verbose():
    print("geo.center_land: height complete")

x_sums = world.layers['elevation'].data.sum(0)  # 0 == sum along y-axis
x_with_min_sum = x_sums.argmin()
if get_verbose():
    print("geo.center_land: width complete")

latshift = 0
world.layers['elevation'].data = numpy.roll(
    numpy.roll(world.layers['elevation'].data,
               -y_with_min_sum + latshift, axis=0),
    -x_with_min_sum, axis=1)
world.layers['plates'].data = numpy.roll(
    numpy.roll(world.layers['plates'].data,
               -y_with_min_sum + latshift, axis=0),
    -x_with_min_sum, axis=1)
if get_verbose():
    print("geo.center_land: shift complete")
def center_land(world)
Translate the map horizontally and vertically to put as much ocean as possible at the borders. It operates on the elevation and plates maps
2.60859
2.43926
1.069419
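The recentering boils down to two numpy.roll calls; a tiny standalone demo of the idea on a toy array:

import numpy as np

e = np.array([[9, 9, 9],
              [9, 1, 9],
              [0, 0, 0]])           # row 2 and column 1 have the lowest sums
y_shift = e.sum(1).argmin()         # 2
x_shift = e.sum(0).argmin()         # 1
print(np.roll(np.roll(e, -y_shift, axis=0), -x_shift, axis=1))
# the lowest-sum row and column (mostly ocean) now sit at the map border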
ocean_border = int(min(30, max(world.width / 5, world.height / 5)))

def place_ocean(x, y, i):
    # scale elevation linearly from ~0 at the border to its full
    # value ocean_border cells inland
    world.layers['elevation'].data[y, x] = \
        (world.layers['elevation'].data[y, x] * i) / ocean_border

for x in range(world.width):
    for i in range(ocean_border):
        place_ocean(x, i, i)
        place_ocean(x, world.height - i - 1, i)

for y in range(world.height):
    for i in range(ocean_border):
        place_ocean(i, y, i)
        place_ocean(world.width - i - 1, y, i)
def place_oceans_at_map_borders(world)
Lower the elevation near the border of the map
2.206613
2.152577
1.025103
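The border ramp is linear in the distance i from the edge; quick numbers (standalone, toy values):

ocean_border = 4
elevation = 8.0
for i in range(ocean_border):
    print(i, elevation * i / ocean_border)  # 0 0.0, 1 2.0, 2 4.0, 3 6.0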
e = world.layers['elevation'].data ocean = fill_ocean(e, ocean_level) hl = find_threshold_f(e, 0.10) # the highest 10% of all (!) land are declared hills ml = find_threshold_f(e, 0.03) # the highest 3% are declared mountains e_th = [('sea', ocean_level), ('plain', hl), ('hill', ml), ('mountain', None)] harmonize_ocean(ocean, e, ocean_level) world.ocean = ocean world.elevation = (e, e_th) world.sea_depth = sea_depth(world, ocean_level)
def initialize_ocean_and_thresholds(world, ocean_level=1.0)
Calculate the ocean, the sea depth and the elevation thresholds :param world: a world having elevation but not thresholds :param ocean_level: the elevation representing the ocean level :return: nothing, the world will be changed
5.900518
5.7262
1.030442
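find_threshold_f is assumed to return the elevation above which the given fraction of cells lies; a minimal numpy sketch of that contract (the project's actual helper may compute it differently, e.g. restricted to land cells):

import numpy as np

def find_threshold_f(elevation, fraction):
    # elevation value such that roughly `fraction` of all cells are above it
    return np.quantile(elevation, 1.0 - fraction)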
shallow_sea = ocean_level * 0.85
midpoint = shallow_sea / 2.0

ocean_points = numpy.logical_and(elevation < shallow_sea, ocean)

# pull every ocean cell towards the midpoint, flattening the floor
shallow_ocean = numpy.logical_and(elevation < midpoint, ocean_points)
elevation[shallow_ocean] = midpoint - ((midpoint - elevation[shallow_ocean]) / 5.0)

deep_ocean = numpy.logical_and(elevation > midpoint, ocean_points)
elevation[deep_ocean] = midpoint + ((elevation[deep_ocean] - midpoint) / 5.0)
def harmonize_ocean(ocean, elevation, ocean_level)
The goal of this function is to make the ocean floor less noisy. The underwater erosion should cause the ocean floor to be more uniform
2.69279
2.677057
1.005877
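Worked numbers for the compression above, assuming ocean_level = 1.0 (so shallow_sea = 0.85 and midpoint = 0.425): both shallow and deep cells are pulled four fifths of the way towards the midpoint.

midpoint = 0.425
for e in (0.1, 0.8):
    if e < midpoint:
        print(midpoint - (midpoint - e) / 5.0)  # 0.1 -> 0.36
    else:
        print(midpoint + (e - midpoint) / 5.0)  # 0.8 -> 0.5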
config = Configuration() config.root = root config.version_scheme = version_scheme config.local_scheme = local_scheme config.write_to = write_to config.write_to_template = write_to_template config.relative_to = relative_to config.tag_regex = tag_regex config.fallback_version = fallback_version config.parse = parse config.git_describe_command = git_describe_command parsed_version = _do_parse(config) if parsed_version: version_string = format_version( parsed_version, version_scheme=version_scheme, local_scheme=local_scheme ) dump_version( root=root, version=version_string, write_to=write_to, template=write_to_template, ) return version_string
def get_version( root=".", version_scheme="guess-next-dev", local_scheme="node-and-date", write_to=None, write_to_template=None, relative_to=None, tag_regex=None, fallback_version=None, parse=None, git_describe_command=None, )
If supplied, relative_to should be a file from which root may be resolved. Typically called by a script or module that is not in the root of the repository to direct setuptools_scm to the root of the repository by supplying ``__file__``.
1.763474
1.832955
0.962093
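Typical usage from a script that lives somewhere inside the repository, pointing setuptools_scm at the root via __file__ (this mirrors the documented pattern):

from setuptools_scm import get_version

# resolve the repository root relative to this file
print(get_version(root="..", relative_to=__file__))
# e.g. "1.2.3.dev4+g1a2b3c4", depending on tags and working-tree state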
if IS_WINDOWS or PY2: env_dict.update((key, str(value)) for (key, value) in env_dict.items()) return env_dict
def _always_strings(env_dict)
On Windows and Python 2, environment dictionaries must be strings and not unicode.
4.234577
3.285635
1.288815
stat = os.stat(path) info = get_file_info(path) # rewrite st_ino, st_dev, and st_nlink based on file info return nt.stat_result( (stat.st_mode,) + (info.file_index, info.volume_serial_number, info.number_of_links) + stat[4:] )
def compat_stat(path)
Generate stat as found on Python 3.2 and later.
4.425584
4.194473
1.055099
realpath = os.path.normcase(os.path.realpath(path))
seen = set()
res = []
for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
    # dirpath with symlinks resolved
    realdirpath = os.path.normcase(os.path.realpath(dirpath))

    def _link_not_in_scm(n):
        fn = os.path.join(realdirpath, os.path.normcase(n))
        return os.path.islink(fn) and fn not in scm_files

    if realdirpath not in scm_dirs:
        # directory not in scm, don't walk its content
        dirnames[:] = []
        continue
    if (
        os.path.islink(dirpath)
        and not os.path.relpath(realdirpath, realpath).startswith(os.pardir)
    ):
        # a symlink to a directory not outside path:
        # we keep it in the result and don't walk its content
        res.append(os.path.join(path, os.path.relpath(dirpath, path)))
        dirnames[:] = []
        continue
    if realdirpath in seen:
        # symlink loop protection
        dirnames[:] = []
        continue
    dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
    for filename in filenames:
        if _link_not_in_scm(filename):
            continue
        # dirpath + filename with symlinks preserved
        fullfilename = os.path.join(dirpath, filename)
        if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
            res.append(os.path.join(path, os.path.relpath(fullfilename, path)))
    seen.add(realdirpath)
return res
def scm_find_files(path, scm_files, scm_dirs)
setuptools compatible file finder that follows symlinks
- path: the root directory from which to search
- scm_files: set of scm controlled files and symlinks
  (including symlinks to directories)
- scm_dirs: set of scm controlled directories
  (including directories containing no scm controlled files)
scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
with normalized case (normcase)

Spec here:
http://setuptools.readthedocs.io/en/latest/setuptools.html#adding-support-for-revision-control-systems
2.427303
2.444831
0.99283
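A hedged usage sketch, assuming scm_find_files is importable and that the two sets were built realpath- and normcase-normalized as the docstring requires (paths here are invented):

import os

def norm(p):
    return os.path.normcase(os.path.realpath(p))

root = "."
scm_files = {norm(os.path.join(root, "setup.py"))}
scm_dirs = {norm(root)}
print(scm_find_files(root, scm_files, scm_dirs))  # ['./setup.py'] if present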
if not config:
    config = Configuration(root=root)

if not has_command("git"):
    return

wd = GitWorkdir.from_potential_worktree(config.absolute_root)
if wd is None:
    return
if pre_parse:
    pre_parse(wd)

if config.git_describe_command:
    describe_command = config.git_describe_command

out, unused_err, ret = wd.do_ex(describe_command)
if ret:
    # If the describe command failed, fall back to plain commit metadata.
    rev_node = wd.node()
    dirty = wd.is_dirty()

    if rev_node is None:
        return meta("0.0", distance=0, dirty=dirty, config=config)

    return meta(
        "0.0",
        distance=wd.count_all_nodes(),
        node="g" + rev_node,
        dirty=dirty,
        branch=wd.get_branch(),
        config=config,
    )
else:
    tag, number, node, dirty = _git_parse_describe(out)

    branch = wd.get_branch()
    if number:
        return meta(
            tag,
            config=config,
            distance=number,
            node=node,
            dirty=dirty,
            branch=branch,
        )
    else:
        return meta(tag, config=config, node=node, dirty=dirty, branch=branch)
def parse( root, describe_command=DEFAULT_DESCRIBE, pre_parse=warn_on_shallow, config=None )
:param pre_parse: experimental pre_parse action, may change at any time
4.161372
4.081491
1.019572
trace("tag", tag) if not config: config = Configuration() tagdict = _parse_version_tag(tag, config) if not isinstance(tagdict, dict) or not tagdict.get("version", None): warnings.warn("tag %r no version found" % (tag,)) return None version = tagdict["version"] trace("version pre parse", version) if tagdict.get("suffix", ""): warnings.warn( "tag %r will be stripped of its suffix '%s'" % (tag, tagdict["suffix"]) ) if VERSION_CLASS is not None: version = pkg_parse_version(version) trace("version", repr(version)) return version
def tag_to_version(tag, config=None)
take a tag that might be prefixed with a keyword and return only the version part :param config: optional configuration object
4.313521
4.499693
0.958626
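Example of the stripping behaviour with the default configuration (output is indicative; the exact return type depends on whether a version class is available as VERSION_CLASS):

from setuptools_scm.version import tag_to_version

print(tag_to_version("v1.2.3"))     # 1.2.3
print(tag_to_version("1.0"))        # 1.0
print(tag_to_version("garbage"))    # None, with a "no version found" warning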
result = [] for tag in tags: tag = tag_to_version(tag, config=config) if tag: result.append(tag) return result
def tags_to_versions(tags, config=None)
take tags that might be prefixed with a keyword and return only the version part :param tags: an iterable of tags :param config: optional configuration object
2.603448
2.911329
0.894247
engine = self.engine
connection = engine.connect()
db_session = scoped_session(
    sessionmaker(autocommit=False, autoflush=True, bind=engine))
try:
    yield db_session
finally:
    # release the session and connection even if the caller raises
    db_session.close()
    connection.close()
def session(self)
Creates a context with an open SQLAlchemy session.
2.699302
2.287442
1.180052
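The yield suggests this generator is meant to be wrapped with contextlib.contextmanager. A self-contained sketch of how it would be used, assuming SQLAlchemy 1.x (the autocommit flag above was removed in 2.0); the Database class here is hypothetical:

from contextlib import contextmanager
from sqlalchemy import create_engine, text
from sqlalchemy.orm import scoped_session, sessionmaker

class Database:
    def __init__(self, url):
        self.engine = create_engine(url)

    @contextmanager
    def session(self):
        connection = self.engine.connect()
        db_session = scoped_session(
            sessionmaker(autocommit=False, autoflush=True, bind=self.engine))
        try:
            yield db_session
        finally:
            db_session.close()
            connection.close()

db = Database("sqlite://")
with db.session() as s:
    print(s.execute(text("SELECT 1")).scalar())  # 1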
''' on platforms that do not support symbolic links, lstat is an alias for stat() so we return a tuple of both ''' lst = os.lstat(path) st = os.stat(path) permission = octal_permissions(lst.st_mode), octal_permissions(st.st_mode) return permission
def get_path_permission(path)
on platforms that do not support symbolic links, lstat is an alias for stat() so we return a tuple of both
6.772614
3.123693
2.168143
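octal_permissions is not shown in this snippet; a plausible one-line definition consistent with the call site (an assumption, not necessarily the project's actual helper):

import stat

def octal_permissions(mode):
    # keep only the permission bits of st_mode and render them in octal
    return oct(stat.S_IMODE(mode))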
for i in range(max_retries):
    try:
        shutil.rmtree(path)
        return
    except OSError:
        if logger:
            logger.info('Unable to remove path: %s' % path)
            logger.info('Retrying after %d seconds' % i)
        time.sleep(i)

# Final attempt, pass any Exceptions up to caller.
shutil.rmtree(path)
def _robust_rmtree(path, logger=None, max_retries=5)
Try to delete paths robustly. Retries several times (with increasing delays)
if an OSError occurs. If the final attempt fails, the Exception is propagated
to the caller. Taken from https://github.com/hashdist/hashdist/pull/116
3.695206
3.501391
1.055354
from geonode.settings import INSTALLED_APPS, OGC_SERVER

# only start if using Geoserver backend
_backend = os.environ.get('BACKEND', OGC_SERVER['default']['BACKEND'])
if (_backend == 'geonode.qgis_server' or
        'geonode.geoserver' not in INSTALLED_APPS):
    return

download_dir = path('downloaded')
if not download_dir.exists():
    download_dir.makedirs()

geoserver_dir = path('geoserver')

geoserver_bin = download_dir / \
    os.path.basename(dev_config['GEOSERVER_URL'])
jetty_runner = download_dir / \
    os.path.basename(dev_config['JETTY_RUNNER_URL'])

grab(
    options.get('geoserver', dev_config['GEOSERVER_URL']),
    geoserver_bin,
    "geoserver binary")
grab(
    options.get('jetty', dev_config['JETTY_RUNNER_URL']),
    jetty_runner,
    "jetty runner")

if not geoserver_dir.exists():
    geoserver_dir.makedirs()

    webapp_dir = geoserver_dir / 'geoserver'
    if not webapp_dir.exists():
        webapp_dir.makedirs()

    print('extracting geoserver')
    z = zipfile.ZipFile(geoserver_bin, "r")
    z.extractall(webapp_dir)
    z.close()
    _install_data_dir()
def setup_geoserver(options)
Prepare a testing instance of GeoServer.
3.39761
3.356467
1.012258