file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
39k
| suffix
large_stringlengths 0
36.1k
| middle
large_stringlengths 0
29.4k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
bn.js
|
/*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.lang['bn'] = {
"dir": "ltr",
"editor": "Rich Text Editor",
"common": {
"editorHelp": "Press ALT 0 for help",
"browseServer": "ব্রাউজ সার্ভার",
"url": "URL",
"protocol": "প্রোটোকল",
"upload": "আপলোড",
"uploadSubmit": "ইহাকে সার্ভারে প্রেরন কর",
"image": "ছবির লেবেল যুক্ত কর",
"flash": "ফ্লাশ লেবেল যুক্ত কর",
"form": "ফর্ম",
"checkbox": "চেক বাক্স",
"radio": "রেডিও বাটন",
"textField": "টেক্সট ফীল্ড",
"textarea": "টেক্সট এরিয়া",
"hiddenField": "গুপ্ত ফীল্ড",
"button": "বাটন",
"select": "বাছাই ফীল্ড",
"imageButton": "ছবির বাটন",
"notSet": "<সেট নেই>",
"id": "আইডি",
"name": "নাম",
"langDir": "ভাষা লেখার দিক",
"langDirLtr": "বাম থেকে ডান (LTR)",
"langDirRtl": "ডান থেকে বাম (RTL)",
"langCode": "ভাষা কোড",
"longDescr": "URL এর লম্বা বর্ণনা",
"cssClass": "স্টাইল-শীট ক্লাস",
"advisoryTitle": "পরামর্শ শীর্ষক",
"cssStyle": "স্টাইল",
"ok": "ওকে",
"cancel": "বাতিল",
"close": "Close",
"preview": "প্রিভিউ",
"resize": "Resize",
"generalTab": "General",
"advancedTab": "এডভান্সড",
"validateNumberFailed": "This value is not a number.",
"confirmNewPage": "Any unsaved changes to this content will be lost. Are you sure you want to load new page?",
"confirmCancel": "Some of the options have been changed. Are you sure to close the dialog?",
"options": "Options",
"target": "টার্গেট",
"targetNew": "New Window (_blank)",
"targetTop": "Topmost Window (_top)",
"targetSelf": "Same Window (_self)",
"targetParent": "Parent Window (_parent)",
"langDirLTR": "বাম থেকে ডান (LTR)",
"langDirRTL": "ডান থেকে বাম (RTL)",
"styles": "স্টাইল",
"cssClasses": "স্টাইল-শীট ক্লাস",
"width": "প্রস্থ",
"height": "দৈর্ঘ্য",
|
"alignTop": "উপর",
"alignMiddle": "মধ্য",
"alignBottom": "নীচে",
"invalidValue": "Invalid value.",
"invalidHeight": "Height must be a number.",
"invalidWidth": "Width must be a number.",
"invalidCssLength": "Value specified for the \"%1\" field must be a positive number with or without a valid CSS measurement unit (px, %, in, cm, mm, em, ex, pt, or pc).",
"invalidHtmlLength": "Value specified for the \"%1\" field must be a positive number with or without a valid HTML measurement unit (px or %).",
"invalidInlineStyle": "Value specified for the inline style must consist of one or more tuples with the format of \"name : value\", separated by semi-colons.",
"cssLengthTooltip": "Enter a number for a value in pixels or a number with a valid CSS unit (px, %, in, cm, mm, em, ex, pt, or pc).",
"unavailable": "%1<span class=\"cke_accessibility\">, unavailable</span>"
},
"about": {
"copy": "Copyright © $1. All rights reserved.",
"dlgTitle": "About CKEditor",
"help": "Check $1 for help.",
"moreInfo": "For licensing information please visit our web site:",
"title": "About CKEditor",
"userGuide": "CKEditor User's Guide"
},
"basicstyles": {
"bold": "বোল্ড",
"italic": "ইটালিক",
"strike": "স্ট্রাইক থ্রু",
"subscript": "অধোলেখ",
"superscript": "অভিলেখ",
"underline": "আন্ডারলাইন"
},
"blockquote": {"toolbar": "Block Quote"},
"clipboard": {
"copy": "কপি",
"copyError": "আপনার ব্রাউজারের সুরক্ষা সেটিংস এডিটরকে অটোমেটিক কপি করার অনুমতি দেয়নি। দয়া করে এই কাজের জন্য কিবোর্ড ব্যবহার করুন (Ctrl/Cmd+C)।",
"cut": "কাট",
"cutError": "আপনার ব্রাউজারের সুরক্ষা সেটিংস এডিটরকে অটোমেটিক কাট করার অনুমতি দেয়নি। দয়া করে এই কাজের জন্য কিবোর্ড ব্যবহার করুন (Ctrl/Cmd+X)।",
"paste": "পেস্ট",
"pasteArea": "Paste Area",
"pasteMsg": "অনুগ্রহ করে নীচের বাক্সে কিবোর্ড ব্যবহার করে (<STRONG>Ctrl/Cmd+V</STRONG>) পেস্ট করুন এবং <STRONG>OK</STRONG> চাপ দিন",
"securityMsg": "Because of your browser security settings, the editor is not able to access your clipboard data directly. You are required to paste it again in this window.",
"title": "পেস্ট"
},
"contextmenu": {"options": "Context Menu Options"},
"toolbar": {
"toolbarCollapse": "Collapse Toolbar",
"toolbarExpand": "Expand Toolbar",
"toolbarGroups": {
"document": "Document",
"clipboard": "Clipboard/Undo",
"editing": "Editing",
"forms": "Forms",
"basicstyles": "Basic Styles",
"paragraph": "Paragraph",
"links": "Links",
"insert": "Insert",
"styles": "Styles",
"colors": "Colors",
"tools": "Tools"
},
"toolbars": "Editor toolbars"
},
"elementspath": {"eleLabel": "Elements path", "eleTitle": "%1 element"},
"list": {"bulletedlist": "বুলেট লিস্ট লেবেল", "numberedlist": "সাংখ্যিক লিস্টের লেবেল"},
"indent": {"indent": "ইনডেন্ট বাড়াও", "outdent": "ইনডেন্ট কমাও"},
"format": {
"label": "ফন্ট ফরমেট",
"panelTitle": "ফন্ট ফরমেট",
"tag_address": "ঠিকানা",
"tag_div": "শীর্ষক (DIV)",
"tag_h1": "শীর্ষক ১",
"tag_h2": "শীর্ষক ২",
"tag_h3": "শীর্ষক ৩",
"tag_h4": "শীর্ষক ৪",
"tag_h5": "শীর্ষক ৫",
"tag_h6": "শীর্ষক ৬",
"tag_p": "সাধারণ",
"tag_pre": "ফর্মেটেড"
},
"horizontalrule": {"toolbar": "রেখা যুক্ত কর"},
"image": {
"alertUrl": "অনুগ্রহক করে ছবির URL টাইপ করুন",
"alt": "বিকল্প টেক্সট",
"border": "বর্ডার",
"btnUpload": "ইহাকে সার্ভারে প্রেরন কর",
"button2Img": "Do you want to transform the selected image button on a simple image?",
"hSpace": "হরাইজন্টাল স্পেস",
"img2Button": "Do you want to transform the selected image on a image button?",
"infoTab": "ছবির তথ্য",
"linkTab": "লিংক",
"lockRatio": "অনুপাত লক কর",
"menu": "ছবির প্রোপার্টি",
"resetSize": "সাইজ পূর্বাবস্থায় ফিরিয়ে দাও",
"title": "ছবির প্রোপার্টি",
"titleButton": "ছবি বাটন প্রোপার্টি",
"upload": "আপলোড",
"urlMissing": "Image source URL is missing.",
"vSpace": "ভার্টিকেল স্পেস",
"validateBorder": "Border must be a whole number.",
"validateHSpace": "HSpace must be a whole number.",
"validateVSpace": "VSpace must be a whole number."
},
"fakeobjects": {
"anchor": "Anchor",
"flash": "Flash Animation",
"hiddenfield": "Hidden Field",
"iframe": "IFrame",
"unknown": "Unknown Object"
},
"link": {
"acccessKey": "এক্সেস কী",
"advanced": "এডভান্সড",
"advisoryContentType": "পরামর্শ কন্টেন্টের প্রকার",
"advisoryTitle": "পরামর্শ শীর্ষক",
"anchor": {
"toolbar": "নোঙ্গর",
"menu": "নোঙর প্রোপার্টি",
"title": "নোঙর প্রোপার্টি",
"name": "নোঙরের নাম",
"errorName": "নোঙরের নাম টাইপ করুন",
"remove": "Remove Anchor"
},
"anchorId": "নোঙরের আইডি দিয়ে",
"anchorName": "নোঙরের নাম দিয়ে",
"charset": "লিংক রিসোর্স ক্যারেক্টর সেট",
"cssClasses": "স্টাইল-শীট ক্লাস",
"emailAddress": "ইমেইল ঠিকানা",
"emailBody": "মেসেজের দেহ",
"emailSubject": "মেসেজের বিষয়",
"id": "আইডি",
"info": "লিংক তথ্য",
"langCode": "ভাষা লেখার দিক",
"langDir": "ভাষা লেখার দিক",
"langDirLTR": "বাম থেকে ডান (LTR)",
"langDirRTL": "ডান থেকে বাম (RTL)",
"menu": "লিংক সম্পাদন",
"name": "নাম",
"noAnchors": "(No anchors available in the document)",
"noEmail": "অনুগ্রহ করে ইমেইল এড্রেস টাইপ করুন",
"noUrl": "অনুগ্রহ করে URL লিংক টাইপ করুন",
"other": "<other>",
"popupDependent": "ডিপেন্ডেন্ট (Netscape)",
"popupFeatures": "পপআপ উইন্ডো ফীচার সমূহ",
"popupFullScreen": "পূর্ণ পর্দা জুড়ে (IE)",
"popupLeft": "বামের পজিশন",
"popupLocationBar": "লোকেশন বার",
"popupMenuBar": "মেন্যু বার",
"popupResizable": "Resizable",
"popupScrollBars": "স্ক্রল বার",
"popupStatusBar": "স্ট্যাটাস বার",
"popupToolbar": "টুল বার",
"popupTop": "ডানের পজিশন",
"rel": "Relationship",
"selectAnchor": "নোঙর বাছাই",
"styles": "স্টাইল",
"tabIndex": "ট্যাব ইন্ডেক্স",
"target": "টার্গেট",
"targetFrame": "<ফ্রেম>",
"targetFrameName": "টার্গেট ফ্রেমের নাম",
"targetPopup": "<পপআপ উইন্ডো>",
"targetPopupName": "পপআপ উইন্ডোর নাম",
"title": "লিংক",
"toAnchor": "এই পেজে নোঙর কর",
"toEmail": "ইমেইল",
"toUrl": "URL",
"toolbar": "লিংক যুক্ত কর",
"type": "লিংক প্রকার",
"unlink": "লিংক সরাও",
"upload": "আপলোড"
},
"magicline": {"title": "Insert paragraph here"},
"maximize": {"maximize": "Maximize", "minimize": "Minimize"},
"pastetext": {"button": "সাদা টেক্সট হিসেবে পেস্ট কর", "title": "সাদা টেক্সট হিসেবে পেস্ট কর"},
"pastefromword": {
"confirmCleanup": "The text you want to paste seems to be copied from Word. Do you want to clean it before pasting?",
"error": "It was not possible to clean up the pasted data due to an internal error",
"title": "পেস্ট (শব্দ)",
"toolbar": "পেস্ট (শব্দ)"
},
"removeformat": {"toolbar": "ফরমেট সরাও"},
"sourcearea": {"toolbar": "সোর্স"},
"specialchar": {
"options": "Special Character Options",
"title": "বিশেষ ক্যারেক্টার বাছাই কর",
"toolbar": "বিশেষ অক্ষর যুক্ত কর"
},
"scayt": {
"about": "About SCAYT",
"aboutTab": "About",
"addWord": "Add Word",
"allCaps": "Ignore All-Caps Words",
"dic_create": "Create",
"dic_delete": "Delete",
"dic_field_name": "Dictionary name",
"dic_info": "Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. If you already have a stored dictionary, please type its name and click the Restore button.",
"dic_rename": "Rename",
"dic_restore": "Restore",
"dictionariesTab": "Dictionaries",
"disable": "Disable SCAYT",
"emptyDic": "Dictionary name should not be empty.",
"enable": "Enable SCAYT",
"ignore": "Ignore",
"ignoreAll": "Ignore All",
"ignoreDomainNames": "Ignore Domain Names",
"langs": "Languages",
"languagesTab": "Languages",
"mixedCase": "Ignore Words with Mixed Case",
"mixedWithDigits": "Ignore Words with Numbers",
"moreSuggestions": "More suggestions",
"opera_title": "Not supported by Opera",
"options": "Options",
"optionsTab": "Options",
"title": "Spell Check As You Type",
"toggle": "Toggle SCAYT",
"noSuggestions": "No suggestion"
},
"stylescombo": {
"label": "স্টাইল",
"panelTitle": "Formatting Styles",
"panelTitle1": "Block Styles",
"panelTitle2": "Inline Styles",
"panelTitle3": "Object Styles"
},
"table": {
"border": "বর্ডার সাইজ",
"caption": "শীর্ষক",
"cell": {
"menu": "সেল",
"insertBefore": "Insert Cell Before",
"insertAfter": "Insert Cell After",
"deleteCell": "সেল মুছে দাও",
"merge": "সেল জোড়া দাও",
"mergeRight": "Merge Right",
"mergeDown": "Merge Down",
"splitHorizontal": "Split Cell Horizontally",
"splitVertical": "Split Cell Vertically",
"title": "Cell Properties",
"cellType": "Cell Type",
"rowSpan": "Rows Span",
"colSpan": "Columns Span",
"wordWrap": "Word Wrap",
"hAlign": "Horizontal Alignment",
"vAlign": "Vertical Alignment",
"alignBaseline": "Baseline",
"bgColor": "Background Color",
"borderColor": "Border Color",
"data": "Data",
"header": "Header",
"yes": "Yes",
"no": "No",
"invalidWidth": "Cell width must be a number.",
"invalidHeight": "Cell height must be a number.",
"invalidRowSpan": "Rows span must be a whole number.",
"invalidColSpan": "Columns span must be a whole number.",
"chooseColor": "Choose"
},
"cellPad": "সেল প্যাডিং",
"cellSpace": "সেল স্পেস",
"column": {
"menu": "কলাম",
"insertBefore": "Insert Column Before",
"insertAfter": "Insert Column After",
"deleteColumn": "কলাম মুছে দাও"
},
"columns": "কলাম",
"deleteTable": "টেবিল ডিলীট কর",
"headers": "Headers",
"headersBoth": "Both",
"headersColumn": "First column",
"headersNone": "None",
"headersRow": "First Row",
"invalidBorder": "Border size must be a number.",
"invalidCellPadding": "Cell padding must be a positive number.",
"invalidCellSpacing": "Cell spacing must be a positive number.",
"invalidCols": "Number of columns must be a number greater than 0.",
"invalidHeight": "Table height must be a number.",
"invalidRows": "Number of rows must be a number greater than 0.",
"invalidWidth": "Table width must be a number.",
"menu": "টেবিল প্রোপার্টি",
"row": {
"menu": "রো",
"insertBefore": "Insert Row Before",
"insertAfter": "Insert Row After",
"deleteRow": "রো মুছে দাও"
},
"rows": "রো",
"summary": "সারাংশ",
"title": "টেবিল প্রোপার্টি",
"toolbar": "টেবিলের লেবেল যুক্ত কর",
"widthPc": "শতকরা",
"widthPx": "পিক্সেল",
"widthUnit": "width unit"
},
"undo": {"redo": "রি-ডু", "undo": "আনডু"},
"wsc": {
"btnIgnore": "ইগনোর কর",
"btnIgnoreAll": "সব ইগনোর কর",
"btnReplace": "বদলে দাও",
"btnReplaceAll": "সব বদলে দাও",
"btnUndo": "আন্ডু",
"changeTo": "এতে বদলাও",
"errorLoading": "Error loading application service host: %s.",
"ieSpellDownload": "বানান পরীক্ষক ইনস্টল করা নেই। আপনি কি এখনই এটা ডাউনলোড করতে চান?",
"manyChanges": "বানান পরীক্ষা শেষ: %1 গুলো শব্দ বদলে গ্যাছে",
"noChanges": "বানান পরীক্ষা শেষ: কোন শব্দ পরিবর্তন করা হয়নি",
"noMispell": "বানান পরীক্ষা শেষ: কোন ভুল বানান পাওয়া যায়নি",
"noSuggestions": "- কোন সাজেশন নেই -",
"notAvailable": "Sorry, but service is unavailable now.",
"notInDic": "শব্দকোষে নেই",
"oneChange": "বানান পরীক্ষা শেষ: একটি মাত্র শব্দ পরিবর্তন করা হয়েছে",
"progress": "বানান পরীক্ষা চলছে...",
"title": "Spell Check",
"toolbar": "বানান চেক"
}
};
|
"align": "এলাইন",
"alignLeft": "বামে",
"alignRight": "ডানে",
"alignCenter": "মাঝখানে",
|
random_line_split
|
models.py
|
import logging
from django.db.models import DateTimeField, Model, Manager
from django.db.models.query import QuerySet
from django.db.models.fields.related import \
OneToOneField, ManyToManyField, ManyToManyRel
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist
LOGGER = logging.getLogger(__name__)
def _unset_related_one_to_one(obj, field):
old_value = getattr(obj, field.column)
if old_value is not None:
LOGGER.debug(
'Setting %s.%s to None on object %s (old value: %s)',
obj._meta.model.__name__, field.column, obj.pk, old_value)
# Unset the fk field (e.g. Foo.baz_id)
setattr(obj, field.column, None)
# Unset the related object field (e.g. Foo.baz)
setattr(obj, field.name, None)
def _unset_related_many_to_many(obj, field):
manager = getattr(obj, field.name)
old_values = manager.values_list('pk', flat=True)
LOGGER.debug(
'Removing all objects from %s.%s on object %s (old values: %s)',
obj._meta.model.__name__, field.name, obj.pk,
', '.join(str(pk) for pk in old_values))
manager.remove(*manager.all())
def _unset_related_objects_relations(obj):
LOGGER.debug('Soft-deleting object %s %s',
obj._meta.model.__name__, obj.pk)
for field in obj._meta.get_fields():
field_type = type(field)
if field_type is OneToOneField:
_unset_related_one_to_one(obj, field)
elif field_type in (ManyToManyRel, ManyToManyField):
_unset_related_many_to_many(obj, field)
for related in obj._meta.get_all_related_objects():
# Unset related objects' relation
rel_name = related.get_accessor_name()
if related.one_to_one:
# Handle one-to-one relations.
try:
related_object = getattr(obj, rel_name)
except ObjectDoesNotExist:
pass
else:
_unset_related_one_to_one(related_object, related.field)
related_object.save()
else:
# Handle one-to-many and many-to-many relations.
related_objects = getattr(obj, rel_name)
if related_objects.count():
affected_objects_id = ', '.join(
str(pk) for pk in related_objects.values_list(
'pk', flat=True))
old_values = ', '.join(
str(val) for val in related_objects.values_list(
related.field.name, flat=True))
LOGGER.debug(
'Setting %s.%s to None on objects %s (old values: %s)',
related_objects.model.__name__, related.field.name,
affected_objects_id, old_values)
related_objects.update(**{related.field.name: None})
class SoftDeleteQuerySet(QuerySet):
"""This QuerySet subclass implements soft deletion of objects.
"""
def delete(self):
"""Soft delete all objects included in this queryset.
"""
for obj in self:
_unset_related_objects_relations(obj)
self.update(deleted=now())
def undelete(self):
"""Soft undelete all objects included in this queryset.
"""
objects = self.filter(deleted__isnull=False)
if objects.count():
LOGGER.debug(
'Soft undeleting %s objects: %s', self.model.__name__,
', '.join(str(pk)
for pk in objects.values_list('pk', flat=True)))
objects.update(deleted=None)
class SoftDeleteManager(Manager.from_queryset(SoftDeleteQuerySet)):
"""This Manager hides soft deleted objects by default,
and exposes methods to access them.
"""
def _get_base_queryset(self):
return super(SoftDeleteManager, self).get_queryset()
def get_queryset(self):
"""Return NOT DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=True)
def deleted(self):
"""Return DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=False)
def with_deleted(self):
"""Return ALL objects.
"""
return self._get_base_queryset()
class SoftDeleteModel(Model):
"""Simply inherit this class to enable soft deletion on a model.
"""
class Meta:
abstract = True
objects = SoftDeleteManager()
deleted = DateTimeField(verbose_name=_('deleted'), null=True, blank=True)
def
|
(self):
"""Soft delete this object.
"""
_unset_related_objects_relations(self)
self.deleted = now()
self.save()
return self
def undelete(self):
"""Undelete this soft-deleted object.
"""
if self.deleted is not None:
LOGGER.debug('Soft-undeleting object %s %s',
self._meta.model.__name__, self.pk)
self.deleted = None
self.save()
return self
|
delete
|
identifier_name
|
models.py
|
import logging
from django.db.models import DateTimeField, Model, Manager
from django.db.models.query import QuerySet
from django.db.models.fields.related import \
OneToOneField, ManyToManyField, ManyToManyRel
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist
LOGGER = logging.getLogger(__name__)
def _unset_related_one_to_one(obj, field):
old_value = getattr(obj, field.column)
if old_value is not None:
LOGGER.debug(
'Setting %s.%s to None on object %s (old value: %s)',
obj._meta.model.__name__, field.column, obj.pk, old_value)
# Unset the fk field (e.g. Foo.baz_id)
setattr(obj, field.column, None)
# Unset the related object field (e.g. Foo.baz)
setattr(obj, field.name, None)
def _unset_related_many_to_many(obj, field):
manager = getattr(obj, field.name)
old_values = manager.values_list('pk', flat=True)
LOGGER.debug(
'Removing all objects from %s.%s on object %s (old values: %s)',
obj._meta.model.__name__, field.name, obj.pk,
', '.join(str(pk) for pk in old_values))
manager.remove(*manager.all())
def _unset_related_objects_relations(obj):
LOGGER.debug('Soft-deleting object %s %s',
obj._meta.model.__name__, obj.pk)
for field in obj._meta.get_fields():
field_type = type(field)
if field_type is OneToOneField:
_unset_related_one_to_one(obj, field)
elif field_type in (ManyToManyRel, ManyToManyField):
_unset_related_many_to_many(obj, field)
for related in obj._meta.get_all_related_objects():
# Unset related objects' relation
rel_name = related.get_accessor_name()
if related.one_to_one:
# Handle one-to-one relations.
try:
related_object = getattr(obj, rel_name)
except ObjectDoesNotExist:
pass
else:
_unset_related_one_to_one(related_object, related.field)
related_object.save()
else:
# Handle one-to-many and many-to-many relations.
related_objects = getattr(obj, rel_name)
if related_objects.count():
affected_objects_id = ', '.join(
str(pk) for pk in related_objects.values_list(
'pk', flat=True))
old_values = ', '.join(
str(val) for val in related_objects.values_list(
related.field.name, flat=True))
LOGGER.debug(
'Setting %s.%s to None on objects %s (old values: %s)',
related_objects.model.__name__, related.field.name,
affected_objects_id, old_values)
related_objects.update(**{related.field.name: None})
class SoftDeleteQuerySet(QuerySet):
"""This QuerySet subclass implements soft deletion of objects.
"""
def delete(self):
"""Soft delete all objects included in this queryset.
"""
for obj in self:
_unset_related_objects_relations(obj)
self.update(deleted=now())
def undelete(self):
"""Soft undelete all objects included in this queryset.
"""
objects = self.filter(deleted__isnull=False)
if objects.count():
LOGGER.debug(
'Soft undeleting %s objects: %s', self.model.__name__,
', '.join(str(pk)
for pk in objects.values_list('pk', flat=True)))
objects.update(deleted=None)
class SoftDeleteManager(Manager.from_queryset(SoftDeleteQuerySet)):
"""This Manager hides soft deleted objects by default,
and exposes methods to access them.
"""
def _get_base_queryset(self):
return super(SoftDeleteManager, self).get_queryset()
def get_queryset(self):
"""Return NOT DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=True)
def deleted(self):
"""Return DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=False)
def with_deleted(self):
|
class SoftDeleteModel(Model):
"""Simply inherit this class to enable soft deletion on a model.
"""
class Meta:
abstract = True
objects = SoftDeleteManager()
deleted = DateTimeField(verbose_name=_('deleted'), null=True, blank=True)
def delete(self):
"""Soft delete this object.
"""
_unset_related_objects_relations(self)
self.deleted = now()
self.save()
return self
def undelete(self):
"""Undelete this soft-deleted object.
"""
if self.deleted is not None:
LOGGER.debug('Soft-undeleting object %s %s',
self._meta.model.__name__, self.pk)
self.deleted = None
self.save()
return self
|
"""Return ALL objects.
"""
return self._get_base_queryset()
|
random_line_split
|
models.py
|
import logging
from django.db.models import DateTimeField, Model, Manager
from django.db.models.query import QuerySet
from django.db.models.fields.related import \
OneToOneField, ManyToManyField, ManyToManyRel
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist
LOGGER = logging.getLogger(__name__)
def _unset_related_one_to_one(obj, field):
old_value = getattr(obj, field.column)
if old_value is not None:
LOGGER.debug(
'Setting %s.%s to None on object %s (old value: %s)',
obj._meta.model.__name__, field.column, obj.pk, old_value)
# Unset the fk field (e.g. Foo.baz_id)
setattr(obj, field.column, None)
# Unset the related object field (e.g. Foo.baz)
setattr(obj, field.name, None)
def _unset_related_many_to_many(obj, field):
manager = getattr(obj, field.name)
old_values = manager.values_list('pk', flat=True)
LOGGER.debug(
'Removing all objects from %s.%s on object %s (old values: %s)',
obj._meta.model.__name__, field.name, obj.pk,
', '.join(str(pk) for pk in old_values))
manager.remove(*manager.all())
def _unset_related_objects_relations(obj):
LOGGER.debug('Soft-deleting object %s %s',
obj._meta.model.__name__, obj.pk)
for field in obj._meta.get_fields():
field_type = type(field)
if field_type is OneToOneField:
_unset_related_one_to_one(obj, field)
elif field_type in (ManyToManyRel, ManyToManyField):
_unset_related_many_to_many(obj, field)
for related in obj._meta.get_all_related_objects():
# Unset related objects' relation
rel_name = related.get_accessor_name()
if related.one_to_one:
# Handle one-to-one relations.
try:
related_object = getattr(obj, rel_name)
except ObjectDoesNotExist:
pass
else:
_unset_related_one_to_one(related_object, related.field)
related_object.save()
else:
# Handle one-to-many and many-to-many relations.
related_objects = getattr(obj, rel_name)
if related_objects.count():
affected_objects_id = ', '.join(
str(pk) for pk in related_objects.values_list(
'pk', flat=True))
old_values = ', '.join(
str(val) for val in related_objects.values_list(
related.field.name, flat=True))
LOGGER.debug(
'Setting %s.%s to None on objects %s (old values: %s)',
related_objects.model.__name__, related.field.name,
affected_objects_id, old_values)
related_objects.update(**{related.field.name: None})
class SoftDeleteQuerySet(QuerySet):
"""This QuerySet subclass implements soft deletion of objects.
"""
def delete(self):
"""Soft delete all objects included in this queryset.
"""
for obj in self:
_unset_related_objects_relations(obj)
self.update(deleted=now())
def undelete(self):
"""Soft undelete all objects included in this queryset.
"""
objects = self.filter(deleted__isnull=False)
if objects.count():
LOGGER.debug(
'Soft undeleting %s objects: %s', self.model.__name__,
', '.join(str(pk)
for pk in objects.values_list('pk', flat=True)))
objects.update(deleted=None)
class SoftDeleteManager(Manager.from_queryset(SoftDeleteQuerySet)):
"""This Manager hides soft deleted objects by default,
and exposes methods to access them.
"""
def _get_base_queryset(self):
return super(SoftDeleteManager, self).get_queryset()
def get_queryset(self):
"""Return NOT DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=True)
def deleted(self):
"""Return DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=False)
def with_deleted(self):
|
class SoftDeleteModel(Model):
"""Simply inherit this class to enable soft deletion on a model.
"""
class Meta:
abstract = True
objects = SoftDeleteManager()
deleted = DateTimeField(verbose_name=_('deleted'), null=True, blank=True)
def delete(self):
"""Soft delete this object.
"""
_unset_related_objects_relations(self)
self.deleted = now()
self.save()
return self
def undelete(self):
"""Undelete this soft-deleted object.
"""
if self.deleted is not None:
LOGGER.debug('Soft-undeleting object %s %s',
self._meta.model.__name__, self.pk)
self.deleted = None
self.save()
return self
|
"""Return ALL objects.
"""
return self._get_base_queryset()
|
identifier_body
|
models.py
|
import logging
from django.db.models import DateTimeField, Model, Manager
from django.db.models.query import QuerySet
from django.db.models.fields.related import \
OneToOneField, ManyToManyField, ManyToManyRel
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist
LOGGER = logging.getLogger(__name__)
def _unset_related_one_to_one(obj, field):
old_value = getattr(obj, field.column)
if old_value is not None:
LOGGER.debug(
'Setting %s.%s to None on object %s (old value: %s)',
obj._meta.model.__name__, field.column, obj.pk, old_value)
# Unset the fk field (e.g. Foo.baz_id)
setattr(obj, field.column, None)
# Unset the related object field (e.g. Foo.baz)
setattr(obj, field.name, None)
def _unset_related_many_to_many(obj, field):
manager = getattr(obj, field.name)
old_values = manager.values_list('pk', flat=True)
LOGGER.debug(
'Removing all objects from %s.%s on object %s (old values: %s)',
obj._meta.model.__name__, field.name, obj.pk,
', '.join(str(pk) for pk in old_values))
manager.remove(*manager.all())
def _unset_related_objects_relations(obj):
LOGGER.debug('Soft-deleting object %s %s',
obj._meta.model.__name__, obj.pk)
for field in obj._meta.get_fields():
field_type = type(field)
if field_type is OneToOneField:
_unset_related_one_to_one(obj, field)
elif field_type in (ManyToManyRel, ManyToManyField):
|
for related in obj._meta.get_all_related_objects():
# Unset related objects' relation
rel_name = related.get_accessor_name()
if related.one_to_one:
# Handle one-to-one relations.
try:
related_object = getattr(obj, rel_name)
except ObjectDoesNotExist:
pass
else:
_unset_related_one_to_one(related_object, related.field)
related_object.save()
else:
# Handle one-to-many and many-to-many relations.
related_objects = getattr(obj, rel_name)
if related_objects.count():
affected_objects_id = ', '.join(
str(pk) for pk in related_objects.values_list(
'pk', flat=True))
old_values = ', '.join(
str(val) for val in related_objects.values_list(
related.field.name, flat=True))
LOGGER.debug(
'Setting %s.%s to None on objects %s (old values: %s)',
related_objects.model.__name__, related.field.name,
affected_objects_id, old_values)
related_objects.update(**{related.field.name: None})
class SoftDeleteQuerySet(QuerySet):
"""This QuerySet subclass implements soft deletion of objects.
"""
def delete(self):
"""Soft delete all objects included in this queryset.
"""
for obj in self:
_unset_related_objects_relations(obj)
self.update(deleted=now())
def undelete(self):
"""Soft undelete all objects included in this queryset.
"""
objects = self.filter(deleted__isnull=False)
if objects.count():
LOGGER.debug(
'Soft undeleting %s objects: %s', self.model.__name__,
', '.join(str(pk)
for pk in objects.values_list('pk', flat=True)))
objects.update(deleted=None)
class SoftDeleteManager(Manager.from_queryset(SoftDeleteQuerySet)):
"""This Manager hides soft deleted objects by default,
and exposes methods to access them.
"""
def _get_base_queryset(self):
return super(SoftDeleteManager, self).get_queryset()
def get_queryset(self):
"""Return NOT DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=True)
def deleted(self):
"""Return DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=False)
def with_deleted(self):
"""Return ALL objects.
"""
return self._get_base_queryset()
class SoftDeleteModel(Model):
"""Simply inherit this class to enable soft deletion on a model.
"""
class Meta:
abstract = True
objects = SoftDeleteManager()
deleted = DateTimeField(verbose_name=_('deleted'), null=True, blank=True)
def delete(self):
"""Soft delete this object.
"""
_unset_related_objects_relations(self)
self.deleted = now()
self.save()
return self
def undelete(self):
"""Undelete this soft-deleted object.
"""
if self.deleted is not None:
LOGGER.debug('Soft-undeleting object %s %s',
self._meta.model.__name__, self.pk)
self.deleted = None
self.save()
return self
|
_unset_related_many_to_many(obj, field)
|
conditional_block
|
__init__.py
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Intel
#
# Author: Shuangtai Tian <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import oslo.messaging
from ceilometer import plugin
OPTS = [
cfg.StrOpt('nova_control_exchange',
default='nova',
help="Exchange name for Nova notifications."),
]
cfg.CONF.register_opts(OPTS)
class C
|
plugin.NotificationBase):
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo.messaging.Target defining the exchange and
topics to be connected for this plugin.
"""
return [oslo.messaging.Target(topic=topic,
exchange=conf.nova_control_exchange)
for topic in conf.notification_topics]
|
omputeNotificationBase(
|
identifier_name
|
__init__.py
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Intel
#
# Author: Shuangtai Tian <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import oslo.messaging
from ceilometer import plugin
OPTS = [
cfg.StrOpt('nova_control_exchange',
default='nova',
help="Exchange name for Nova notifications."),
]
cfg.CONF.register_opts(OPTS)
class ComputeNotificationBase(plugin.NotificationBase):
@staticmethod
def get_targets(conf):
"
|
""Return a sequence of oslo.messaging.Target defining the exchange and
topics to be connected for this plugin.
"""
return [oslo.messaging.Target(topic=topic,
exchange=conf.nova_control_exchange)
for topic in conf.notification_topics]
|
identifier_body
|
|
__init__.py
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Intel
#
# Author: Shuangtai Tian <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import oslo.messaging
from ceilometer import plugin
OPTS = [
cfg.StrOpt('nova_control_exchange',
default='nova',
help="Exchange name for Nova notifications."),
]
|
cfg.CONF.register_opts(OPTS)
class ComputeNotificationBase(plugin.NotificationBase):
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo.messaging.Target defining the exchange and
topics to be connected for this plugin.
"""
return [oslo.messaging.Target(topic=topic,
exchange=conf.nova_control_exchange)
for topic in conf.notification_topics]
|
random_line_split
|
|
testapp_spec.js
|
/*global jasmine*/
var excludes = [
"map_events.html",
"map_lazy_init.html",
"map-lazy-load.html",
"marker_with_dynamic_position.html",
"marker_with_dynamic_address.html",
"marker_with_info_window.html",
"places-auto-complete.html"
];
function using(values, func)
|
describe('testapp directory', function() {
'use strict';
//var urls = ["aerial-rotate.html", "aerial-simple.html", "hello_map.html", "map_control.html"];
var files = require('fs').readdirSync(__dirname + "/../../testapp");
var urls = files.filter(function(filename) {
return filename.match(/\.html$/) && excludes.indexOf(filename) === -1;
});
console.log('urls', urls);
using(urls, function(url){
it('testapp/'+url, function() {
browser.get(url);
browser.wait( function() {
return browser.executeScript( function() {
var el = document.querySelector("map");
var scope = angular.element(el).scope();
//return scope.map.getCenter().lat();
return scope.map.getCenter();
}).then(function(result) {
return result;
});
}, 5000);
//element(by.css("map")).evaluate('map.getCenter().lat()').then(function(lat) {
// console.log('lat', lat);
// expect(lat).toNotEqual(0);
//});
browser.manage().logs().get('browser').then(function(browserLog) {
(browserLog.length > 0) && console.log('log: ' + require('util').inspect(browserLog));
expect(browserLog).toEqual([]);
});
});
});
});
|
{
for (var i = 0, count = values.length; i < count; i++) {
if (Object.prototype.toString.call(values[i]) !== '[object Array]') {
values[i] = [values[i]];
}
func.apply(this, values[i]);
jasmine.currentEnv_.currentSpec.description += ' (with using ' + values[i].join(', ') + ')';
}
}
|
identifier_body
|
testapp_spec.js
|
/*global jasmine*/
var excludes = [
"map_events.html",
"map_lazy_init.html",
"map-lazy-load.html",
"marker_with_dynamic_position.html",
"marker_with_dynamic_address.html",
"marker_with_info_window.html",
"places-auto-complete.html"
];
|
if (Object.prototype.toString.call(values[i]) !== '[object Array]') {
values[i] = [values[i]];
}
func.apply(this, values[i]);
jasmine.currentEnv_.currentSpec.description += ' (with using ' + values[i].join(', ') + ')';
}
}
describe('testapp directory', function() {
'use strict';
//var urls = ["aerial-rotate.html", "aerial-simple.html", "hello_map.html", "map_control.html"];
var files = require('fs').readdirSync(__dirname + "/../../testapp");
var urls = files.filter(function(filename) {
return filename.match(/\.html$/) && excludes.indexOf(filename) === -1;
});
console.log('urls', urls);
using(urls, function(url){
it('testapp/'+url, function() {
browser.get(url);
browser.wait( function() {
return browser.executeScript( function() {
var el = document.querySelector("map");
var scope = angular.element(el).scope();
//return scope.map.getCenter().lat();
return scope.map.getCenter();
}).then(function(result) {
return result;
});
}, 5000);
//element(by.css("map")).evaluate('map.getCenter().lat()').then(function(lat) {
// console.log('lat', lat);
// expect(lat).toNotEqual(0);
//});
browser.manage().logs().get('browser').then(function(browserLog) {
(browserLog.length > 0) && console.log('log: ' + require('util').inspect(browserLog));
expect(browserLog).toEqual([]);
});
});
});
});
|
function using(values, func){
for (var i = 0, count = values.length; i < count; i++) {
|
random_line_split
|
testapp_spec.js
|
/*global jasmine*/
var excludes = [
"map_events.html",
"map_lazy_init.html",
"map-lazy-load.html",
"marker_with_dynamic_position.html",
"marker_with_dynamic_address.html",
"marker_with_info_window.html",
"places-auto-complete.html"
];
function
|
(values, func){
for (var i = 0, count = values.length; i < count; i++) {
if (Object.prototype.toString.call(values[i]) !== '[object Array]') {
values[i] = [values[i]];
}
func.apply(this, values[i]);
jasmine.currentEnv_.currentSpec.description += ' (with using ' + values[i].join(', ') + ')';
}
}
describe('testapp directory', function() {
'use strict';
//var urls = ["aerial-rotate.html", "aerial-simple.html", "hello_map.html", "map_control.html"];
var files = require('fs').readdirSync(__dirname + "/../../testapp");
var urls = files.filter(function(filename) {
return filename.match(/\.html$/) && excludes.indexOf(filename) === -1;
});
console.log('urls', urls);
using(urls, function(url){
it('testapp/'+url, function() {
browser.get(url);
browser.wait( function() {
return browser.executeScript( function() {
var el = document.querySelector("map");
var scope = angular.element(el).scope();
//return scope.map.getCenter().lat();
return scope.map.getCenter();
}).then(function(result) {
return result;
});
}, 5000);
//element(by.css("map")).evaluate('map.getCenter().lat()').then(function(lat) {
// console.log('lat', lat);
// expect(lat).toNotEqual(0);
//});
browser.manage().logs().get('browser').then(function(browserLog) {
(browserLog.length > 0) && console.log('log: ' + require('util').inspect(browserLog));
expect(browserLog).toEqual([]);
});
});
});
});
|
using
|
identifier_name
|
testapp_spec.js
|
/*global jasmine*/
var excludes = [
"map_events.html",
"map_lazy_init.html",
"map-lazy-load.html",
"marker_with_dynamic_position.html",
"marker_with_dynamic_address.html",
"marker_with_info_window.html",
"places-auto-complete.html"
];
function using(values, func){
for (var i = 0, count = values.length; i < count; i++)
|
}
describe('testapp directory', function() {
'use strict';
//var urls = ["aerial-rotate.html", "aerial-simple.html", "hello_map.html", "map_control.html"];
var files = require('fs').readdirSync(__dirname + "/../../testapp");
var urls = files.filter(function(filename) {
return filename.match(/\.html$/) && excludes.indexOf(filename) === -1;
});
console.log('urls', urls);
using(urls, function(url){
it('testapp/'+url, function() {
browser.get(url);
browser.wait( function() {
return browser.executeScript( function() {
var el = document.querySelector("map");
var scope = angular.element(el).scope();
//return scope.map.getCenter().lat();
return scope.map.getCenter();
}).then(function(result) {
return result;
});
}, 5000);
//element(by.css("map")).evaluate('map.getCenter().lat()').then(function(lat) {
// console.log('lat', lat);
// expect(lat).toNotEqual(0);
//});
browser.manage().logs().get('browser').then(function(browserLog) {
(browserLog.length > 0) && console.log('log: ' + require('util').inspect(browserLog));
expect(browserLog).toEqual([]);
});
});
});
});
|
{
if (Object.prototype.toString.call(values[i]) !== '[object Array]') {
values[i] = [values[i]];
}
func.apply(this, values[i]);
jasmine.currentEnv_.currentSpec.description += ' (with using ' + values[i].join(', ') + ')';
}
|
conditional_block
|
home.component.ts
|
import { Component, OnDestroy, OnInit } from '@angular/core';
import 'rxjs/add/operator/startWith';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/debounceTime';
import { Observable } from 'rxjs';
import { DPostList } from '../services/post/post.dto';
import { select, Store } from '@ngrx/store';
import { IAppState } from '../app.store';
import { GetLatest } from '../services/post/post.actions';
import {
SubscribeToLatestWall,
UnSubscribeFromLatestWall,
} from '../services/websocket/websocket.actions';
import { filter } from 'rxjs/internal/operators';
@Component({
selector: 'app-home',
templateUrl: './home.component.html',
styleUrls: ['./home.component.scss'],
})
export class
|
implements OnInit, OnDestroy {
postList$: Observable<DPostList>;
authenticated$: Observable<boolean>;
websocketConnected$: Observable<boolean>;
page: number;
hashtags = [
{
name: 'eve',
posts: 3415,
},
{
name: 'mining',
posts: 1482,
},
{
name: 'isk',
posts: 1023,
},
{
name: 'test',
posts: 939,
},
{
name: 'bees',
posts: 712,
},
{
name: 'killmail',
posts: 452,
},
];
constructor(
private store: Store<IAppState>,
) {
this.postList$ = this.store.pipe(select('post', 'list', 'latest'));
this.authenticated$ = this.store.pipe(select('authentication', 'authenticated'));
this.websocketConnected$ = this.store.pipe(select('websocket', 'connected'))
}
ngOnDestroy() {
this.websocketConnected$.pipe(
filter(connected => connected)
).subscribe(() => {
this.store.dispatch(new UnSubscribeFromLatestWall());
});
}
ngOnInit() {
this.page = 0;
this.authenticated$.pipe(
filter(authenticated => authenticated)
).subscribe(() => {
this.store.dispatch(new GetLatest({ page: this.page, limit: 20 }));
});
this.websocketConnected$.pipe(
filter(connected => connected)
).subscribe(() => {
this.store.dispatch(new SubscribeToLatestWall());
});
}
onScroll() {
this.page++;
this.store.dispatch(new GetLatest({ page: this.page, limit: 20 }));
}
}
|
HomeComponent
|
identifier_name
|
home.component.ts
|
import { Component, OnDestroy, OnInit } from '@angular/core';
import 'rxjs/add/operator/startWith';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/debounceTime';
import { Observable } from 'rxjs';
import { DPostList } from '../services/post/post.dto';
import { select, Store } from '@ngrx/store';
import { IAppState } from '../app.store';
import { GetLatest } from '../services/post/post.actions';
import {
SubscribeToLatestWall,
UnSubscribeFromLatestWall,
} from '../services/websocket/websocket.actions';
import { filter } from 'rxjs/internal/operators';
@Component({
selector: 'app-home',
templateUrl: './home.component.html',
styleUrls: ['./home.component.scss'],
})
export class HomeComponent implements OnInit, OnDestroy {
postList$: Observable<DPostList>;
authenticated$: Observable<boolean>;
websocketConnected$: Observable<boolean>;
page: number;
hashtags = [
{
name: 'eve',
posts: 3415,
},
{
name: 'mining',
posts: 1482,
},
{
name: 'isk',
posts: 1023,
},
{
name: 'test',
posts: 939,
},
{
name: 'bees',
posts: 712,
},
{
name: 'killmail',
posts: 452,
},
];
constructor(
private store: Store<IAppState>,
)
|
ngOnDestroy() {
this.websocketConnected$.pipe(
filter(connected => connected)
).subscribe(() => {
this.store.dispatch(new UnSubscribeFromLatestWall());
});
}
ngOnInit() {
this.page = 0;
this.authenticated$.pipe(
filter(authenticated => authenticated)
).subscribe(() => {
this.store.dispatch(new GetLatest({ page: this.page, limit: 20 }));
});
this.websocketConnected$.pipe(
filter(connected => connected)
).subscribe(() => {
this.store.dispatch(new SubscribeToLatestWall());
});
}
onScroll() {
this.page++;
this.store.dispatch(new GetLatest({ page: this.page, limit: 20 }));
}
}
|
{
this.postList$ = this.store.pipe(select('post', 'list', 'latest'));
this.authenticated$ = this.store.pipe(select('authentication', 'authenticated'));
this.websocketConnected$ = this.store.pipe(select('websocket', 'connected'))
}
|
identifier_body
|
home.component.ts
|
import { Component, OnDestroy, OnInit } from '@angular/core';
import 'rxjs/add/operator/startWith';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/debounceTime';
import { Observable } from 'rxjs';
import { DPostList } from '../services/post/post.dto';
import { select, Store } from '@ngrx/store';
import { IAppState } from '../app.store';
import { GetLatest } from '../services/post/post.actions';
import {
SubscribeToLatestWall,
UnSubscribeFromLatestWall,
} from '../services/websocket/websocket.actions';
import { filter } from 'rxjs/internal/operators';
@Component({
selector: 'app-home',
templateUrl: './home.component.html',
styleUrls: ['./home.component.scss'],
})
export class HomeComponent implements OnInit, OnDestroy {
postList$: Observable<DPostList>;
|
websocketConnected$: Observable<boolean>;
page: number;
hashtags = [
{
name: 'eve',
posts: 3415,
},
{
name: 'mining',
posts: 1482,
},
{
name: 'isk',
posts: 1023,
},
{
name: 'test',
posts: 939,
},
{
name: 'bees',
posts: 712,
},
{
name: 'killmail',
posts: 452,
},
];
constructor(
private store: Store<IAppState>,
) {
this.postList$ = this.store.pipe(select('post', 'list', 'latest'));
this.authenticated$ = this.store.pipe(select('authentication', 'authenticated'));
this.websocketConnected$ = this.store.pipe(select('websocket', 'connected'))
}
ngOnDestroy() {
this.websocketConnected$.pipe(
filter(connected => connected)
).subscribe(() => {
this.store.dispatch(new UnSubscribeFromLatestWall());
});
}
ngOnInit() {
this.page = 0;
this.authenticated$.pipe(
filter(authenticated => authenticated)
).subscribe(() => {
this.store.dispatch(new GetLatest({ page: this.page, limit: 20 }));
});
this.websocketConnected$.pipe(
filter(connected => connected)
).subscribe(() => {
this.store.dispatch(new SubscribeToLatestWall());
});
}
onScroll() {
this.page++;
this.store.dispatch(new GetLatest({ page: this.page, limit: 20 }));
}
}
|
authenticated$: Observable<boolean>;
|
random_line_split
|
toPromise-spec.ts
|
/** @prettier */
import { expect } from 'chai';
import { of, EMPTY, throwError, config } from 'rxjs';
/** @test {toPromise} */
describe('Observable.toPromise', () => {
it('should convert an Observable to a promise of its last value', (done) => {
of(1, 2, 3)
.toPromise(Promise)
.then((x) => {
expect(x).to.equal(3);
done();
});
});
it('should convert an empty Observable to a promise of undefined', (done) => {
EMPTY.toPromise(Promise).then((x) => {
expect(x).to.be.undefined;
|
done();
});
});
it('should handle errors properly', (done) => {
throwError(() => 'bad')
.toPromise(Promise)
.then(
() => {
done(new Error('should not be called'));
},
(err: any) => {
expect(err).to.equal('bad');
done();
}
);
});
it('should allow for global config via config.Promise', async () => {
try {
let wasCalled = false;
config.Promise = function MyPromise(callback: Function) {
wasCalled = true;
return new Promise(callback as any);
} as any;
const x = await of(42).toPromise();
expect(wasCalled).to.be.true;
expect(x).to.equal(42);
} finally {
config.Promise = undefined;
}
});
});
|
random_line_split
|
|
route_guard.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
extern crate rocket;
use std::path::PathBuf;
use rocket::Route;
#[get("/<path..>")]
fn files(route: &Route, path: PathBuf) -> String {
format!("{}/{}", route.base(), path.to_string_lossy())
}
mod route_guard_tests {
use super::*;
use rocket::local::Client;
fn assert_path(client: &Client, path: &str) {
let mut res = client.get(path).dispatch();
assert_eq!(res.body_string(), Some(path.into()));
}
#[test]
fn check_mount_path()
|
}
|
{
let rocket = rocket::ignite()
.mount("/first", routes![files])
.mount("/second", routes![files]);
let client = Client::new(rocket).unwrap();
assert_path(&client, "/first/some/path");
assert_path(&client, "/second/some/path");
assert_path(&client, "/first/second/b/c");
assert_path(&client, "/second/a/b/c");
}
|
identifier_body
|
route_guard.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
extern crate rocket;
use std::path::PathBuf;
use rocket::Route;
|
fn files(route: &Route, path: PathBuf) -> String {
format!("{}/{}", route.base(), path.to_string_lossy())
}
mod route_guard_tests {
use super::*;
use rocket::local::Client;
fn assert_path(client: &Client, path: &str) {
let mut res = client.get(path).dispatch();
assert_eq!(res.body_string(), Some(path.into()));
}
#[test]
fn check_mount_path() {
let rocket = rocket::ignite()
.mount("/first", routes![files])
.mount("/second", routes![files]);
let client = Client::new(rocket).unwrap();
assert_path(&client, "/first/some/path");
assert_path(&client, "/second/some/path");
assert_path(&client, "/first/second/b/c");
assert_path(&client, "/second/a/b/c");
}
}
|
#[get("/<path..>")]
|
random_line_split
|
route_guard.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
extern crate rocket;
use std::path::PathBuf;
use rocket::Route;
#[get("/<path..>")]
fn files(route: &Route, path: PathBuf) -> String {
format!("{}/{}", route.base(), path.to_string_lossy())
}
mod route_guard_tests {
use super::*;
use rocket::local::Client;
fn
|
(client: &Client, path: &str) {
let mut res = client.get(path).dispatch();
assert_eq!(res.body_string(), Some(path.into()));
}
#[test]
fn check_mount_path() {
let rocket = rocket::ignite()
.mount("/first", routes![files])
.mount("/second", routes![files]);
let client = Client::new(rocket).unwrap();
assert_path(&client, "/first/some/path");
assert_path(&client, "/second/some/path");
assert_path(&client, "/first/second/b/c");
assert_path(&client, "/second/a/b/c");
}
}
|
assert_path
|
identifier_name
|
index.tsx
|
/**
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { h, Component, FunctionalComponent } from 'preact';
import { Result, Topics, Champions } from 'shared/state';
import {
getResultLabel,
getResultTopic,
getResultChampionName,
fetchJSON,
escapePatchPathComponent,
} from 'client/utils';
import Select from '../../select';
interface Props {
topics: Topics;
champions: Champions;
path: string;
result?: Result;
onShowSlides: (url: string) => void;
onCreateVote: () => void;
}
const AdminSelectedBracketWrapper: FunctionalComponent<{}> = ({ children }) => (
<section>
<h1>Selected bracket</h1>
{children}
</section>
);
const TopicsSelect: FunctionalComponent<{
value: string;
topics: Topics;
onInput: (event: Event) => void;
}> = ({ onInput, topics, value }) => {
const sortedTopics = Object.entries(topics).sort((a, b) =>
a[1].label > b[1].label ? 1 : -1,
);
return (
<Select onInput={onInput} value={value}>
<option value=""></option>
{sortedTopics.map(([id, topic]) => (
<option value={id}>{topic.label}</option>
))}
</Select>
);
};
export default class AdminSelectedBracket extends Component<Props> {
private _onTopicSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/results${this.props.path}/items/${Number(
container.dataset.itemIndex,
)}`,
value: el.value,
},
],
});
};
private _onChampionSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
const topicId = this.props.result!.items[
Number(container.dataset.itemIndex)
];
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/topics/${escapePatchPathComponent(
topicId as string,
)}/championId`,
value: el.value,
},
],
});
};
private
|
(index: number) {
const path = this.props.path === '/' ? '' : this.props.path;
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/results${path}/winningIndex`,
value: index,
},
],
});
}
private _onWinnerSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
this._setWinner(Number(container.dataset.itemIndex));
};
private _onShowSlides = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
const index = Number(container.dataset.itemIndex);
const topic = getResultTopic(
this.props.topics,
this.props.result!.items[index],
);
this.props.onShowSlides(topic!.slidesURL);
};
private _onClearWinner = () => {
this._setWinner(-1);
};
private _onCreateVote = () => {
this.props.onCreateVote();
};
private _onZoomHere = () => {
let toHighlight: string[];
if (this.props.path === '/') {
toHighlight = ['0', '1'];
} else {
const parentPath = this.props.path
.split('/items/')
.slice(1)
.join('-');
toHighlight = [parentPath, parentPath + '-0', parentPath + '-1'];
}
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/bracketZoom`,
value: toHighlight,
},
],
});
};
render({ result, topics, champions }: Props) {
if (!result) {
return (
<AdminSelectedBracketWrapper>
<p>No bracket item selected</p>
</AdminSelectedBracketWrapper>
);
}
return (
<AdminSelectedBracketWrapper>
<div class="admin-form-items">
{result.items.map((item, i) => {
const isLeaf = typeof item === 'string';
const topic = getResultTopic(topics, item);
const topicAndChamp = isLeaf
? [
<div>
<label>
<span class="label">Topic</span>
<TopicsSelect
value={item as string}
topics={topics}
onInput={this._onTopicSelect}
/>
</label>
</div>,
<div>
<label>
<span class="label">Champion</span>
<Select
disabled={!topic}
value={topic ? topic.championId : ''}
onInput={this._onChampionSelect}
>
<option value="">{topic ? 'None' : ''}</option>
{Object.entries(champions).map(([id, champion]) => (
<option value={id}>{champion.name}</option>
))}
</Select>
</label>
</div>,
]
: [
<div>
<span class="label">Topic</span>{' '}
<span class="prefilled-input">
{getResultLabel(topics, item) || 'Pending'}
</span>
</div>,
<div>
<span class="label">Champion</span>{' '}
<span class="prefilled-input">
{getResultChampionName(topics, item, champions) ||
'Pending'}
</span>
</div>,
];
return (
<div class="admin-form-item" data-item-index={i}>
{topicAndChamp}
<div>
<button
class="button"
disabled={result.winningIndex === i}
onClick={this._onWinnerSelect}
>
Set as winner
</button>{' '}
{topic && topic.slidesURL && (
<button class="button" onClick={this._onShowSlides}>
Show slides
</button>
)}
</div>
</div>
);
})}
<div class="admin-form-extras">
<div class="label">Actions</div>
<div>
<button
class="button"
disabled={result.winningIndex === -1}
onClick={this._onClearWinner}
>
Clear winner
</button>{' '}
<button class="button" onClick={this._onCreateVote}>
Create vote
</button>{' '}
<button class="button" onClick={this._onZoomHere}>
Zoom here
</button>
</div>
</div>
</div>
</AdminSelectedBracketWrapper>
);
}
}
|
_setWinner
|
identifier_name
|
index.tsx
|
/**
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { h, Component, FunctionalComponent } from 'preact';
import { Result, Topics, Champions } from 'shared/state';
import {
getResultLabel,
getResultTopic,
getResultChampionName,
fetchJSON,
escapePatchPathComponent,
} from 'client/utils';
import Select from '../../select';
interface Props {
topics: Topics;
champions: Champions;
path: string;
result?: Result;
onShowSlides: (url: string) => void;
onCreateVote: () => void;
}
const AdminSelectedBracketWrapper: FunctionalComponent<{}> = ({ children }) => (
<section>
<h1>Selected bracket</h1>
{children}
</section>
);
const TopicsSelect: FunctionalComponent<{
value: string;
topics: Topics;
onInput: (event: Event) => void;
}> = ({ onInput, topics, value }) => {
const sortedTopics = Object.entries(topics).sort((a, b) =>
a[1].label > b[1].label ? 1 : -1,
);
return (
<Select onInput={onInput} value={value}>
<option value=""></option>
{sortedTopics.map(([id, topic]) => (
<option value={id}>{topic.label}</option>
))}
</Select>
);
};
export default class AdminSelectedBracket extends Component<Props> {
private _onTopicSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/results${this.props.path}/items/${Number(
container.dataset.itemIndex,
)}`,
value: el.value,
},
],
});
};
private _onChampionSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
const topicId = this.props.result!.items[
Number(container.dataset.itemIndex)
];
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/topics/${escapePatchPathComponent(
topicId as string,
)}/championId`,
value: el.value,
},
],
});
};
private _setWinner(index: number)
|
private _onWinnerSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
this._setWinner(Number(container.dataset.itemIndex));
};
private _onShowSlides = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
const index = Number(container.dataset.itemIndex);
const topic = getResultTopic(
this.props.topics,
this.props.result!.items[index],
);
this.props.onShowSlides(topic!.slidesURL);
};
private _onClearWinner = () => {
this._setWinner(-1);
};
private _onCreateVote = () => {
this.props.onCreateVote();
};
private _onZoomHere = () => {
let toHighlight: string[];
if (this.props.path === '/') {
toHighlight = ['0', '1'];
} else {
const parentPath = this.props.path
.split('/items/')
.slice(1)
.join('-');
toHighlight = [parentPath, parentPath + '-0', parentPath + '-1'];
}
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/bracketZoom`,
value: toHighlight,
},
],
});
};
render({ result, topics, champions }: Props) {
if (!result) {
return (
<AdminSelectedBracketWrapper>
<p>No bracket item selected</p>
</AdminSelectedBracketWrapper>
);
}
return (
<AdminSelectedBracketWrapper>
<div class="admin-form-items">
{result.items.map((item, i) => {
const isLeaf = typeof item === 'string';
const topic = getResultTopic(topics, item);
const topicAndChamp = isLeaf
? [
<div>
<label>
<span class="label">Topic</span>
<TopicsSelect
value={item as string}
topics={topics}
onInput={this._onTopicSelect}
/>
</label>
</div>,
<div>
<label>
<span class="label">Champion</span>
<Select
disabled={!topic}
value={topic ? topic.championId : ''}
onInput={this._onChampionSelect}
>
<option value="">{topic ? 'None' : ''}</option>
{Object.entries(champions).map(([id, champion]) => (
<option value={id}>{champion.name}</option>
))}
</Select>
</label>
</div>,
]
: [
<div>
<span class="label">Topic</span>{' '}
<span class="prefilled-input">
{getResultLabel(topics, item) || 'Pending'}
</span>
</div>,
<div>
<span class="label">Champion</span>{' '}
<span class="prefilled-input">
{getResultChampionName(topics, item, champions) ||
'Pending'}
</span>
</div>,
];
return (
<div class="admin-form-item" data-item-index={i}>
{topicAndChamp}
<div>
<button
class="button"
disabled={result.winningIndex === i}
onClick={this._onWinnerSelect}
>
Set as winner
</button>{' '}
{topic && topic.slidesURL && (
<button class="button" onClick={this._onShowSlides}>
Show slides
</button>
)}
</div>
</div>
);
})}
<div class="admin-form-extras">
<div class="label">Actions</div>
<div>
<button
class="button"
disabled={result.winningIndex === -1}
onClick={this._onClearWinner}
>
Clear winner
</button>{' '}
<button class="button" onClick={this._onCreateVote}>
Create vote
</button>{' '}
<button class="button" onClick={this._onZoomHere}>
Zoom here
</button>
</div>
</div>
</div>
</AdminSelectedBracketWrapper>
);
}
}
|
{
const path = this.props.path === '/' ? '' : this.props.path;
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/results${path}/winningIndex`,
value: index,
},
],
});
}
|
identifier_body
|
index.tsx
|
/**
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { h, Component, FunctionalComponent } from 'preact';
import { Result, Topics, Champions } from 'shared/state';
import {
getResultLabel,
getResultTopic,
getResultChampionName,
fetchJSON,
escapePatchPathComponent,
} from 'client/utils';
import Select from '../../select';
interface Props {
topics: Topics;
champions: Champions;
path: string;
result?: Result;
onShowSlides: (url: string) => void;
onCreateVote: () => void;
}
const AdminSelectedBracketWrapper: FunctionalComponent<{}> = ({ children }) => (
<section>
<h1>Selected bracket</h1>
{children}
</section>
);
const TopicsSelect: FunctionalComponent<{
value: string;
topics: Topics;
onInput: (event: Event) => void;
}> = ({ onInput, topics, value }) => {
const sortedTopics = Object.entries(topics).sort((a, b) =>
a[1].label > b[1].label ? 1 : -1,
);
return (
<Select onInput={onInput} value={value}>
<option value=""></option>
{sortedTopics.map(([id, topic]) => (
<option value={id}>{topic.label}</option>
))}
</Select>
);
};
export default class AdminSelectedBracket extends Component<Props> {
private _onTopicSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/results${this.props.path}/items/${Number(
container.dataset.itemIndex,
)}`,
value: el.value,
},
],
});
};
private _onChampionSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
const topicId = this.props.result!.items[
Number(container.dataset.itemIndex)
];
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/topics/${escapePatchPathComponent(
topicId as string,
)}/championId`,
value: el.value,
},
],
});
};
private _setWinner(index: number) {
const path = this.props.path === '/' ? '' : this.props.path;
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/results${path}/winningIndex`,
value: index,
},
],
});
}
private _onWinnerSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
this._setWinner(Number(container.dataset.itemIndex));
};
private _onShowSlides = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
const index = Number(container.dataset.itemIndex);
const topic = getResultTopic(
this.props.topics,
this.props.result!.items[index],
);
this.props.onShowSlides(topic!.slidesURL);
};
private _onClearWinner = () => {
this._setWinner(-1);
};
private _onCreateVote = () => {
this.props.onCreateVote();
};
private _onZoomHere = () => {
let toHighlight: string[];
if (this.props.path === '/')
|
else {
const parentPath = this.props.path
.split('/items/')
.slice(1)
.join('-');
toHighlight = [parentPath, parentPath + '-0', parentPath + '-1'];
}
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/bracketZoom`,
value: toHighlight,
},
],
});
};
render({ result, topics, champions }: Props) {
if (!result) {
return (
<AdminSelectedBracketWrapper>
<p>No bracket item selected</p>
</AdminSelectedBracketWrapper>
);
}
return (
<AdminSelectedBracketWrapper>
<div class="admin-form-items">
{result.items.map((item, i) => {
const isLeaf = typeof item === 'string';
const topic = getResultTopic(topics, item);
const topicAndChamp = isLeaf
? [
<div>
<label>
<span class="label">Topic</span>
<TopicsSelect
value={item as string}
topics={topics}
onInput={this._onTopicSelect}
/>
</label>
</div>,
<div>
<label>
<span class="label">Champion</span>
<Select
disabled={!topic}
value={topic ? topic.championId : ''}
onInput={this._onChampionSelect}
>
<option value="">{topic ? 'None' : ''}</option>
{Object.entries(champions).map(([id, champion]) => (
<option value={id}>{champion.name}</option>
))}
</Select>
</label>
</div>,
]
: [
<div>
<span class="label">Topic</span>{' '}
<span class="prefilled-input">
{getResultLabel(topics, item) || 'Pending'}
</span>
</div>,
<div>
<span class="label">Champion</span>{' '}
<span class="prefilled-input">
{getResultChampionName(topics, item, champions) ||
'Pending'}
</span>
</div>,
];
return (
<div class="admin-form-item" data-item-index={i}>
{topicAndChamp}
<div>
<button
class="button"
disabled={result.winningIndex === i}
onClick={this._onWinnerSelect}
>
Set as winner
</button>{' '}
{topic && topic.slidesURL && (
<button class="button" onClick={this._onShowSlides}>
Show slides
</button>
)}
</div>
</div>
);
})}
<div class="admin-form-extras">
<div class="label">Actions</div>
<div>
<button
class="button"
disabled={result.winningIndex === -1}
onClick={this._onClearWinner}
>
Clear winner
</button>{' '}
<button class="button" onClick={this._onCreateVote}>
Create vote
</button>{' '}
<button class="button" onClick={this._onZoomHere}>
Zoom here
</button>
</div>
</div>
</div>
</AdminSelectedBracketWrapper>
);
}
}
|
{
toHighlight = ['0', '1'];
}
|
conditional_block
|
index.tsx
|
/**
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { h, Component, FunctionalComponent } from 'preact';
import { Result, Topics, Champions } from 'shared/state';
import {
getResultLabel,
getResultTopic,
getResultChampionName,
fetchJSON,
escapePatchPathComponent,
} from 'client/utils';
import Select from '../../select';
interface Props {
topics: Topics;
champions: Champions;
path: string;
result?: Result;
onShowSlides: (url: string) => void;
onCreateVote: () => void;
}
const AdminSelectedBracketWrapper: FunctionalComponent<{}> = ({ children }) => (
<section>
<h1>Selected bracket</h1>
{children}
</section>
);
const TopicsSelect: FunctionalComponent<{
value: string;
topics: Topics;
onInput: (event: Event) => void;
}> = ({ onInput, topics, value }) => {
const sortedTopics = Object.entries(topics).sort((a, b) =>
a[1].label > b[1].label ? 1 : -1,
);
return (
<Select onInput={onInput} value={value}>
<option value=""></option>
{sortedTopics.map(([id, topic]) => (
<option value={id}>{topic.label}</option>
))}
</Select>
);
};
export default class AdminSelectedBracket extends Component<Props> {
private _onTopicSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/results${this.props.path}/items/${Number(
container.dataset.itemIndex,
)}`,
value: el.value,
},
],
});
};
private _onChampionSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
const topicId = this.props.result!.items[
Number(container.dataset.itemIndex)
];
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/topics/${escapePatchPathComponent(
topicId as string,
)}/championId`,
value: el.value,
},
],
});
};
private _setWinner(index: number) {
const path = this.props.path === '/' ? '' : this.props.path;
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/results${path}/winningIndex`,
value: index,
},
],
});
}
private _onWinnerSelect = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
this._setWinner(Number(container.dataset.itemIndex));
};
private _onShowSlides = (event: Event) => {
const el = event.currentTarget as HTMLSelectElement;
const container = el.closest('.admin-form-item') as HTMLElement;
const index = Number(container.dataset.itemIndex);
const topic = getResultTopic(
this.props.topics,
this.props.result!.items[index],
);
this.props.onShowSlides(topic!.slidesURL);
};
private _onClearWinner = () => {
this._setWinner(-1);
};
private _onCreateVote = () => {
this.props.onCreateVote();
};
private _onZoomHere = () => {
let toHighlight: string[];
|
.split('/items/')
.slice(1)
.join('-');
toHighlight = [parentPath, parentPath + '-0', parentPath + '-1'];
}
fetchJSON('/admin/patch', {
method: 'PATCH',
body: [
{
op: 'replace',
path: `/bracketZoom`,
value: toHighlight,
},
],
});
};
render({ result, topics, champions }: Props) {
if (!result) {
return (
<AdminSelectedBracketWrapper>
<p>No bracket item selected</p>
</AdminSelectedBracketWrapper>
);
}
return (
<AdminSelectedBracketWrapper>
<div class="admin-form-items">
{result.items.map((item, i) => {
const isLeaf = typeof item === 'string';
const topic = getResultTopic(topics, item);
const topicAndChamp = isLeaf
? [
<div>
<label>
<span class="label">Topic</span>
<TopicsSelect
value={item as string}
topics={topics}
onInput={this._onTopicSelect}
/>
</label>
</div>,
<div>
<label>
<span class="label">Champion</span>
<Select
disabled={!topic}
value={topic ? topic.championId : ''}
onInput={this._onChampionSelect}
>
<option value="">{topic ? 'None' : ''}</option>
{Object.entries(champions).map(([id, champion]) => (
<option value={id}>{champion.name}</option>
))}
</Select>
</label>
</div>,
]
: [
<div>
<span class="label">Topic</span>{' '}
<span class="prefilled-input">
{getResultLabel(topics, item) || 'Pending'}
</span>
</div>,
<div>
<span class="label">Champion</span>{' '}
<span class="prefilled-input">
{getResultChampionName(topics, item, champions) ||
'Pending'}
</span>
</div>,
];
return (
<div class="admin-form-item" data-item-index={i}>
{topicAndChamp}
<div>
<button
class="button"
disabled={result.winningIndex === i}
onClick={this._onWinnerSelect}
>
Set as winner
</button>{' '}
{topic && topic.slidesURL && (
<button class="button" onClick={this._onShowSlides}>
Show slides
</button>
)}
</div>
</div>
);
})}
<div class="admin-form-extras">
<div class="label">Actions</div>
<div>
<button
class="button"
disabled={result.winningIndex === -1}
onClick={this._onClearWinner}
>
Clear winner
</button>{' '}
<button class="button" onClick={this._onCreateVote}>
Create vote
</button>{' '}
<button class="button" onClick={this._onZoomHere}>
Zoom here
</button>
</div>
</div>
</div>
</AdminSelectedBracketWrapper>
);
}
}
|
if (this.props.path === '/') {
toHighlight = ['0', '1'];
} else {
const parentPath = this.props.path
|
random_line_split
|
FileScanner.py
|
# scan a set of file
from __future__ import print_function
import os
import fnmatch
import tempfile
from ScanFile import ScanFile
class FileScanner:
def __init__(self, handler=None, ignore_filters=None, scanners=None,
error_handler=None, ram_bytes=10 * 1024 * 1024,
skip_handler=None):
"""the handler will be called with all the scanned files.
the optional ignore_filters contains a list of glob pattern to
ignore file names"""
self.handler = handler
self.error_handler = error_handler
self.skip_handler = skip_handler
self.ignore_filters = ignore_filters
self.scanners = scanners
self.ram_bytes = ram_bytes
def scan(self, path):
"""start scanning a path. either a file or directory"""
if os.path.isdir(path):
return self._scan_dir(path)
elif os.path.isfile(path):
return self._scan_file(path)
else:
return True
def scan_obj(self, scan_file, check_ignore=True):
"""pass a ScanFile to check"""
if check_ignore and self._is_ignored(scan_file.get_local_path()):
return False
# does a scanner match?
sf = scan_file
sc = self.scanners
if sc is not None:
for s in sc:
if s.can_handle(sf):
ok = s.handle(sf, self)
sf.close()
return ok
# no match call user's handler
ok = self._call_handler(sf)
sf.close()
return ok
def _scan_dir(self, path):
if self._is_ignored(path):
return True
for root, dirs, files in os.walk(path):
for name in files:
if not self._scan_file(os.path.join(root,name)):
return False
for name in dirs:
if not self._scan_dir(os.path.join(root,name)):
return False
return True
def _scan_file(self, path):
if self._is_ignored(path):
return True
# build a scan file
try:
size = os.path.getsize(path)
with open(path, "rb") as fobj:
sf = ScanFile(path, fobj, size, True, True)
return self.scan_obj(sf, False)
except IOError as e:
eh = self.error_handler
if eh is not None:
sf = ScanFile(path, None, 0)
return eh(sf, e)
else:
# ignore error
return True
def _is_ignored(self, path):
if self.ignore_filters is not None:
base = os.path.basename(path)
for f in self.ignore_filters:
if fnmatch.fnmatch(base, f):
return True
return False
def _call_handler(self, scan_file):
if self.handler is not None:
return self.handler(scan_file)
else:
return True
def _call_skip_handler(self, scan_file):
if self.skip_handler is not None:
return self.skip_handler(scan_file)
else:
return True
def promote_scan_file(self, scan_file, seekable=False, file_based=False):
if not seekable and not file_base:
return scan_file
fb = file_based
if not fb and seekable and scan_file.size > self.ram_bytes:
fb = True
sf = scan_file.create_clone(seekable, fb)
scan_file.close()
return sf
# mini test
if __name__ == '__main__':
import sys
ifs = ['*.txt']
def handler(scan_file):
|
def error_handler(scan_file, error):
print("FAILED:", scan_file, error)
raise error
fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler)
for a in sys.argv[1:]:
fs.scan(a)
|
print(scan_file)
return True
|
identifier_body
|
FileScanner.py
|
# scan a set of file
from __future__ import print_function
import os
import fnmatch
import tempfile
from ScanFile import ScanFile
class FileScanner:
def __init__(self, handler=None, ignore_filters=None, scanners=None,
error_handler=None, ram_bytes=10 * 1024 * 1024,
skip_handler=None):
"""the handler will be called with all the scanned files.
the optional ignore_filters contains a list of glob pattern to
ignore file names"""
self.handler = handler
self.error_handler = error_handler
self.skip_handler = skip_handler
self.ignore_filters = ignore_filters
self.scanners = scanners
self.ram_bytes = ram_bytes
def scan(self, path):
"""start scanning a path. either a file or directory"""
if os.path.isdir(path):
return self._scan_dir(path)
elif os.path.isfile(path):
return self._scan_file(path)
else:
return True
def scan_obj(self, scan_file, check_ignore=True):
"""pass a ScanFile to check"""
if check_ignore and self._is_ignored(scan_file.get_local_path()):
return False
# does a scanner match?
sf = scan_file
sc = self.scanners
if sc is not None:
for s in sc:
if s.can_handle(sf):
ok = s.handle(sf, self)
sf.close()
return ok
# no match call user's handler
ok = self._call_handler(sf)
sf.close()
return ok
def _scan_dir(self, path):
if self._is_ignored(path):
return True
for root, dirs, files in os.walk(path):
for name in files:
if not self._scan_file(os.path.join(root,name)):
return False
for name in dirs:
if not self._scan_dir(os.path.join(root,name)):
return False
return True
def _scan_file(self, path):
if self._is_ignored(path):
return True
# build a scan file
try:
size = os.path.getsize(path)
with open(path, "rb") as fobj:
sf = ScanFile(path, fobj, size, True, True)
return self.scan_obj(sf, False)
except IOError as e:
eh = self.error_handler
if eh is not None:
sf = ScanFile(path, None, 0)
return eh(sf, e)
else:
# ignore error
return True
def _is_ignored(self, path):
if self.ignore_filters is not None:
base = os.path.basename(path)
for f in self.ignore_filters:
if fnmatch.fnmatch(base, f):
return True
return False
def _call_handler(self, scan_file):
if self.handler is not None:
return self.handler(scan_file)
else:
return True
def _call_skip_handler(self, scan_file):
if self.skip_handler is not None:
return self.skip_handler(scan_file)
else:
return True
def promote_scan_file(self, scan_file, seekable=False, file_based=False):
if not seekable and not file_base:
return scan_file
fb = file_based
if not fb and seekable and scan_file.size > self.ram_bytes:
fb = True
sf = scan_file.create_clone(seekable, fb)
scan_file.close()
return sf
# mini test
if __name__ == '__main__':
import sys
ifs = ['*.txt']
def handler(scan_file):
print(scan_file)
return True
def error_handler(scan_file, error):
print("FAILED:", scan_file, error)
raise error
|
fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler)
for a in sys.argv[1:]:
fs.scan(a)
|
random_line_split
|
|
FileScanner.py
|
# scan a set of file
from __future__ import print_function
import os
import fnmatch
import tempfile
from ScanFile import ScanFile
class FileScanner:
def __init__(self, handler=None, ignore_filters=None, scanners=None,
error_handler=None, ram_bytes=10 * 1024 * 1024,
skip_handler=None):
"""the handler will be called with all the scanned files.
the optional ignore_filters contains a list of glob pattern to
ignore file names"""
self.handler = handler
self.error_handler = error_handler
self.skip_handler = skip_handler
self.ignore_filters = ignore_filters
self.scanners = scanners
self.ram_bytes = ram_bytes
def scan(self, path):
"""start scanning a path. either a file or directory"""
if os.path.isdir(path):
return self._scan_dir(path)
elif os.path.isfile(path):
return self._scan_file(path)
else:
return True
def scan_obj(self, scan_file, check_ignore=True):
"""pass a ScanFile to check"""
if check_ignore and self._is_ignored(scan_file.get_local_path()):
return False
# does a scanner match?
sf = scan_file
sc = self.scanners
if sc is not None:
for s in sc:
if s.can_handle(sf):
ok = s.handle(sf, self)
sf.close()
return ok
# no match call user's handler
ok = self._call_handler(sf)
sf.close()
return ok
def _scan_dir(self, path):
if self._is_ignored(path):
return True
for root, dirs, files in os.walk(path):
|
return True
def _scan_file(self, path):
if self._is_ignored(path):
return True
# build a scan file
try:
size = os.path.getsize(path)
with open(path, "rb") as fobj:
sf = ScanFile(path, fobj, size, True, True)
return self.scan_obj(sf, False)
except IOError as e:
eh = self.error_handler
if eh is not None:
sf = ScanFile(path, None, 0)
return eh(sf, e)
else:
# ignore error
return True
def _is_ignored(self, path):
if self.ignore_filters is not None:
base = os.path.basename(path)
for f in self.ignore_filters:
if fnmatch.fnmatch(base, f):
return True
return False
def _call_handler(self, scan_file):
if self.handler is not None:
return self.handler(scan_file)
else:
return True
def _call_skip_handler(self, scan_file):
if self.skip_handler is not None:
return self.skip_handler(scan_file)
else:
return True
def promote_scan_file(self, scan_file, seekable=False, file_based=False):
if not seekable and not file_base:
return scan_file
fb = file_based
if not fb and seekable and scan_file.size > self.ram_bytes:
fb = True
sf = scan_file.create_clone(seekable, fb)
scan_file.close()
return sf
# mini test
if __name__ == '__main__':
import sys
ifs = ['*.txt']
def handler(scan_file):
print(scan_file)
return True
def error_handler(scan_file, error):
print("FAILED:", scan_file, error)
raise error
fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler)
for a in sys.argv[1:]:
fs.scan(a)
|
for name in files:
if not self._scan_file(os.path.join(root,name)):
return False
for name in dirs:
if not self._scan_dir(os.path.join(root,name)):
return False
|
conditional_block
|
FileScanner.py
|
# scan a set of file
from __future__ import print_function
import os
import fnmatch
import tempfile
from ScanFile import ScanFile
class FileScanner:
def __init__(self, handler=None, ignore_filters=None, scanners=None,
error_handler=None, ram_bytes=10 * 1024 * 1024,
skip_handler=None):
"""the handler will be called with all the scanned files.
the optional ignore_filters contains a list of glob pattern to
ignore file names"""
self.handler = handler
self.error_handler = error_handler
self.skip_handler = skip_handler
self.ignore_filters = ignore_filters
self.scanners = scanners
self.ram_bytes = ram_bytes
def scan(self, path):
"""start scanning a path. either a file or directory"""
if os.path.isdir(path):
return self._scan_dir(path)
elif os.path.isfile(path):
return self._scan_file(path)
else:
return True
def scan_obj(self, scan_file, check_ignore=True):
"""pass a ScanFile to check"""
if check_ignore and self._is_ignored(scan_file.get_local_path()):
return False
# does a scanner match?
sf = scan_file
sc = self.scanners
if sc is not None:
for s in sc:
if s.can_handle(sf):
ok = s.handle(sf, self)
sf.close()
return ok
# no match call user's handler
ok = self._call_handler(sf)
sf.close()
return ok
def _scan_dir(self, path):
if self._is_ignored(path):
return True
for root, dirs, files in os.walk(path):
for name in files:
if not self._scan_file(os.path.join(root,name)):
return False
for name in dirs:
if not self._scan_dir(os.path.join(root,name)):
return False
return True
def _scan_file(self, path):
if self._is_ignored(path):
return True
# build a scan file
try:
size = os.path.getsize(path)
with open(path, "rb") as fobj:
sf = ScanFile(path, fobj, size, True, True)
return self.scan_obj(sf, False)
except IOError as e:
eh = self.error_handler
if eh is not None:
sf = ScanFile(path, None, 0)
return eh(sf, e)
else:
# ignore error
return True
def _is_ignored(self, path):
if self.ignore_filters is not None:
base = os.path.basename(path)
for f in self.ignore_filters:
if fnmatch.fnmatch(base, f):
return True
return False
def _call_handler(self, scan_file):
if self.handler is not None:
return self.handler(scan_file)
else:
return True
def _call_skip_handler(self, scan_file):
if self.skip_handler is not None:
return self.skip_handler(scan_file)
else:
return True
def
|
(self, scan_file, seekable=False, file_based=False):
if not seekable and not file_base:
return scan_file
fb = file_based
if not fb and seekable and scan_file.size > self.ram_bytes:
fb = True
sf = scan_file.create_clone(seekable, fb)
scan_file.close()
return sf
# mini test
if __name__ == '__main__':
import sys
ifs = ['*.txt']
def handler(scan_file):
print(scan_file)
return True
def error_handler(scan_file, error):
print("FAILED:", scan_file, error)
raise error
fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler)
for a in sys.argv[1:]:
fs.scan(a)
|
promote_scan_file
|
identifier_name
|
rcvr-borrowed-to-region.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait get {
fn get(self) -> int;
}
// Note: impl on a slice; we're checking that the pointers below
// correctly get borrowed to `&`. (similar to impling for `int`, with
// `&self` instead of `self`.)
impl<'self> get for &'self int {
fn get(self) -> int {
return *self;
}
}
|
assert_eq!(y, 6);
let x = @6;
let y = x.get();
info2!("y={}", y);
assert_eq!(y, 6);
let x = ~6;
let y = x.get();
info2!("y={}", y);
assert_eq!(y, 6);
let x = &6;
let y = x.get();
info2!("y={}", y);
assert_eq!(y, 6);
}
|
pub fn main() {
let x = @mut 6;
let y = x.get();
|
random_line_split
|
rcvr-borrowed-to-region.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait get {
fn get(self) -> int;
}
// Note: impl on a slice; we're checking that the pointers below
// correctly get borrowed to `&`. (similar to impling for `int`, with
// `&self` instead of `self`.)
impl<'self> get for &'self int {
fn get(self) -> int {
return *self;
}
}
pub fn main()
|
{
let x = @mut 6;
let y = x.get();
assert_eq!(y, 6);
let x = @6;
let y = x.get();
info2!("y={}", y);
assert_eq!(y, 6);
let x = ~6;
let y = x.get();
info2!("y={}", y);
assert_eq!(y, 6);
let x = &6;
let y = x.get();
info2!("y={}", y);
assert_eq!(y, 6);
}
|
identifier_body
|
|
rcvr-borrowed-to-region.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait get {
fn get(self) -> int;
}
// Note: impl on a slice; we're checking that the pointers below
// correctly get borrowed to `&`. (similar to impling for `int`, with
// `&self` instead of `self`.)
impl<'self> get for &'self int {
fn
|
(self) -> int {
return *self;
}
}
pub fn main() {
let x = @mut 6;
let y = x.get();
assert_eq!(y, 6);
let x = @6;
let y = x.get();
info2!("y={}", y);
assert_eq!(y, 6);
let x = ~6;
let y = x.get();
info2!("y={}", y);
assert_eq!(y, 6);
let x = &6;
let y = x.get();
info2!("y={}", y);
assert_eq!(y, 6);
}
|
get
|
identifier_name
|
ast.rs
|
use std::fmt;
use std::from_str::FromStr;
use operators::{Operator, Sub, Skip, Loop};
/**
The internal parsed representation of a program source.
*/
pub struct Ast(~[Operator]);
impl Ast {
/**
Produce an AST from a source string.
This is the most commod method to generate an Ast.
*/
pub fn parse_str(source: &str) -> Result<Ast, ~str> {
/*
We parse loops by making a context to group its operators,
pushing on it until the matching loop end. As we create the
context, we push the previous one onto a stack. After the
nest has been collected, we pop the context and replace it
with the subprocess operator.
*/
let mut stack:~[ ~[Operator] ] = ~[];
let mut ops: ~[Operator] = ~[];
for token in source.chars() {
match from_str::<Operator>(token.to_str()) {
/*
Start of a loop. Produce a new context in which
to push operators, and push the old one on the
stack.
*/
Some(Skip) => {
stack.push(ops);
ops = ~[];
}
/*
End of a loop. Make a subprocess operator out of
the just-collected context, and push that on the
previous context.
*/
Some(Loop) => {
let sub_ast = Sub(Ast( ops ));
// Try to pop the previous context from the stack.
// If this does not work, it's an unmatched `]`.
ops = match stack.pop() {
Some(ops) => ops,
_ => return Err(~"Unmatched `]`."),
};
ops.push(sub_ast);
}
// Push the operator onto the context.
Some(op) => ops.push(op),
// Unknown. Probably comments. Nop.
_ => continue
}
}
// If we still have things on the stack, then we have one or
// more unmatched `[`.
if ! stack.is_empty() {
return Err(~"Unmatched `[`.");
}
// Everything went well.
return Ok(Ast(ops));
}
}
impl FromStr for Ast {
fn from_str(source: &str) -> Option<Ast> {
Ast::parse_str(source).ok()
}
}
impl fmt::Show for Ast {
/**
Parses a string into the matching operator.
*/
|
)
}
}
|
fn fmt(&self, f:&mut fmt::Formatter) -> fmt::Result {
let &Ast(ref ops) = self;
let display = |op: &Operator| -> ~str { format!("{}", op) };
let repr: ~[~str] = ops.iter().map(display).collect();
f.buf.write(format!("{}", repr.concat()).as_bytes()
|
random_line_split
|
ast.rs
|
use std::fmt;
use std::from_str::FromStr;
use operators::{Operator, Sub, Skip, Loop};
/**
The internal parsed representation of a program source.
*/
pub struct Ast(~[Operator]);
impl Ast {
/**
Produce an AST from a source string.
This is the most commod method to generate an Ast.
*/
pub fn parse_str(source: &str) -> Result<Ast, ~str>
|
}
impl FromStr for Ast {
fn from_str(source: &str) -> Option<Ast> {
Ast::parse_str(source).ok()
}
}
impl fmt::Show for Ast {
/**
Parses a string into the matching operator.
*/
fn fmt(&self, f:&mut fmt::Formatter) -> fmt::Result {
let &Ast(ref ops) = self;
let display = |op: &Operator| -> ~str { format!("{}", op) };
let repr: ~[~str] = ops.iter().map(display).collect();
f.buf.write(format!("{}", repr.concat()).as_bytes()
)
}
}
|
{
/*
We parse loops by making a context to group its operators,
pushing on it until the matching loop end. As we create the
context, we push the previous one onto a stack. After the
nest has been collected, we pop the context and replace it
with the subprocess operator.
*/
let mut stack:~[ ~[Operator] ] = ~[];
let mut ops: ~[Operator] = ~[];
for token in source.chars() {
match from_str::<Operator>(token.to_str()) {
/*
Start of a loop. Produce a new context in which
to push operators, and push the old one on the
stack.
*/
Some(Skip) => {
stack.push(ops);
ops = ~[];
}
/*
End of a loop. Make a subprocess operator out of
the just-collected context, and push that on the
previous context.
*/
Some(Loop) => {
let sub_ast = Sub(Ast( ops ));
// Try to pop the previous context from the stack.
// If this does not work, it's an unmatched `]`.
ops = match stack.pop() {
Some(ops) => ops,
_ => return Err(~"Unmatched `]`."),
};
ops.push(sub_ast);
}
// Push the operator onto the context.
Some(op) => ops.push(op),
// Unknown. Probably comments. Nop.
_ => continue
}
}
// If we still have things on the stack, then we have one or
// more unmatched `[`.
if ! stack.is_empty() {
return Err(~"Unmatched `[`.");
}
// Everything went well.
return Ok(Ast(ops));
}
|
identifier_body
|
ast.rs
|
use std::fmt;
use std::from_str::FromStr;
use operators::{Operator, Sub, Skip, Loop};
/**
The internal parsed representation of a program source.
*/
pub struct
|
(~[Operator]);
impl Ast {
/**
Produce an AST from a source string.
This is the most commod method to generate an Ast.
*/
pub fn parse_str(source: &str) -> Result<Ast, ~str> {
/*
We parse loops by making a context to group its operators,
pushing on it until the matching loop end. As we create the
context, we push the previous one onto a stack. After the
nest has been collected, we pop the context and replace it
with the subprocess operator.
*/
let mut stack:~[ ~[Operator] ] = ~[];
let mut ops: ~[Operator] = ~[];
for token in source.chars() {
match from_str::<Operator>(token.to_str()) {
/*
Start of a loop. Produce a new context in which
to push operators, and push the old one on the
stack.
*/
Some(Skip) => {
stack.push(ops);
ops = ~[];
}
/*
End of a loop. Make a subprocess operator out of
the just-collected context, and push that on the
previous context.
*/
Some(Loop) => {
let sub_ast = Sub(Ast( ops ));
// Try to pop the previous context from the stack.
// If this does not work, it's an unmatched `]`.
ops = match stack.pop() {
Some(ops) => ops,
_ => return Err(~"Unmatched `]`."),
};
ops.push(sub_ast);
}
// Push the operator onto the context.
Some(op) => ops.push(op),
// Unknown. Probably comments. Nop.
_ => continue
}
}
// If we still have things on the stack, then we have one or
// more unmatched `[`.
if ! stack.is_empty() {
return Err(~"Unmatched `[`.");
}
// Everything went well.
return Ok(Ast(ops));
}
}
impl FromStr for Ast {
fn from_str(source: &str) -> Option<Ast> {
Ast::parse_str(source).ok()
}
}
impl fmt::Show for Ast {
/**
Parses a string into the matching operator.
*/
fn fmt(&self, f:&mut fmt::Formatter) -> fmt::Result {
let &Ast(ref ops) = self;
let display = |op: &Operator| -> ~str { format!("{}", op) };
let repr: ~[~str] = ops.iter().map(display).collect();
f.buf.write(format!("{}", repr.concat()).as_bytes()
)
}
}
|
Ast
|
identifier_name
|
scale.rs
|
/// Enum that describes how the
/// transcription from a value to a color is done.
pub enum Scale {
/// Linearly translate a value range to a color range
Linear { min: f32, max: f32},
/// Log (ish) translation from a value range to a color range
Log { min: f32, max: f32},
/// Exponantial (ish) translation from a value range to a color range
Exponential { min: f32, max: f32},
/// Apply no transformation
Equal,
}
#[inline] // Called once per pixel, I believe it make sense to inline it (might be wrong)
/// Describe how a value will be translated to a color domain
///
/// Returns a float in [0; 1]
///
pub fn
|
(scale: &Scale, value: f32) -> f32 {
match scale {
&Scale::Linear{min, max} => {
if value < min {
0.
} else if value > max {
1.
} else {
(value - min) / (max - min)
}
},
&Scale::Log{min, max} => {
if value < min {
0.
} else if value > max {
1.
} else {
(1. + value - min).log(10.) / (1. + max - min).log(10.)
}
},
&Scale::Exponential {min, max} => {
if value <= min {
0.
} else if value >= max {
1.
} else {
((value - min) / (max - min)).exp() / (1_f32).exp()
}
},
&Scale::Equal => { value },
}
}
|
normalize
|
identifier_name
|
scale.rs
|
/// Enum that describes how the
/// transcription from a value to a color is done.
pub enum Scale {
/// Linearly translate a value range to a color range
Linear { min: f32, max: f32},
/// Log (ish) translation from a value range to a color range
Log { min: f32, max: f32},
/// Exponantial (ish) translation from a value range to a color range
Exponential { min: f32, max: f32},
/// Apply no transformation
Equal,
}
#[inline] // Called once per pixel, I believe it make sense to inline it (might be wrong)
/// Describe how a value will be translated to a color domain
///
/// Returns a float in [0; 1]
///
pub fn normalize(scale: &Scale, value: f32) -> f32 {
match scale {
&Scale::Linear{min, max} => {
if value < min {
0.
} else if value > max {
1.
} else {
(value - min) / (max - min)
}
},
&Scale::Log{min, max} => {
if value < min {
0.
} else if value > max {
1.
} else {
(1. + value - min).log(10.) / (1. + max - min).log(10.)
}
},
&Scale::Exponential {min, max} => {
if value <= min {
0.
} else if value >= max {
1.
} else {
((value - min) / (max - min)).exp() / (1_f32).exp()
}
},
&Scale::Equal =>
|
,
}
}
|
{ value }
|
conditional_block
|
scale.rs
|
/// Enum that describes how the
/// transcription from a value to a color is done.
pub enum Scale {
/// Linearly translate a value range to a color range
Linear { min: f32, max: f32},
/// Log (ish) translation from a value range to a color range
Log { min: f32, max: f32},
/// Exponantial (ish) translation from a value range to a color range
Exponential { min: f32, max: f32},
/// Apply no transformation
Equal,
}
#[inline] // Called once per pixel, I believe it make sense to inline it (might be wrong)
/// Describe how a value will be translated to a color domain
///
/// Returns a float in [0; 1]
///
pub fn normalize(scale: &Scale, value: f32) -> f32 {
match scale {
&Scale::Linear{min, max} => {
if value < min {
0.
} else if value > max {
1.
} else {
(value - min) / (max - min)
}
},
&Scale::Log{min, max} => {
if value < min {
0.
} else if value > max {
1.
} else {
(1. + value - min).log(10.) / (1. + max - min).log(10.)
}
},
&Scale::Exponential {min, max} => {
if value <= min {
0.
} else if value >= max {
1.
} else {
((value - min) / (max - min)).exp() / (1_f32).exp()
}
|
}
}
|
},
&Scale::Equal => { value },
|
random_line_split
|
RESOURCE_TYPES.ts
|
import indexById from 'common/indexById';
import { ClassResources } from 'parser/core/Events';
export interface Resource {
id: number;
name: string;
icon: string;
url: string;
}
const RESOURCE_TYPES: { [key: string]: Resource } = {
MANA: {
// Paladin, Priest, Shaman, Mage, Warlock, Monk, Druid
id: 0,
name: 'Mana',
icon: 'inv_elemental_mote_mana',
url: 'mana',
},
RAGE: {
// Warrior, Druid
id: 1,
name: 'Rage',
icon: 'spell_misc_emotionangry',
url: 'rage',
},
FOCUS: {
// Hunter
id: 2,
name: 'Focus',
icon: 'ability_hunter_focusfire',
url: 'focus',
},
ENERGY: {
// Rogue, Monk, Druid
id: 3,
name: 'Energy',
icon: 'spell_shadow_shadowworddominate',
url: 'energy',
},
COMBO_POINTS: {
// Rogue, Druid
id: 4,
name: 'Combo Points',
icon: 'inv_mace_2h_pvp410_c_01',
url: 'combo-points',
},
RUNES: {
// Death Knight
id: 5,
name: 'Runes',
icon: 'spell_deathknight_frozenruneweapon',
url: 'runes',
},
RUNIC_POWER: {
// Death Knight
id: 6,
name: 'Runic Power',
icon: 'inv_sword_62',
url: 'runic-power',
},
SOUL_SHARDS: {
// Warlock
id: 7,
name: 'Soul Shards',
icon: 'inv_misc_gem_amethyst_02',
url: 'soul-shards',
},
ASTRAL_POWER: {
// Druid
id: 8,
name: 'Astral Power',
icon: 'ability_druid_eclipseorange',
url: 'astral-power',
},
HOLY_POWER: {
// Paladin
id: 9,
name: 'Holy Power',
icon: 'achievement_bg_winsoa',
url: 'holy-power',
},
ALTERNATE_POWER: {
// Used for encounter-specific resources like Torment on Demonic Inqusition
id: 10,
name: 'Alternate Power',
icon: 'trade_engineering',
url: '',
},
MAELSTROM: {
// Shaman
id: 11,
name: 'Maelstrom',
icon: 'spell_fire_masterofelements',
url: 'maelstrom',
},
CHI: {
// Monk
id: 12,
name: 'Chi',
icon: 'ability_monk_healthsphere',
url: 'chi',
},
INSANITY: {
// Priest
id: 13,
name: 'Insanity',
icon: 'spell_priest_shadoworbs',
url: 'insanity',
},
// 14 is obsolete
// 15 is obsolete
ARCANE_CHARGES: {
// Mage
id: 16,
name: 'Arcane Charges',
icon: 'spell_arcane_arcane01',
url: 'arcane-charges',
},
FURY: {
// Demon Hunter
id: 17,
name: 'Fury',
icon: 'ability_demonhunter_eyebeam',
url: 'fury',
},
PAIN: {
// Demon Hunter
id: 18,
name: 'Pain',
icon: 'ability_demonhunter_demonspikes',
url: 'pain',
},
};
export default indexById(RESOURCE_TYPES);
export function getResource(classResources: ClassResources[] | undefined, type: number) {
if (!classResources)
|
return classResources.find((resource) => resource.type === type);
}
|
{
return undefined;
}
|
conditional_block
|
RESOURCE_TYPES.ts
|
import indexById from 'common/indexById';
import { ClassResources } from 'parser/core/Events';
export interface Resource {
id: number;
name: string;
icon: string;
url: string;
}
const RESOURCE_TYPES: { [key: string]: Resource } = {
MANA: {
// Paladin, Priest, Shaman, Mage, Warlock, Monk, Druid
id: 0,
name: 'Mana',
icon: 'inv_elemental_mote_mana',
url: 'mana',
},
RAGE: {
// Warrior, Druid
id: 1,
name: 'Rage',
icon: 'spell_misc_emotionangry',
url: 'rage',
},
FOCUS: {
// Hunter
id: 2,
name: 'Focus',
icon: 'ability_hunter_focusfire',
url: 'focus',
},
ENERGY: {
// Rogue, Monk, Druid
id: 3,
name: 'Energy',
icon: 'spell_shadow_shadowworddominate',
url: 'energy',
},
COMBO_POINTS: {
// Rogue, Druid
id: 4,
name: 'Combo Points',
icon: 'inv_mace_2h_pvp410_c_01',
url: 'combo-points',
},
RUNES: {
// Death Knight
id: 5,
name: 'Runes',
icon: 'spell_deathknight_frozenruneweapon',
url: 'runes',
},
RUNIC_POWER: {
// Death Knight
id: 6,
name: 'Runic Power',
icon: 'inv_sword_62',
url: 'runic-power',
},
SOUL_SHARDS: {
// Warlock
id: 7,
name: 'Soul Shards',
icon: 'inv_misc_gem_amethyst_02',
url: 'soul-shards',
},
ASTRAL_POWER: {
// Druid
id: 8,
name: 'Astral Power',
icon: 'ability_druid_eclipseorange',
url: 'astral-power',
},
HOLY_POWER: {
// Paladin
id: 9,
name: 'Holy Power',
icon: 'achievement_bg_winsoa',
url: 'holy-power',
},
ALTERNATE_POWER: {
// Used for encounter-specific resources like Torment on Demonic Inqusition
id: 10,
name: 'Alternate Power',
icon: 'trade_engineering',
url: '',
},
MAELSTROM: {
// Shaman
id: 11,
name: 'Maelstrom',
icon: 'spell_fire_masterofelements',
url: 'maelstrom',
},
CHI: {
// Monk
id: 12,
name: 'Chi',
icon: 'ability_monk_healthsphere',
url: 'chi',
},
INSANITY: {
// Priest
id: 13,
name: 'Insanity',
icon: 'spell_priest_shadoworbs',
url: 'insanity',
},
// 14 is obsolete
// 15 is obsolete
ARCANE_CHARGES: {
// Mage
id: 16,
name: 'Arcane Charges',
icon: 'spell_arcane_arcane01',
url: 'arcane-charges',
},
FURY: {
// Demon Hunter
id: 17,
name: 'Fury',
icon: 'ability_demonhunter_eyebeam',
url: 'fury',
},
PAIN: {
// Demon Hunter
id: 18,
name: 'Pain',
icon: 'ability_demonhunter_demonspikes',
url: 'pain',
},
};
export default indexById(RESOURCE_TYPES);
export function getResource(classResources: ClassResources[] | undefined, type: number)
|
{
if (!classResources) {
return undefined;
}
return classResources.find((resource) => resource.type === type);
}
|
identifier_body
|
|
RESOURCE_TYPES.ts
|
import indexById from 'common/indexById';
import { ClassResources } from 'parser/core/Events';
export interface Resource {
id: number;
name: string;
icon: string;
url: string;
}
const RESOURCE_TYPES: { [key: string]: Resource } = {
MANA: {
// Paladin, Priest, Shaman, Mage, Warlock, Monk, Druid
id: 0,
name: 'Mana',
icon: 'inv_elemental_mote_mana',
url: 'mana',
},
RAGE: {
// Warrior, Druid
id: 1,
name: 'Rage',
icon: 'spell_misc_emotionangry',
url: 'rage',
},
FOCUS: {
// Hunter
id: 2,
name: 'Focus',
icon: 'ability_hunter_focusfire',
url: 'focus',
},
ENERGY: {
// Rogue, Monk, Druid
id: 3,
name: 'Energy',
icon: 'spell_shadow_shadowworddominate',
url: 'energy',
},
COMBO_POINTS: {
// Rogue, Druid
id: 4,
name: 'Combo Points',
icon: 'inv_mace_2h_pvp410_c_01',
url: 'combo-points',
},
RUNES: {
// Death Knight
id: 5,
name: 'Runes',
icon: 'spell_deathknight_frozenruneweapon',
url: 'runes',
},
RUNIC_POWER: {
// Death Knight
id: 6,
name: 'Runic Power',
icon: 'inv_sword_62',
url: 'runic-power',
},
SOUL_SHARDS: {
// Warlock
id: 7,
name: 'Soul Shards',
icon: 'inv_misc_gem_amethyst_02',
url: 'soul-shards',
},
ASTRAL_POWER: {
// Druid
id: 8,
name: 'Astral Power',
icon: 'ability_druid_eclipseorange',
url: 'astral-power',
},
HOLY_POWER: {
// Paladin
id: 9,
name: 'Holy Power',
icon: 'achievement_bg_winsoa',
url: 'holy-power',
},
ALTERNATE_POWER: {
// Used for encounter-specific resources like Torment on Demonic Inqusition
id: 10,
name: 'Alternate Power',
icon: 'trade_engineering',
url: '',
},
MAELSTROM: {
// Shaman
id: 11,
name: 'Maelstrom',
icon: 'spell_fire_masterofelements',
url: 'maelstrom',
},
CHI: {
// Monk
id: 12,
name: 'Chi',
icon: 'ability_monk_healthsphere',
url: 'chi',
},
INSANITY: {
// Priest
id: 13,
name: 'Insanity',
icon: 'spell_priest_shadoworbs',
url: 'insanity',
},
// 14 is obsolete
// 15 is obsolete
ARCANE_CHARGES: {
// Mage
id: 16,
name: 'Arcane Charges',
icon: 'spell_arcane_arcane01',
url: 'arcane-charges',
},
FURY: {
// Demon Hunter
id: 17,
name: 'Fury',
icon: 'ability_demonhunter_eyebeam',
url: 'fury',
},
PAIN: {
// Demon Hunter
id: 18,
name: 'Pain',
icon: 'ability_demonhunter_demonspikes',
url: 'pain',
},
};
export default indexById(RESOURCE_TYPES);
export function
|
(classResources: ClassResources[] | undefined, type: number) {
if (!classResources) {
return undefined;
}
return classResources.find((resource) => resource.type === type);
}
|
getResource
|
identifier_name
|
RESOURCE_TYPES.ts
|
import indexById from 'common/indexById';
import { ClassResources } from 'parser/core/Events';
export interface Resource {
id: number;
name: string;
icon: string;
url: string;
}
const RESOURCE_TYPES: { [key: string]: Resource } = {
MANA: {
// Paladin, Priest, Shaman, Mage, Warlock, Monk, Druid
id: 0,
name: 'Mana',
icon: 'inv_elemental_mote_mana',
url: 'mana',
},
RAGE: {
// Warrior, Druid
id: 1,
name: 'Rage',
icon: 'spell_misc_emotionangry',
url: 'rage',
},
FOCUS: {
// Hunter
id: 2,
name: 'Focus',
icon: 'ability_hunter_focusfire',
url: 'focus',
},
ENERGY: {
// Rogue, Monk, Druid
id: 3,
name: 'Energy',
icon: 'spell_shadow_shadowworddominate',
url: 'energy',
},
COMBO_POINTS: {
// Rogue, Druid
id: 4,
name: 'Combo Points',
icon: 'inv_mace_2h_pvp410_c_01',
url: 'combo-points',
},
RUNES: {
// Death Knight
id: 5,
name: 'Runes',
icon: 'spell_deathknight_frozenruneweapon',
url: 'runes',
},
RUNIC_POWER: {
// Death Knight
id: 6,
name: 'Runic Power',
icon: 'inv_sword_62',
url: 'runic-power',
},
SOUL_SHARDS: {
// Warlock
id: 7,
name: 'Soul Shards',
icon: 'inv_misc_gem_amethyst_02',
url: 'soul-shards',
},
ASTRAL_POWER: {
// Druid
id: 8,
name: 'Astral Power',
icon: 'ability_druid_eclipseorange',
url: 'astral-power',
},
HOLY_POWER: {
// Paladin
id: 9,
name: 'Holy Power',
icon: 'achievement_bg_winsoa',
url: 'holy-power',
},
ALTERNATE_POWER: {
// Used for encounter-specific resources like Torment on Demonic Inqusition
id: 10,
name: 'Alternate Power',
icon: 'trade_engineering',
url: '',
|
MAELSTROM: {
// Shaman
id: 11,
name: 'Maelstrom',
icon: 'spell_fire_masterofelements',
url: 'maelstrom',
},
CHI: {
// Monk
id: 12,
name: 'Chi',
icon: 'ability_monk_healthsphere',
url: 'chi',
},
INSANITY: {
// Priest
id: 13,
name: 'Insanity',
icon: 'spell_priest_shadoworbs',
url: 'insanity',
},
// 14 is obsolete
// 15 is obsolete
ARCANE_CHARGES: {
// Mage
id: 16,
name: 'Arcane Charges',
icon: 'spell_arcane_arcane01',
url: 'arcane-charges',
},
FURY: {
// Demon Hunter
id: 17,
name: 'Fury',
icon: 'ability_demonhunter_eyebeam',
url: 'fury',
},
PAIN: {
// Demon Hunter
id: 18,
name: 'Pain',
icon: 'ability_demonhunter_demonspikes',
url: 'pain',
},
};
export default indexById(RESOURCE_TYPES);
export function getResource(classResources: ClassResources[] | undefined, type: number) {
if (!classResources) {
return undefined;
}
return classResources.find((resource) => resource.type === type);
}
|
},
|
random_line_split
|
package.py
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xgc(AutotoolsPackage):
|
"""xgc is an X11 graphics demo that shows various features of the X11
core protocol graphics primitives."""
homepage = "http://cgit.freedesktop.org/xorg/app/xgc"
url = "https://www.x.org/archive/individual/app/xgc-1.0.5.tar.gz"
version('1.0.5', '605557a9c138f6dc848c87a21bc7c7fc')
depends_on('libxaw')
depends_on('libxt')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
identifier_body
|
|
package.py
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class
|
(AutotoolsPackage):
"""xgc is an X11 graphics demo that shows various features of the X11
core protocol graphics primitives."""
homepage = "http://cgit.freedesktop.org/xorg/app/xgc"
url = "https://www.x.org/archive/individual/app/xgc-1.0.5.tar.gz"
version('1.0.5', '605557a9c138f6dc848c87a21bc7c7fc')
depends_on('libxaw')
depends_on('libxt')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
Xgc
|
identifier_name
|
package.py
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
|
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xgc(AutotoolsPackage):
"""xgc is an X11 graphics demo that shows various features of the X11
core protocol graphics primitives."""
homepage = "http://cgit.freedesktop.org/xorg/app/xgc"
url = "https://www.x.org/archive/individual/app/xgc-1.0.5.tar.gz"
version('1.0.5', '605557a9c138f6dc848c87a21bc7c7fc')
depends_on('libxaw')
depends_on('libxt')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
random_line_split
|
|
__init__.py
|
# -*- Mode: Python; py-indent-offset: 4 -*-
# pygobject - Python bindings for the GObject library
# Copyright (C) 2006-2012 Johan Dahlin
#
# glib/__init__.py: initialisation file for glib module
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from . import _glib
# Internal API
_PyGLib_API = _glib._PyGLib_API
# Types
GError = _glib.GError
IOChannel = _glib.IOChannel
Idle = _glib.Idle
MainContext = _glib.MainContext
MainLoop = _glib.MainLoop
OptionContext = _glib.OptionContext
OptionGroup = _glib.OptionGroup
Pid = _glib.Pid
PollFD = _glib.PollFD
Source = _glib.Source
Timeout = _glib.Timeout
# Constants
IO_ERR = _glib.IO_ERR
IO_FLAG_APPEND = _glib.IO_FLAG_APPEND
IO_FLAG_GET_MASK = _glib.IO_FLAG_GET_MASK
IO_FLAG_IS_READABLE = _glib.IO_FLAG_IS_READABLE
IO_FLAG_IS_SEEKABLE = _glib.IO_FLAG_IS_SEEKABLE
IO_FLAG_IS_WRITEABLE = _glib.IO_FLAG_IS_WRITEABLE
IO_FLAG_MASK = _glib.IO_FLAG_MASK
IO_FLAG_NONBLOCK = _glib.IO_FLAG_NONBLOCK
IO_FLAG_SET_MASK = _glib.IO_FLAG_SET_MASK
IO_HUP = _glib.IO_HUP
IO_IN = _glib.IO_IN
IO_NVAL = _glib.IO_NVAL
IO_OUT = _glib.IO_OUT
IO_PRI = _glib.IO_PRI
IO_STATUS_AGAIN = _glib.IO_STATUS_AGAIN
IO_STATUS_EOF = _glib.IO_STATUS_EOF
IO_STATUS_ERROR = _glib.IO_STATUS_ERROR
IO_STATUS_NORMAL = _glib.IO_STATUS_NORMAL
OPTION_ERROR = _glib.OPTION_ERROR
OPTION_ERROR_BAD_VALUE = _glib.OPTION_ERROR_BAD_VALUE
OPTION_ERROR_FAILED = _glib.OPTION_ERROR_FAILED
OPTION_ERROR_UNKNOWN_OPTION = _glib.OPTION_ERROR_UNKNOWN_OPTION
OPTION_FLAG_FILENAME = _glib.OPTION_FLAG_FILENAME
OPTION_FLAG_HIDDEN = _glib.OPTION_FLAG_HIDDEN
OPTION_FLAG_IN_MAIN = _glib.OPTION_FLAG_IN_MAIN
OPTION_FLAG_NOALIAS = _glib.OPTION_FLAG_NOALIAS
OPTION_FLAG_NO_ARG = _glib.OPTION_FLAG_NO_ARG
OPTION_FLAG_OPTIONAL_ARG = _glib.OPTION_FLAG_OPTIONAL_ARG
OPTION_FLAG_REVERSE = _glib.OPTION_FLAG_REVERSE
OPTION_REMAINING = _glib.OPTION_REMAINING
PRIORITY_DEFAULT = _glib.PRIORITY_DEFAULT
PRIORITY_DEFAULT_IDLE = _glib.PRIORITY_DEFAULT_IDLE
PRIORITY_HIGH = _glib.PRIORITY_HIGH
PRIORITY_HIGH_IDLE = _glib.PRIORITY_HIGH_IDLE
PRIORITY_LOW = _glib.PRIORITY_LOW
SPAWN_CHILD_INHERITS_STDIN = _glib.SPAWN_CHILD_INHERITS_STDIN
|
SPAWN_STDERR_TO_DEV_NULL = _glib.SPAWN_STDERR_TO_DEV_NULL
SPAWN_STDOUT_TO_DEV_NULL = _glib.SPAWN_STDOUT_TO_DEV_NULL
USER_DIRECTORY_DESKTOP = _glib.USER_DIRECTORY_DESKTOP
USER_DIRECTORY_DOCUMENTS = _glib.USER_DIRECTORY_DOCUMENTS
USER_DIRECTORY_DOWNLOAD = _glib.USER_DIRECTORY_DOWNLOAD
USER_DIRECTORY_MUSIC = _glib.USER_DIRECTORY_MUSIC
USER_DIRECTORY_PICTURES = _glib.USER_DIRECTORY_PICTURES
USER_DIRECTORY_PUBLIC_SHARE = _glib.USER_DIRECTORY_PUBLIC_SHARE
USER_DIRECTORY_TEMPLATES = _glib.USER_DIRECTORY_TEMPLATES
USER_DIRECTORY_VIDEOS = _glib.USER_DIRECTORY_VIDEOS
# Functions
child_watch_add = _glib.child_watch_add
filename_display_basename = _glib.filename_display_basename
filename_display_name = _glib.filename_display_name
filename_from_utf8 = _glib.filename_from_utf8
find_program_in_path = _glib.find_program_in_path
get_application_name = _glib.get_application_name
get_current_time = _glib.get_current_time
get_prgname = _glib.get_prgname
get_system_config_dirs = _glib.get_system_config_dirs
get_system_data_dirs = _glib.get_system_data_dirs
get_user_cache_dir = _glib.get_user_cache_dir
get_user_config_dir = _glib.get_user_config_dir
get_user_data_dir = _glib.get_user_data_dir
get_user_special_dir = _glib.get_user_special_dir
glib_version = _glib.glib_version
idle_add = _glib.idle_add
io_add_watch = _glib.io_add_watch
main_context_default = _glib.main_context_default
main_depth = _glib.main_depth
markup_escape_text = _glib.markup_escape_text
pyglib_version = _glib.pyglib_version
set_application_name = _glib.set_application_name
set_prgname = _glib.set_prgname
source_remove = _glib.source_remove
spawn_async = _glib.spawn_async
threads_init = _glib.threads_init
timeout_add = _glib.timeout_add
timeout_add_seconds = _glib.timeout_add_seconds
uri_list_extract_uris = _glib.uri_list_extract_uris
|
SPAWN_DO_NOT_REAP_CHILD = _glib.SPAWN_DO_NOT_REAP_CHILD
SPAWN_FILE_AND_ARGV_ZERO = _glib.SPAWN_FILE_AND_ARGV_ZERO
SPAWN_LEAVE_DESCRIPTORS_OPEN = _glib.SPAWN_LEAVE_DESCRIPTORS_OPEN
SPAWN_SEARCH_PATH = _glib.SPAWN_SEARCH_PATH
|
random_line_split
|
macRes.py
|
""" Tools for reading Mac resource forks. """
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
pass
class ResourceReader(MutableMapping):
def __init__(self, fileOrPath):
|
@staticmethod
def openResourceFork(path):
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
def _read(self, numBytes, offset=None):
if offset is not None:
try:
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readReferenceList(self, resType, refListOffset, numRes):
resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def __getitem__(self, resType):
return self._resources[resType]
def __delitem__(self, resType):
del self._resources[resType]
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __len__(self):
return len(self._resources)
def __iter__(self):
return iter(self._resources)
def keys(self):
return self._resources.keys()
@property
def types(self):
return list(self._resources.keys())
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
def getIndices(self, resType):
numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes+1))
else:
return []
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getIndResource(self, resType, index):
"""Return resource of given type located at an index ranging from 1
to the number of resources for that type, or None if not found.
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
for res in self.get(resType, []):
if res.name == name:
return res
return None
def close(self):
if not self.file.closed:
self.file.close()
class Resource(object):
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
self.type = resType
self.data = resData
self.id = resID
self.name = resName
self.attr = resAttr
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
|
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
|
identifier_body
|
macRes.py
|
""" Tools for reading Mac resource forks. """
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
pass
class ResourceReader(MutableMapping):
def __init__(self, fileOrPath):
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod
def openResourceFork(path):
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
def _read(self, numBytes, offset=None):
if offset is not None:
try:
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
|
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readReferenceList(self, resType, refListOffset, numRes):
resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def __getitem__(self, resType):
return self._resources[resType]
def __delitem__(self, resType):
del self._resources[resType]
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __len__(self):
return len(self._resources)
def __iter__(self):
return iter(self._resources)
def keys(self):
return self._resources.keys()
@property
def types(self):
return list(self._resources.keys())
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
def getIndices(self, resType):
numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes+1))
else:
return []
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getIndResource(self, resType, index):
"""Return resource of given type located at an index ranging from 1
to the number of resources for that type, or None if not found.
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
for res in self.get(resType, []):
if res.name == name:
return res
return None
def close(self):
if not self.file.closed:
self.file.close()
class Resource(object):
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
self.type = resType
self.data = resData
self.id = resID
self.name = resName
self.attr = resAttr
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
|
random_line_split
|
|
macRes.py
|
""" Tools for reading Mac resource forks. """
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
pass
class ResourceReader(MutableMapping):
def __init__(self, fileOrPath):
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
|
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod
def openResourceFork(path):
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
def _read(self, numBytes, offset=None):
if offset is not None:
try:
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readReferenceList(self, resType, refListOffset, numRes):
resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def __getitem__(self, resType):
return self._resources[resType]
def __delitem__(self, resType):
del self._resources[resType]
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __len__(self):
return len(self._resources)
def __iter__(self):
return iter(self._resources)
def keys(self):
return self._resources.keys()
@property
def types(self):
return list(self._resources.keys())
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
def getIndices(self, resType):
numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes+1))
else:
return []
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getIndResource(self, resType, index):
"""Return resource of given type located at an index ranging from 1
to the number of resources for that type, or None if not found.
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
for res in self.get(resType, []):
if res.name == name:
return res
return None
def close(self):
if not self.file.closed:
self.file.close()
class Resource(object):
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
self.type = resType
self.data = resData
self.id = resID
self.name = resName
self.attr = resAttr
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
|
self.file = fileOrPath
|
conditional_block
|
macRes.py
|
""" Tools for reading Mac resource forks. """
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
pass
class ResourceReader(MutableMapping):
def __init__(self, fileOrPath):
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod
def openResourceFork(path):
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
def _read(self, numBytes, offset=None):
if offset is not None:
try:
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readReferenceList(self, resType, refListOffset, numRes):
resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def __getitem__(self, resType):
return self._resources[resType]
def __delitem__(self, resType):
del self._resources[resType]
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __len__(self):
return len(self._resources)
def __iter__(self):
return iter(self._resources)
def keys(self):
return self._resources.keys()
@property
def types(self):
return list(self._resources.keys())
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
def getIndices(self, resType):
numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes+1))
else:
return []
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getIndResource(self, resType, index):
"""Return resource of given type located at an index ranging from 1
to the number of resources for that type, or None if not found.
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
for res in self.get(resType, []):
if res.name == name:
return res
return None
def
|
(self):
if not self.file.closed:
self.file.close()
class Resource(object):
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
self.type = resType
self.data = resData
self.id = resID
self.name = resName
self.attr = resAttr
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
|
close
|
identifier_name
|
pages.tsx
|
import Layout from 'components/layout';
import {Table} from 'components/table';
import * as API from 'lib/api';
import * as Page from 'lib/page';
import * as React from 'react';
import {Link} from 'react-router-dom';
interface State {
pages: API.Page[];
viewOption: API.ListPageRequest_ListPageFilter;
loading: boolean;
}
export default class PagesPage extends React.Component<{}, State> {
constructor(props: any) {
super(props);
this.state = {
pages: [],
loading: true,
viewOption: 'all',
};
}
componentDidMount() {
this.fetch(this.state.viewOption);
}
fetch = (val: API.ListPageRequest_ListPageFilter) => {
return Page.list(val).then((pages) => {
this.setState({
pages,
viewOption: val,
loading: false,
});
});
};
render() {
let tab = (v: API.ListPageRequest_ListPageFilter, desc?: string) => {
let classes = 'tab-el';
if (this.state.viewOption == v) {
classes += ' tab-selected';
}
return (
<span className={classes} onClick={() => this.fetch(v)}>
{desc || v}
</span>
);
};
return (
<Layout className="pages">
<header>
<Link className="button button--green button--center" to="/compose">
Compose
</Link>
<h1>Pages</h1>
</header>
<h2 className="tabs">
{tab('all')}
<span className="tab-divider">|</span>
{tab('draft')}
<span className="tab-divider">|</span>
{tab('published')}
</h2>
<Table loading={this.state.loading}>
{this.state.pages.map((page) => {
let status = null;
let klass = '';
if (Page.isPublished(page))
|
else {
status = <div className="label label--gray small">draft</div>;
klass = 'page--draft';
}
return (
<Link key={page.uuid} className="tr tr--center" to={`/pages/${page.uuid}`}>
<div className={`tr__expand ${klass}`}>{page.title || 'untitled'}</div>
{status}
<div className="page--date">{`${Page.formattedUpdatedAt(page)}`}</div>
</Link>
);
})}
</Table>
</Layout>
);
}
}
|
{
status = <div className="label small">published</div>;
}
|
conditional_block
|
pages.tsx
|
import Layout from 'components/layout';
import {Table} from 'components/table';
import * as API from 'lib/api';
import * as Page from 'lib/page';
import * as React from 'react';
import {Link} from 'react-router-dom';
interface State {
pages: API.Page[];
viewOption: API.ListPageRequest_ListPageFilter;
loading: boolean;
}
export default class PagesPage extends React.Component<{}, State> {
|
(props: any) {
super(props);
this.state = {
pages: [],
loading: true,
viewOption: 'all',
};
}
componentDidMount() {
this.fetch(this.state.viewOption);
}
fetch = (val: API.ListPageRequest_ListPageFilter) => {
return Page.list(val).then((pages) => {
this.setState({
pages,
viewOption: val,
loading: false,
});
});
};
render() {
let tab = (v: API.ListPageRequest_ListPageFilter, desc?: string) => {
let classes = 'tab-el';
if (this.state.viewOption == v) {
classes += ' tab-selected';
}
return (
<span className={classes} onClick={() => this.fetch(v)}>
{desc || v}
</span>
);
};
return (
<Layout className="pages">
<header>
<Link className="button button--green button--center" to="/compose">
Compose
</Link>
<h1>Pages</h1>
</header>
<h2 className="tabs">
{tab('all')}
<span className="tab-divider">|</span>
{tab('draft')}
<span className="tab-divider">|</span>
{tab('published')}
</h2>
<Table loading={this.state.loading}>
{this.state.pages.map((page) => {
let status = null;
let klass = '';
if (Page.isPublished(page)) {
status = <div className="label small">published</div>;
} else {
status = <div className="label label--gray small">draft</div>;
klass = 'page--draft';
}
return (
<Link key={page.uuid} className="tr tr--center" to={`/pages/${page.uuid}`}>
<div className={`tr__expand ${klass}`}>{page.title || 'untitled'}</div>
{status}
<div className="page--date">{`${Page.formattedUpdatedAt(page)}`}</div>
</Link>
);
})}
</Table>
</Layout>
);
}
}
|
constructor
|
identifier_name
|
pages.tsx
|
import Layout from 'components/layout';
import {Table} from 'components/table';
import * as API from 'lib/api';
import * as Page from 'lib/page';
import * as React from 'react';
import {Link} from 'react-router-dom';
interface State {
pages: API.Page[];
viewOption: API.ListPageRequest_ListPageFilter;
loading: boolean;
}
export default class PagesPage extends React.Component<{}, State> {
constructor(props: any) {
super(props);
this.state = {
pages: [],
loading: true,
viewOption: 'all',
};
}
componentDidMount() {
this.fetch(this.state.viewOption);
}
fetch = (val: API.ListPageRequest_ListPageFilter) => {
return Page.list(val).then((pages) => {
this.setState({
pages,
viewOption: val,
loading: false,
});
});
};
render()
|
</span>
);
};
return (
<Layout className="pages">
<header>
<Link className="button button--green button--center" to="/compose">
Compose
</Link>
<h1>Pages</h1>
</header>
<h2 className="tabs">
{tab('all')}
<span className="tab-divider">|</span>
{tab('draft')}
<span className="tab-divider">|</span>
{tab('published')}
</h2>
<Table loading={this.state.loading}>
{this.state.pages.map((page) => {
let status = null;
let klass = '';
if (Page.isPublished(page)) {
status = <div className="label small">published</div>;
} else {
status = <div className="label label--gray small">draft</div>;
klass = 'page--draft';
}
return (
<Link key={page.uuid} className="tr tr--center" to={`/pages/${page.uuid}`}>
<div className={`tr__expand ${klass}`}>{page.title || 'untitled'}</div>
{status}
<div className="page--date">{`${Page.formattedUpdatedAt(page)}`}</div>
</Link>
);
})}
</Table>
</Layout>
);
}
}
|
{
let tab = (v: API.ListPageRequest_ListPageFilter, desc?: string) => {
let classes = 'tab-el';
if (this.state.viewOption == v) {
classes += ' tab-selected';
}
return (
<span className={classes} onClick={() => this.fetch(v)}>
{desc || v}
|
identifier_body
|
pages.tsx
|
import Layout from 'components/layout';
import {Table} from 'components/table';
import * as API from 'lib/api';
import * as Page from 'lib/page';
import * as React from 'react';
import {Link} from 'react-router-dom';
interface State {
pages: API.Page[];
viewOption: API.ListPageRequest_ListPageFilter;
loading: boolean;
}
export default class PagesPage extends React.Component<{}, State> {
constructor(props: any) {
super(props);
this.state = {
pages: [],
loading: true,
viewOption: 'all',
};
}
componentDidMount() {
this.fetch(this.state.viewOption);
}
fetch = (val: API.ListPageRequest_ListPageFilter) => {
return Page.list(val).then((pages) => {
this.setState({
pages,
viewOption: val,
loading: false,
});
});
};
render() {
let tab = (v: API.ListPageRequest_ListPageFilter, desc?: string) => {
let classes = 'tab-el';
if (this.state.viewOption == v) {
classes += ' tab-selected';
}
return (
<span className={classes} onClick={() => this.fetch(v)}>
{desc || v}
</span>
);
};
return (
<Layout className="pages">
<header>
<Link className="button button--green button--center" to="/compose">
Compose
</Link>
<h1>Pages</h1>
</header>
<h2 className="tabs">
{tab('all')}
<span className="tab-divider">|</span>
{tab('draft')}
<span className="tab-divider">|</span>
{tab('published')}
</h2>
<Table loading={this.state.loading}>
{this.state.pages.map((page) => {
let status = null;
let klass = '';
if (Page.isPublished(page)) {
status = <div className="label small">published</div>;
} else {
status = <div className="label label--gray small">draft</div>;
klass = 'page--draft';
}
return (
<Link key={page.uuid} className="tr tr--center" to={`/pages/${page.uuid}`}>
<div className={`tr__expand ${klass}`}>{page.title || 'untitled'}</div>
{status}
<div className="page--date">{`${Page.formattedUpdatedAt(page)}`}</div>
</Link>
);
})}
|
}
|
</Table>
</Layout>
);
}
|
random_line_split
|
1395_count-number-of-teams.py
|
# 1395. Count Number of Teams - LeetCode
# https://leetcode.com/problems/count-number-of-teams/
from typing import List
# 暴力搜索都 AC 了
# 其实有两次筛选的算法
class Solution:
def numTeams(self, rating: List[int]) -> int:
if len(rating) <= 2:
|
= [1,2,3,4]
s = Solution()
ret = s.numTeams(rating)
print(ret)
|
return 0
count = 0
for i in range(len(rating)):
for j in range(i+1,len(rating)):
for k in range(j+1,len(rating)):
if rating[i] < rating[j] and rating[j] < rating[k]:
count += 1
if rating[i] > rating[j] and rating[j] > rating[k]:
count += 1
return count
# rating = [2,5,3,4,1]
rating
|
identifier_body
|
1395_count-number-of-teams.py
|
# 1395. Count Number of Teams - LeetCode
# https://leetcode.com/problems/count-number-of-teams/
from typing import List
|
# 其实有两次筛选的算法
class Solution:
def numTeams(self, rating: List[int]) -> int:
if len(rating) <= 2:
return 0
count = 0
for i in range(len(rating)):
for j in range(i+1,len(rating)):
for k in range(j+1,len(rating)):
if rating[i] < rating[j] and rating[j] < rating[k]:
count += 1
if rating[i] > rating[j] and rating[j] > rating[k]:
count += 1
return count
# rating = [2,5,3,4,1]
rating = [1,2,3,4]
s = Solution()
ret = s.numTeams(rating)
print(ret)
|
# 暴力搜索都 AC 了
|
random_line_split
|
1395_count-number-of-teams.py
|
# 1395. Count Number of Teams - LeetCode
# https://leetcode.com/problems/count-number-of-teams/
from typing import List
# 暴力搜索都 AC 了
# 其实有两次筛选的算法
class Solution:
def numTeams(self, rating: List[int]
|
:
if len(rating) <= 2:
return 0
count = 0
for i in range(len(rating)):
for j in range(i+1,len(rating)):
for k in range(j+1,len(rating)):
if rating[i] < rating[j] and rating[j] < rating[k]:
count += 1
if rating[i] > rating[j] and rating[j] > rating[k]:
count += 1
return count
# rating = [2,5,3,4,1]
rating = [1,2,3,4]
s = Solution()
ret = s.numTeams(rating)
print(ret)
|
) -> int
|
identifier_name
|
1395_count-number-of-teams.py
|
# 1395. Count Number of Teams - LeetCode
# https://leetcode.com/problems/count-number-of-teams/
from typing import List
# 暴力搜索都 AC 了
# 其实有两次筛选的算法
class Solution:
def numTeams(self, rating: List[int]) -> int:
if len(rating) <= 2:
return 0
count = 0
for i in range(len(rating)):
for j in range(i+1,len(rating)):
for k in range(j+1,len(rating)):
if rating[i] < rating[j] and rating[j] < rating[k]:
count += 1
if rating[i] > rating[j] and rating[j] > rating[k]:
count += 1
return count
|
= [2,5,3,4,1]
rating = [1,2,3,4]
s = Solution()
ret = s.numTeams(rating)
print(ret)
|
# rating
|
conditional_block
|
forms.py
|
from django import forms
from order.models import Pizza, Bread, Customer
class PizzaForm(forms.ModelForm):
class Meta:
model = Pizza
fields = ('size', 'toppings', 'crust')
widgets = {
'size': forms.RadioSelect(),
'crust': forms.RadioSelect(),
'toppings': forms.CheckboxSelectMultiple(),
}
def process(self, order):
data = self.cleaned_data
size = data['size']
crust = data['crust']
toppings = data['toppings']
pizza = Pizza.objects.create()
pizza.size = size
pizza.crust = crust
for topping in toppings:
|
pizza.save()
order.pizzas.add(pizza)
order.save()
class BreadForm(forms.ModelForm):
class Meta:
model = Bread
fields = ('flavor',)
widgets = {
'type': forms.RadioSelect(),
}
def process(self, order):
data = self.cleaned_data
flavor = data['flavor']
bread = Bread.objects.create(flavor=flavor)
order.breads.add(bread)
order.save()
class CustomerForm(forms.ModelForm):
class Meta:
model = Customer
def process(self, order):
data = self.cleaned_data
name = str(data['name'])
number = str(data['number'])
customer = Customer.objects.create(name=name, number=number)
order.customer = customer
order.save()
|
pizza.toppings.add(topping)
|
conditional_block
|
forms.py
|
from django import forms
from order.models import Pizza, Bread, Customer
class PizzaForm(forms.ModelForm):
class Meta:
model = Pizza
|
fields = ('size', 'toppings', 'crust')
widgets = {
'size': forms.RadioSelect(),
'crust': forms.RadioSelect(),
'toppings': forms.CheckboxSelectMultiple(),
}
def process(self, order):
data = self.cleaned_data
size = data['size']
crust = data['crust']
toppings = data['toppings']
pizza = Pizza.objects.create()
pizza.size = size
pizza.crust = crust
for topping in toppings:
pizza.toppings.add(topping)
pizza.save()
order.pizzas.add(pizza)
order.save()
class BreadForm(forms.ModelForm):
class Meta:
model = Bread
fields = ('flavor',)
widgets = {
'type': forms.RadioSelect(),
}
def process(self, order):
data = self.cleaned_data
flavor = data['flavor']
bread = Bread.objects.create(flavor=flavor)
order.breads.add(bread)
order.save()
class CustomerForm(forms.ModelForm):
class Meta:
model = Customer
def process(self, order):
data = self.cleaned_data
name = str(data['name'])
number = str(data['number'])
customer = Customer.objects.create(name=name, number=number)
order.customer = customer
order.save()
|
random_line_split
|
|
forms.py
|
from django import forms
from order.models import Pizza, Bread, Customer
class PizzaForm(forms.ModelForm):
class Meta:
model = Pizza
fields = ('size', 'toppings', 'crust')
widgets = {
'size': forms.RadioSelect(),
'crust': forms.RadioSelect(),
'toppings': forms.CheckboxSelectMultiple(),
}
def process(self, order):
data = self.cleaned_data
size = data['size']
crust = data['crust']
toppings = data['toppings']
pizza = Pizza.objects.create()
pizza.size = size
pizza.crust = crust
for topping in toppings:
pizza.toppings.add(topping)
pizza.save()
order.pizzas.add(pizza)
order.save()
class BreadForm(forms.ModelForm):
class Meta:
model = Bread
fields = ('flavor',)
widgets = {
'type': forms.RadioSelect(),
}
def process(self, order):
data = self.cleaned_data
flavor = data['flavor']
bread = Bread.objects.create(flavor=flavor)
order.breads.add(bread)
order.save()
class CustomerForm(forms.ModelForm):
class Meta:
model = Customer
def process(self, order):
|
data = self.cleaned_data
name = str(data['name'])
number = str(data['number'])
customer = Customer.objects.create(name=name, number=number)
order.customer = customer
order.save()
|
identifier_body
|
|
forms.py
|
from django import forms
from order.models import Pizza, Bread, Customer
class PizzaForm(forms.ModelForm):
class Meta:
model = Pizza
fields = ('size', 'toppings', 'crust')
widgets = {
'size': forms.RadioSelect(),
'crust': forms.RadioSelect(),
'toppings': forms.CheckboxSelectMultiple(),
}
def process(self, order):
data = self.cleaned_data
size = data['size']
crust = data['crust']
toppings = data['toppings']
pizza = Pizza.objects.create()
pizza.size = size
pizza.crust = crust
for topping in toppings:
pizza.toppings.add(topping)
pizza.save()
order.pizzas.add(pizza)
order.save()
class BreadForm(forms.ModelForm):
class Meta:
model = Bread
fields = ('flavor',)
widgets = {
'type': forms.RadioSelect(),
}
def process(self, order):
data = self.cleaned_data
flavor = data['flavor']
bread = Bread.objects.create(flavor=flavor)
order.breads.add(bread)
order.save()
class
|
(forms.ModelForm):
class Meta:
model = Customer
def process(self, order):
data = self.cleaned_data
name = str(data['name'])
number = str(data['number'])
customer = Customer.objects.create(name=name, number=number)
order.customer = customer
order.save()
|
CustomerForm
|
identifier_name
|
url_mappings.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
class URLMappings(object):
def __init__(self, src_root, build_dir):
|
@property
def as_args(self):
return map(lambda item: '--url-mapping=%s,%s' % item, self.mappings.items())
|
self.mappings = {
'dart:mojo.internal': os.path.join(src_root, 'mojo/public/dart/sdk_ext/internal.dart'),
'dart:sky': os.path.join(build_dir, 'gen/sky/bindings/dart_sky.dart'),
'dart:sky.internals': os.path.join(src_root, 'sky/engine/bindings/sky_internals.dart'),
'dart:sky_builtin_natives': os.path.join(src_root, 'sky/engine/bindings/builtin_natives.dart'),
}
self.packages_root = os.path.join(build_dir, 'gen/dart-pkg/packages')
|
identifier_body
|
url_mappings.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
class URLMappings(object):
def
|
(self, src_root, build_dir):
self.mappings = {
'dart:mojo.internal': os.path.join(src_root, 'mojo/public/dart/sdk_ext/internal.dart'),
'dart:sky': os.path.join(build_dir, 'gen/sky/bindings/dart_sky.dart'),
'dart:sky.internals': os.path.join(src_root, 'sky/engine/bindings/sky_internals.dart'),
'dart:sky_builtin_natives': os.path.join(src_root, 'sky/engine/bindings/builtin_natives.dart'),
}
self.packages_root = os.path.join(build_dir, 'gen/dart-pkg/packages')
@property
def as_args(self):
return map(lambda item: '--url-mapping=%s,%s' % item, self.mappings.items())
|
__init__
|
identifier_name
|
url_mappings.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
class URLMappings(object):
def __init__(self, src_root, build_dir):
self.mappings = {
'dart:mojo.internal': os.path.join(src_root, 'mojo/public/dart/sdk_ext/internal.dart'),
'dart:sky': os.path.join(build_dir, 'gen/sky/bindings/dart_sky.dart'),
'dart:sky.internals': os.path.join(src_root, 'sky/engine/bindings/sky_internals.dart'),
'dart:sky_builtin_natives': os.path.join(src_root, 'sky/engine/bindings/builtin_natives.dart'),
}
self.packages_root = os.path.join(build_dir, 'gen/dart-pkg/packages')
@property
|
def as_args(self):
return map(lambda item: '--url-mapping=%s,%s' % item, self.mappings.items())
|
random_line_split
|
|
res_company.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
class
|
(models.Model):
_inherit = "res.company"
@api.model
def create(self, vals):
new_company = super(ResCompany, self).create(vals)
ProductPricelist = self.env['product.pricelist']
pricelist = ProductPricelist.search([('currency_id', '=', new_company.currency_id.id), ('company_id', '=', False)], limit=1)
if not pricelist:
pricelist = ProductPricelist.create({
'name': new_company.name,
'currency_id': new_company.currency_id.id,
})
field_id = self.env['ir.model.fields'].search([('model', '=', 'res.partner'), ('name', '=', 'property_product_pricelist')])
self.env['ir.property'].create({
'name': 'property_product_pricelist',
'company_id': new_company.id,
'value_reference': 'product.pricelist,%s' % pricelist.id,
'fields_id': field_id.id
})
return new_company
|
ResCompany
|
identifier_name
|
res_company.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
class ResCompany(models.Model):
_inherit = "res.company"
@api.model
def create(self, vals):
new_company = super(ResCompany, self).create(vals)
ProductPricelist = self.env['product.pricelist']
pricelist = ProductPricelist.search([('currency_id', '=', new_company.currency_id.id), ('company_id', '=', False)], limit=1)
if not pricelist:
|
field_id = self.env['ir.model.fields'].search([('model', '=', 'res.partner'), ('name', '=', 'property_product_pricelist')])
self.env['ir.property'].create({
'name': 'property_product_pricelist',
'company_id': new_company.id,
'value_reference': 'product.pricelist,%s' % pricelist.id,
'fields_id': field_id.id
})
return new_company
|
pricelist = ProductPricelist.create({
'name': new_company.name,
'currency_id': new_company.currency_id.id,
})
|
conditional_block
|
res_company.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
class ResCompany(models.Model):
_inherit = "res.company"
@api.model
def create(self, vals):
|
new_company = super(ResCompany, self).create(vals)
ProductPricelist = self.env['product.pricelist']
pricelist = ProductPricelist.search([('currency_id', '=', new_company.currency_id.id), ('company_id', '=', False)], limit=1)
if not pricelist:
pricelist = ProductPricelist.create({
'name': new_company.name,
'currency_id': new_company.currency_id.id,
})
field_id = self.env['ir.model.fields'].search([('model', '=', 'res.partner'), ('name', '=', 'property_product_pricelist')])
self.env['ir.property'].create({
'name': 'property_product_pricelist',
'company_id': new_company.id,
'value_reference': 'product.pricelist,%s' % pricelist.id,
'fields_id': field_id.id
})
return new_company
|
identifier_body
|
|
res_company.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
|
@api.model
def create(self, vals):
new_company = super(ResCompany, self).create(vals)
ProductPricelist = self.env['product.pricelist']
pricelist = ProductPricelist.search([('currency_id', '=', new_company.currency_id.id), ('company_id', '=', False)], limit=1)
if not pricelist:
pricelist = ProductPricelist.create({
'name': new_company.name,
'currency_id': new_company.currency_id.id,
})
field_id = self.env['ir.model.fields'].search([('model', '=', 'res.partner'), ('name', '=', 'property_product_pricelist')])
self.env['ir.property'].create({
'name': 'property_product_pricelist',
'company_id': new_company.id,
'value_reference': 'product.pricelist,%s' % pricelist.id,
'fields_id': field_id.id
})
return new_company
|
class ResCompany(models.Model):
_inherit = "res.company"
|
random_line_split
|
fs.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use io::prelude::*;
use os::unix::prelude::*;
use ffi::{CString, CStr, OsString, OsStr};
use fmt;
use io::{self, Error, SeekFrom};
use libc::{self, c_int, size_t, off_t, c_char, mode_t};
use mem;
use path::{Path, PathBuf};
use ptr;
use sync::Arc;
use sys::fd::FileDesc;
use sys::platform::raw;
use sys::{c, cvt, cvt_r};
use sys_common::{AsInner, FromInner};
use vec::Vec;
pub struct File(FileDesc);
pub struct FileAttr {
stat: raw::stat,
}
pub struct ReadDir {
dirp: Dir,
root: Arc<PathBuf>,
}
struct Dir(*mut libc::DIR);
unsafe impl Send for Dir {}
unsafe impl Sync for Dir {}
pub struct DirEntry {
buf: Vec<u8>, // actually *mut libc::dirent_t
root: Arc<PathBuf>,
}
#[derive(Clone)]
pub struct OpenOptions {
flags: c_int,
read: bool,
write: bool,
mode: mode_t,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FilePermissions { mode: mode_t }
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct FileType { mode: mode_t }
pub struct DirBuilder { mode: mode_t }
impl FileAttr {
pub fn size(&self) -> u64 { self.stat.st_size as u64 }
pub fn perm(&self) -> FilePermissions {
FilePermissions { mode: (self.stat.st_mode as mode_t) & 0o777 }
}
pub fn accessed(&self) -> u64 {
self.mktime(self.stat.st_atime as u64, self.stat.st_atime_nsec as u64)
}
pub fn modified(&self) -> u64 {
self.mktime(self.stat.st_mtime as u64, self.stat.st_mtime_nsec as u64)
}
pub fn file_type(&self) -> FileType {
FileType { mode: self.stat.st_mode as mode_t }
}
pub fn raw(&self) -> &raw::stat
|
// times are in milliseconds (currently)
fn mktime(&self, secs: u64, nsecs: u64) -> u64 {
secs * 1000 + nsecs / 1000000
}
}
impl AsInner<raw::stat> for FileAttr {
fn as_inner(&self) -> &raw::stat { &self.stat }
}
#[unstable(feature = "metadata_ext", reason = "recently added API")]
pub trait MetadataExt {
fn as_raw_stat(&self) -> &raw::stat;
}
impl MetadataExt for ::fs::Metadata {
fn as_raw_stat(&self) -> &raw::stat { &self.as_inner().stat }
}
impl MetadataExt for ::os::unix::fs::Metadata {
fn as_raw_stat(&self) -> &raw::stat { self.as_inner() }
}
impl FilePermissions {
pub fn readonly(&self) -> bool { self.mode & 0o222 == 0 }
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.mode &= !0o222;
} else {
self.mode |= 0o222;
}
}
pub fn mode(&self) -> raw::mode_t { self.mode }
}
impl FileType {
pub fn is_dir(&self) -> bool { self.is(libc::S_IFDIR) }
pub fn is_file(&self) -> bool { self.is(libc::S_IFREG) }
pub fn is_symlink(&self) -> bool { self.is(libc::S_IFLNK) }
fn is(&self, mode: mode_t) -> bool { self.mode & libc::S_IFMT == mode }
}
impl FromInner<raw::mode_t> for FilePermissions {
fn from_inner(mode: raw::mode_t) -> FilePermissions {
FilePermissions { mode: mode as mode_t }
}
}
impl Iterator for ReadDir {
type Item = io::Result<DirEntry>;
fn next(&mut self) -> Option<io::Result<DirEntry>> {
extern {
fn rust_dirent_t_size() -> c_int;
}
let mut buf: Vec<u8> = Vec::with_capacity(unsafe {
rust_dirent_t_size() as usize
});
let ptr = buf.as_mut_ptr() as *mut libc::dirent_t;
let mut entry_ptr = ptr::null_mut();
loop {
if unsafe { libc::readdir_r(self.dirp.0, ptr, &mut entry_ptr) != 0 } {
return Some(Err(Error::last_os_error()))
}
if entry_ptr.is_null() {
return None
}
let entry = DirEntry {
buf: buf,
root: self.root.clone()
};
if entry.name_bytes() == b"." || entry.name_bytes() == b".." {
buf = entry.buf;
} else {
return Some(Ok(entry))
}
}
}
}
impl Drop for Dir {
fn drop(&mut self) {
let r = unsafe { libc::closedir(self.0) };
debug_assert_eq!(r, 0);
}
}
impl DirEntry {
pub fn path(&self) -> PathBuf {
self.root.join(<OsStr as OsStrExt>::from_bytes(self.name_bytes()))
}
pub fn file_name(&self) -> OsString {
OsStr::from_bytes(self.name_bytes()).to_os_string()
}
pub fn metadata(&self) -> io::Result<FileAttr> {
lstat(&self.path())
}
pub fn file_type(&self) -> io::Result<FileType> {
extern {
fn rust_dir_get_mode(ptr: *mut libc::dirent_t) -> c_int;
}
unsafe {
match rust_dir_get_mode(self.dirent()) {
-1 => lstat(&self.path()).map(|m| m.file_type()),
n => Ok(FileType { mode: n as mode_t }),
}
}
}
pub fn ino(&self) -> raw::ino_t {
extern {
fn rust_dir_get_ino(ptr: *mut libc::dirent_t) -> raw::ino_t;
}
unsafe { rust_dir_get_ino(self.dirent()) }
}
fn name_bytes(&self) -> &[u8] {
extern {
fn rust_list_dir_val(ptr: *mut libc::dirent_t) -> *const c_char;
}
unsafe {
CStr::from_ptr(rust_list_dir_val(self.dirent())).to_bytes()
}
}
fn dirent(&self) -> *mut libc::dirent_t {
self.buf.as_ptr() as *mut _
}
}
impl OpenOptions {
pub fn new() -> OpenOptions {
OpenOptions {
flags: 0,
read: false,
write: false,
mode: 0o666,
}
}
pub fn read(&mut self, read: bool) {
self.read = read;
}
pub fn write(&mut self, write: bool) {
self.write = write;
}
pub fn append(&mut self, append: bool) {
self.flag(libc::O_APPEND, append);
}
pub fn truncate(&mut self, truncate: bool) {
self.flag(libc::O_TRUNC, truncate);
}
pub fn create(&mut self, create: bool) {
self.flag(libc::O_CREAT, create);
}
pub fn mode(&mut self, mode: raw::mode_t) {
self.mode = mode as mode_t;
}
fn flag(&mut self, bit: c_int, on: bool) {
if on {
self.flags |= bit;
} else {
self.flags &= !bit;
}
}
}
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
let path = try!(cstr(path));
File::open_c(&path, opts)
}
pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
let flags = opts.flags | match (opts.read, opts.write) {
(true, true) => libc::O_RDWR,
(false, true) => libc::O_WRONLY,
(true, false) |
(false, false) => libc::O_RDONLY,
};
let fd = try!(cvt_r(|| unsafe {
libc::open(path.as_ptr(), flags, opts.mode)
}));
let fd = FileDesc::new(fd);
fd.set_cloexec();
Ok(File(fd))
}
pub fn file_attr(&self) -> io::Result<FileAttr> {
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::fstat(self.0.raw(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn fsync(&self) -> io::Result<()> {
try!(cvt_r(|| unsafe { libc::fsync(self.0.raw()) }));
Ok(())
}
pub fn datasync(&self) -> io::Result<()> {
try!(cvt_r(|| unsafe { os_datasync(self.0.raw()) }));
return Ok(());
#[cfg(any(target_os = "macos", target_os = "ios"))]
unsafe fn os_datasync(fd: c_int) -> c_int {
libc::fcntl(fd, libc::F_FULLFSYNC)
}
#[cfg(target_os = "linux")]
unsafe fn os_datasync(fd: c_int) -> c_int { libc::fdatasync(fd) }
#[cfg(not(any(target_os = "macos",
target_os = "ios",
target_os = "linux")))]
unsafe fn os_datasync(fd: c_int) -> c_int { libc::fsync(fd) }
}
pub fn truncate(&self, size: u64) -> io::Result<()> {
try!(cvt_r(|| unsafe {
libc::ftruncate(self.0.raw(), size as libc::off_t)
}));
Ok(())
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.0.read(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
pub fn flush(&self) -> io::Result<()> { Ok(()) }
pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
let (whence, pos) = match pos {
SeekFrom::Start(off) => (libc::SEEK_SET, off as off_t),
SeekFrom::End(off) => (libc::SEEK_END, off as off_t),
SeekFrom::Current(off) => (libc::SEEK_CUR, off as off_t),
};
let n = try!(cvt(unsafe { libc::lseek(self.0.raw(), pos, whence) }));
Ok(n as u64)
}
pub fn fd(&self) -> &FileDesc { &self.0 }
}
impl DirBuilder {
pub fn new() -> DirBuilder {
DirBuilder { mode: 0o777 }
}
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }));
Ok(())
}
pub fn set_mode(&mut self, mode: mode_t) {
self.mode = mode;
}
}
fn cstr(path: &Path) -> io::Result<CString> {
path.as_os_str().to_cstring().ok_or(
io::Error::new(io::ErrorKind::InvalidInput, "path contained a null"))
}
impl FromInner<c_int> for File {
fn from_inner(fd: c_int) -> File {
File(FileDesc::new(fd))
}
}
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[cfg(target_os = "linux")]
fn get_path(fd: c_int) -> Option<PathBuf> {
use string::ToString;
let mut p = PathBuf::from("/proc/self/fd");
p.push(&fd.to_string());
readlink(&p).ok()
}
#[cfg(not(target_os = "linux"))]
fn get_path(_fd: c_int) -> Option<PathBuf> {
// FIXME(#24570): implement this for other Unix platforms
None
}
#[cfg(target_os = "linux")]
fn get_mode(fd: c_int) -> Option<(bool, bool)> {
let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) };
if mode == -1 {
return None;
}
match mode & libc::O_ACCMODE {
libc::O_RDONLY => Some((true, false)),
libc::O_RDWR => Some((true, true)),
libc::O_WRONLY => Some((false, true)),
_ => None
}
}
#[cfg(not(target_os = "linux"))]
fn get_mode(_fd: c_int) -> Option<(bool, bool)> {
// FIXME(#24570): implement this for other Unix platforms
None
}
let fd = self.0.raw();
let mut b = f.debug_struct("File");
b.field("fd", &fd);
if let Some(path) = get_path(fd) {
b.field("path", &path);
}
if let Some((read, write)) = get_mode(fd) {
b.field("read", &read).field("write", &write);
}
b.finish()
}
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
let root = Arc::new(p.to_path_buf());
let p = try!(cstr(p));
unsafe {
let ptr = libc::opendir(p.as_ptr());
if ptr.is_null() {
Err(Error::last_os_error())
} else {
Ok(ReadDir { dirp: Dir(ptr), root: root })
}
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::unlink(p.as_ptr()) }));
Ok(())
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
let old = try!(cstr(old));
let new = try!(cstr(new));
try!(cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) }));
Ok(())
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }));
Ok(())
}
pub fn rmdir(p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::rmdir(p.as_ptr()) }));
Ok(())
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
let c_path = try!(cstr(p));
let p = c_path.as_ptr();
let mut len = unsafe { libc::pathconf(p as *mut _, libc::_PC_NAME_MAX) };
if len < 0 {
len = 1024; // FIXME: read PATH_MAX from C ffi?
}
let mut buf: Vec<u8> = Vec::with_capacity(len as usize);
unsafe {
let n = try!(cvt({
libc::readlink(p, buf.as_ptr() as *mut c_char, len as size_t)
}));
buf.set_len(n as usize);
}
Ok(PathBuf::from(OsString::from_vec(buf)))
}
pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
let src = try!(cstr(src));
let dst = try!(cstr(dst));
try!(cvt(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) }));
Ok(())
}
pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
let src = try!(cstr(src));
let dst = try!(cstr(dst));
try!(cvt(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) }));
Ok(())
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
let p = try!(cstr(p));
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::stat(p.as_ptr(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
let p = try!(cstr(p));
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::lstat(p.as_ptr(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn utimes(p: &Path, atime: u64, mtime: u64) -> io::Result<()> {
let p = try!(cstr(p));
let buf = [super::ms_to_timeval(atime), super::ms_to_timeval(mtime)];
try!(cvt(unsafe { c::utimes(p.as_ptr(), buf.as_ptr()) }));
Ok(())
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
let path = try!(CString::new(p.as_os_str().as_bytes()));
let mut buf = vec![0u8; 16 * 1024];
unsafe {
let r = c::realpath(path.as_ptr(), buf.as_mut_ptr() as *mut _);
if r.is_null() {
return Err(io::Error::last_os_error())
}
}
let p = buf.iter().position(|i| *i == 0).unwrap();
buf.truncate(p);
Ok(PathBuf::from(OsString::from_vec(buf)))
}
|
{ &self.stat }
|
identifier_body
|
fs.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use io::prelude::*;
use os::unix::prelude::*;
use ffi::{CString, CStr, OsString, OsStr};
use fmt;
use io::{self, Error, SeekFrom};
use libc::{self, c_int, size_t, off_t, c_char, mode_t};
use mem;
use path::{Path, PathBuf};
use ptr;
use sync::Arc;
use sys::fd::FileDesc;
use sys::platform::raw;
use sys::{c, cvt, cvt_r};
use sys_common::{AsInner, FromInner};
use vec::Vec;
pub struct File(FileDesc);
pub struct FileAttr {
stat: raw::stat,
}
pub struct ReadDir {
dirp: Dir,
root: Arc<PathBuf>,
}
struct Dir(*mut libc::DIR);
unsafe impl Send for Dir {}
unsafe impl Sync for Dir {}
pub struct DirEntry {
buf: Vec<u8>, // actually *mut libc::dirent_t
root: Arc<PathBuf>,
}
#[derive(Clone)]
pub struct OpenOptions {
flags: c_int,
read: bool,
write: bool,
mode: mode_t,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FilePermissions { mode: mode_t }
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct FileType { mode: mode_t }
pub struct DirBuilder { mode: mode_t }
impl FileAttr {
pub fn size(&self) -> u64 { self.stat.st_size as u64 }
pub fn perm(&self) -> FilePermissions {
FilePermissions { mode: (self.stat.st_mode as mode_t) & 0o777 }
}
pub fn accessed(&self) -> u64 {
self.mktime(self.stat.st_atime as u64, self.stat.st_atime_nsec as u64)
}
pub fn modified(&self) -> u64 {
self.mktime(self.stat.st_mtime as u64, self.stat.st_mtime_nsec as u64)
}
pub fn file_type(&self) -> FileType {
FileType { mode: self.stat.st_mode as mode_t }
}
pub fn raw(&self) -> &raw::stat { &self.stat }
// times are in milliseconds (currently)
fn mktime(&self, secs: u64, nsecs: u64) -> u64 {
secs * 1000 + nsecs / 1000000
}
}
impl AsInner<raw::stat> for FileAttr {
fn as_inner(&self) -> &raw::stat { &self.stat }
}
#[unstable(feature = "metadata_ext", reason = "recently added API")]
pub trait MetadataExt {
fn as_raw_stat(&self) -> &raw::stat;
}
impl MetadataExt for ::fs::Metadata {
fn as_raw_stat(&self) -> &raw::stat { &self.as_inner().stat }
}
impl MetadataExt for ::os::unix::fs::Metadata {
fn as_raw_stat(&self) -> &raw::stat { self.as_inner() }
}
impl FilePermissions {
pub fn readonly(&self) -> bool { self.mode & 0o222 == 0 }
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.mode &= !0o222;
} else {
self.mode |= 0o222;
}
}
pub fn mode(&self) -> raw::mode_t { self.mode }
}
impl FileType {
pub fn is_dir(&self) -> bool { self.is(libc::S_IFDIR) }
pub fn is_file(&self) -> bool { self.is(libc::S_IFREG) }
pub fn is_symlink(&self) -> bool { self.is(libc::S_IFLNK) }
fn is(&self, mode: mode_t) -> bool { self.mode & libc::S_IFMT == mode }
}
impl FromInner<raw::mode_t> for FilePermissions {
fn from_inner(mode: raw::mode_t) -> FilePermissions {
FilePermissions { mode: mode as mode_t }
}
}
impl Iterator for ReadDir {
type Item = io::Result<DirEntry>;
fn next(&mut self) -> Option<io::Result<DirEntry>> {
extern {
fn rust_dirent_t_size() -> c_int;
}
let mut buf: Vec<u8> = Vec::with_capacity(unsafe {
rust_dirent_t_size() as usize
});
let ptr = buf.as_mut_ptr() as *mut libc::dirent_t;
let mut entry_ptr = ptr::null_mut();
loop {
if unsafe { libc::readdir_r(self.dirp.0, ptr, &mut entry_ptr) != 0 } {
return Some(Err(Error::last_os_error()))
}
if entry_ptr.is_null() {
return None
}
let entry = DirEntry {
buf: buf,
root: self.root.clone()
};
if entry.name_bytes() == b"." || entry.name_bytes() == b".." {
buf = entry.buf;
} else {
return Some(Ok(entry))
}
}
}
}
impl Drop for Dir {
fn drop(&mut self) {
let r = unsafe { libc::closedir(self.0) };
debug_assert_eq!(r, 0);
}
}
impl DirEntry {
pub fn path(&self) -> PathBuf {
self.root.join(<OsStr as OsStrExt>::from_bytes(self.name_bytes()))
}
pub fn file_name(&self) -> OsString {
OsStr::from_bytes(self.name_bytes()).to_os_string()
}
pub fn metadata(&self) -> io::Result<FileAttr> {
lstat(&self.path())
}
pub fn file_type(&self) -> io::Result<FileType> {
extern {
fn rust_dir_get_mode(ptr: *mut libc::dirent_t) -> c_int;
}
unsafe {
match rust_dir_get_mode(self.dirent()) {
-1 => lstat(&self.path()).map(|m| m.file_type()),
n => Ok(FileType { mode: n as mode_t }),
}
}
}
pub fn ino(&self) -> raw::ino_t {
extern {
fn rust_dir_get_ino(ptr: *mut libc::dirent_t) -> raw::ino_t;
}
unsafe { rust_dir_get_ino(self.dirent()) }
}
fn name_bytes(&self) -> &[u8] {
extern {
fn rust_list_dir_val(ptr: *mut libc::dirent_t) -> *const c_char;
}
unsafe {
CStr::from_ptr(rust_list_dir_val(self.dirent())).to_bytes()
}
}
fn dirent(&self) -> *mut libc::dirent_t {
self.buf.as_ptr() as *mut _
}
}
impl OpenOptions {
pub fn new() -> OpenOptions {
OpenOptions {
flags: 0,
read: false,
write: false,
mode: 0o666,
}
}
pub fn read(&mut self, read: bool) {
self.read = read;
}
pub fn write(&mut self, write: bool) {
self.write = write;
}
pub fn append(&mut self, append: bool) {
self.flag(libc::O_APPEND, append);
}
pub fn truncate(&mut self, truncate: bool) {
self.flag(libc::O_TRUNC, truncate);
}
pub fn
|
(&mut self, create: bool) {
self.flag(libc::O_CREAT, create);
}
pub fn mode(&mut self, mode: raw::mode_t) {
self.mode = mode as mode_t;
}
fn flag(&mut self, bit: c_int, on: bool) {
if on {
self.flags |= bit;
} else {
self.flags &= !bit;
}
}
}
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
let path = try!(cstr(path));
File::open_c(&path, opts)
}
pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
let flags = opts.flags | match (opts.read, opts.write) {
(true, true) => libc::O_RDWR,
(false, true) => libc::O_WRONLY,
(true, false) |
(false, false) => libc::O_RDONLY,
};
let fd = try!(cvt_r(|| unsafe {
libc::open(path.as_ptr(), flags, opts.mode)
}));
let fd = FileDesc::new(fd);
fd.set_cloexec();
Ok(File(fd))
}
pub fn file_attr(&self) -> io::Result<FileAttr> {
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::fstat(self.0.raw(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn fsync(&self) -> io::Result<()> {
try!(cvt_r(|| unsafe { libc::fsync(self.0.raw()) }));
Ok(())
}
pub fn datasync(&self) -> io::Result<()> {
try!(cvt_r(|| unsafe { os_datasync(self.0.raw()) }));
return Ok(());
#[cfg(any(target_os = "macos", target_os = "ios"))]
unsafe fn os_datasync(fd: c_int) -> c_int {
libc::fcntl(fd, libc::F_FULLFSYNC)
}
#[cfg(target_os = "linux")]
unsafe fn os_datasync(fd: c_int) -> c_int { libc::fdatasync(fd) }
#[cfg(not(any(target_os = "macos",
target_os = "ios",
target_os = "linux")))]
unsafe fn os_datasync(fd: c_int) -> c_int { libc::fsync(fd) }
}
pub fn truncate(&self, size: u64) -> io::Result<()> {
try!(cvt_r(|| unsafe {
libc::ftruncate(self.0.raw(), size as libc::off_t)
}));
Ok(())
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.0.read(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
pub fn flush(&self) -> io::Result<()> { Ok(()) }
pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
let (whence, pos) = match pos {
SeekFrom::Start(off) => (libc::SEEK_SET, off as off_t),
SeekFrom::End(off) => (libc::SEEK_END, off as off_t),
SeekFrom::Current(off) => (libc::SEEK_CUR, off as off_t),
};
let n = try!(cvt(unsafe { libc::lseek(self.0.raw(), pos, whence) }));
Ok(n as u64)
}
pub fn fd(&self) -> &FileDesc { &self.0 }
}
impl DirBuilder {
pub fn new() -> DirBuilder {
DirBuilder { mode: 0o777 }
}
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }));
Ok(())
}
pub fn set_mode(&mut self, mode: mode_t) {
self.mode = mode;
}
}
fn cstr(path: &Path) -> io::Result<CString> {
path.as_os_str().to_cstring().ok_or(
io::Error::new(io::ErrorKind::InvalidInput, "path contained a null"))
}
impl FromInner<c_int> for File {
fn from_inner(fd: c_int) -> File {
File(FileDesc::new(fd))
}
}
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[cfg(target_os = "linux")]
fn get_path(fd: c_int) -> Option<PathBuf> {
use string::ToString;
let mut p = PathBuf::from("/proc/self/fd");
p.push(&fd.to_string());
readlink(&p).ok()
}
#[cfg(not(target_os = "linux"))]
fn get_path(_fd: c_int) -> Option<PathBuf> {
// FIXME(#24570): implement this for other Unix platforms
None
}
#[cfg(target_os = "linux")]
fn get_mode(fd: c_int) -> Option<(bool, bool)> {
let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) };
if mode == -1 {
return None;
}
match mode & libc::O_ACCMODE {
libc::O_RDONLY => Some((true, false)),
libc::O_RDWR => Some((true, true)),
libc::O_WRONLY => Some((false, true)),
_ => None
}
}
#[cfg(not(target_os = "linux"))]
fn get_mode(_fd: c_int) -> Option<(bool, bool)> {
// FIXME(#24570): implement this for other Unix platforms
None
}
let fd = self.0.raw();
let mut b = f.debug_struct("File");
b.field("fd", &fd);
if let Some(path) = get_path(fd) {
b.field("path", &path);
}
if let Some((read, write)) = get_mode(fd) {
b.field("read", &read).field("write", &write);
}
b.finish()
}
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
let root = Arc::new(p.to_path_buf());
let p = try!(cstr(p));
unsafe {
let ptr = libc::opendir(p.as_ptr());
if ptr.is_null() {
Err(Error::last_os_error())
} else {
Ok(ReadDir { dirp: Dir(ptr), root: root })
}
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::unlink(p.as_ptr()) }));
Ok(())
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
let old = try!(cstr(old));
let new = try!(cstr(new));
try!(cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) }));
Ok(())
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }));
Ok(())
}
pub fn rmdir(p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::rmdir(p.as_ptr()) }));
Ok(())
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
let c_path = try!(cstr(p));
let p = c_path.as_ptr();
let mut len = unsafe { libc::pathconf(p as *mut _, libc::_PC_NAME_MAX) };
if len < 0 {
len = 1024; // FIXME: read PATH_MAX from C ffi?
}
let mut buf: Vec<u8> = Vec::with_capacity(len as usize);
unsafe {
let n = try!(cvt({
libc::readlink(p, buf.as_ptr() as *mut c_char, len as size_t)
}));
buf.set_len(n as usize);
}
Ok(PathBuf::from(OsString::from_vec(buf)))
}
pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
let src = try!(cstr(src));
let dst = try!(cstr(dst));
try!(cvt(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) }));
Ok(())
}
pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
let src = try!(cstr(src));
let dst = try!(cstr(dst));
try!(cvt(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) }));
Ok(())
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
let p = try!(cstr(p));
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::stat(p.as_ptr(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
let p = try!(cstr(p));
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::lstat(p.as_ptr(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn utimes(p: &Path, atime: u64, mtime: u64) -> io::Result<()> {
let p = try!(cstr(p));
let buf = [super::ms_to_timeval(atime), super::ms_to_timeval(mtime)];
try!(cvt(unsafe { c::utimes(p.as_ptr(), buf.as_ptr()) }));
Ok(())
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
let path = try!(CString::new(p.as_os_str().as_bytes()));
let mut buf = vec![0u8; 16 * 1024];
unsafe {
let r = c::realpath(path.as_ptr(), buf.as_mut_ptr() as *mut _);
if r.is_null() {
return Err(io::Error::last_os_error())
}
}
let p = buf.iter().position(|i| *i == 0).unwrap();
buf.truncate(p);
Ok(PathBuf::from(OsString::from_vec(buf)))
}
|
create
|
identifier_name
|
fs.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use io::prelude::*;
use os::unix::prelude::*;
use ffi::{CString, CStr, OsString, OsStr};
use fmt;
use io::{self, Error, SeekFrom};
use libc::{self, c_int, size_t, off_t, c_char, mode_t};
use mem;
use path::{Path, PathBuf};
use ptr;
use sync::Arc;
use sys::fd::FileDesc;
use sys::platform::raw;
use sys::{c, cvt, cvt_r};
use sys_common::{AsInner, FromInner};
use vec::Vec;
pub struct File(FileDesc);
pub struct FileAttr {
stat: raw::stat,
}
pub struct ReadDir {
dirp: Dir,
root: Arc<PathBuf>,
}
struct Dir(*mut libc::DIR);
unsafe impl Send for Dir {}
unsafe impl Sync for Dir {}
pub struct DirEntry {
buf: Vec<u8>, // actually *mut libc::dirent_t
root: Arc<PathBuf>,
}
#[derive(Clone)]
pub struct OpenOptions {
flags: c_int,
read: bool,
write: bool,
mode: mode_t,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FilePermissions { mode: mode_t }
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct FileType { mode: mode_t }
pub struct DirBuilder { mode: mode_t }
impl FileAttr {
pub fn size(&self) -> u64 { self.stat.st_size as u64 }
pub fn perm(&self) -> FilePermissions {
FilePermissions { mode: (self.stat.st_mode as mode_t) & 0o777 }
}
pub fn accessed(&self) -> u64 {
self.mktime(self.stat.st_atime as u64, self.stat.st_atime_nsec as u64)
}
pub fn modified(&self) -> u64 {
self.mktime(self.stat.st_mtime as u64, self.stat.st_mtime_nsec as u64)
}
pub fn file_type(&self) -> FileType {
FileType { mode: self.stat.st_mode as mode_t }
}
pub fn raw(&self) -> &raw::stat { &self.stat }
// times are in milliseconds (currently)
fn mktime(&self, secs: u64, nsecs: u64) -> u64 {
secs * 1000 + nsecs / 1000000
}
}
impl AsInner<raw::stat> for FileAttr {
fn as_inner(&self) -> &raw::stat { &self.stat }
}
#[unstable(feature = "metadata_ext", reason = "recently added API")]
pub trait MetadataExt {
fn as_raw_stat(&self) -> &raw::stat;
}
impl MetadataExt for ::fs::Metadata {
fn as_raw_stat(&self) -> &raw::stat { &self.as_inner().stat }
}
impl MetadataExt for ::os::unix::fs::Metadata {
fn as_raw_stat(&self) -> &raw::stat { self.as_inner() }
}
impl FilePermissions {
pub fn readonly(&self) -> bool { self.mode & 0o222 == 0 }
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.mode &= !0o222;
} else {
self.mode |= 0o222;
}
}
pub fn mode(&self) -> raw::mode_t { self.mode }
}
impl FileType {
pub fn is_dir(&self) -> bool { self.is(libc::S_IFDIR) }
pub fn is_file(&self) -> bool { self.is(libc::S_IFREG) }
pub fn is_symlink(&self) -> bool { self.is(libc::S_IFLNK) }
fn is(&self, mode: mode_t) -> bool { self.mode & libc::S_IFMT == mode }
}
impl FromInner<raw::mode_t> for FilePermissions {
fn from_inner(mode: raw::mode_t) -> FilePermissions {
FilePermissions { mode: mode as mode_t }
}
}
impl Iterator for ReadDir {
type Item = io::Result<DirEntry>;
fn next(&mut self) -> Option<io::Result<DirEntry>> {
extern {
fn rust_dirent_t_size() -> c_int;
}
let mut buf: Vec<u8> = Vec::with_capacity(unsafe {
rust_dirent_t_size() as usize
});
let ptr = buf.as_mut_ptr() as *mut libc::dirent_t;
let mut entry_ptr = ptr::null_mut();
loop {
if unsafe { libc::readdir_r(self.dirp.0, ptr, &mut entry_ptr) != 0 } {
return Some(Err(Error::last_os_error()))
}
if entry_ptr.is_null()
|
let entry = DirEntry {
buf: buf,
root: self.root.clone()
};
if entry.name_bytes() == b"." || entry.name_bytes() == b".." {
buf = entry.buf;
} else {
return Some(Ok(entry))
}
}
}
}
impl Drop for Dir {
fn drop(&mut self) {
let r = unsafe { libc::closedir(self.0) };
debug_assert_eq!(r, 0);
}
}
impl DirEntry {
pub fn path(&self) -> PathBuf {
self.root.join(<OsStr as OsStrExt>::from_bytes(self.name_bytes()))
}
pub fn file_name(&self) -> OsString {
OsStr::from_bytes(self.name_bytes()).to_os_string()
}
pub fn metadata(&self) -> io::Result<FileAttr> {
lstat(&self.path())
}
pub fn file_type(&self) -> io::Result<FileType> {
extern {
fn rust_dir_get_mode(ptr: *mut libc::dirent_t) -> c_int;
}
unsafe {
match rust_dir_get_mode(self.dirent()) {
-1 => lstat(&self.path()).map(|m| m.file_type()),
n => Ok(FileType { mode: n as mode_t }),
}
}
}
pub fn ino(&self) -> raw::ino_t {
extern {
fn rust_dir_get_ino(ptr: *mut libc::dirent_t) -> raw::ino_t;
}
unsafe { rust_dir_get_ino(self.dirent()) }
}
fn name_bytes(&self) -> &[u8] {
extern {
fn rust_list_dir_val(ptr: *mut libc::dirent_t) -> *const c_char;
}
unsafe {
CStr::from_ptr(rust_list_dir_val(self.dirent())).to_bytes()
}
}
fn dirent(&self) -> *mut libc::dirent_t {
self.buf.as_ptr() as *mut _
}
}
impl OpenOptions {
pub fn new() -> OpenOptions {
OpenOptions {
flags: 0,
read: false,
write: false,
mode: 0o666,
}
}
pub fn read(&mut self, read: bool) {
self.read = read;
}
pub fn write(&mut self, write: bool) {
self.write = write;
}
pub fn append(&mut self, append: bool) {
self.flag(libc::O_APPEND, append);
}
pub fn truncate(&mut self, truncate: bool) {
self.flag(libc::O_TRUNC, truncate);
}
pub fn create(&mut self, create: bool) {
self.flag(libc::O_CREAT, create);
}
pub fn mode(&mut self, mode: raw::mode_t) {
self.mode = mode as mode_t;
}
fn flag(&mut self, bit: c_int, on: bool) {
if on {
self.flags |= bit;
} else {
self.flags &= !bit;
}
}
}
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
let path = try!(cstr(path));
File::open_c(&path, opts)
}
pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
let flags = opts.flags | match (opts.read, opts.write) {
(true, true) => libc::O_RDWR,
(false, true) => libc::O_WRONLY,
(true, false) |
(false, false) => libc::O_RDONLY,
};
let fd = try!(cvt_r(|| unsafe {
libc::open(path.as_ptr(), flags, opts.mode)
}));
let fd = FileDesc::new(fd);
fd.set_cloexec();
Ok(File(fd))
}
pub fn file_attr(&self) -> io::Result<FileAttr> {
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::fstat(self.0.raw(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn fsync(&self) -> io::Result<()> {
try!(cvt_r(|| unsafe { libc::fsync(self.0.raw()) }));
Ok(())
}
pub fn datasync(&self) -> io::Result<()> {
try!(cvt_r(|| unsafe { os_datasync(self.0.raw()) }));
return Ok(());
#[cfg(any(target_os = "macos", target_os = "ios"))]
unsafe fn os_datasync(fd: c_int) -> c_int {
libc::fcntl(fd, libc::F_FULLFSYNC)
}
#[cfg(target_os = "linux")]
unsafe fn os_datasync(fd: c_int) -> c_int { libc::fdatasync(fd) }
#[cfg(not(any(target_os = "macos",
target_os = "ios",
target_os = "linux")))]
unsafe fn os_datasync(fd: c_int) -> c_int { libc::fsync(fd) }
}
pub fn truncate(&self, size: u64) -> io::Result<()> {
try!(cvt_r(|| unsafe {
libc::ftruncate(self.0.raw(), size as libc::off_t)
}));
Ok(())
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.0.read(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
pub fn flush(&self) -> io::Result<()> { Ok(()) }
pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
let (whence, pos) = match pos {
SeekFrom::Start(off) => (libc::SEEK_SET, off as off_t),
SeekFrom::End(off) => (libc::SEEK_END, off as off_t),
SeekFrom::Current(off) => (libc::SEEK_CUR, off as off_t),
};
let n = try!(cvt(unsafe { libc::lseek(self.0.raw(), pos, whence) }));
Ok(n as u64)
}
pub fn fd(&self) -> &FileDesc { &self.0 }
}
impl DirBuilder {
pub fn new() -> DirBuilder {
DirBuilder { mode: 0o777 }
}
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }));
Ok(())
}
pub fn set_mode(&mut self, mode: mode_t) {
self.mode = mode;
}
}
fn cstr(path: &Path) -> io::Result<CString> {
path.as_os_str().to_cstring().ok_or(
io::Error::new(io::ErrorKind::InvalidInput, "path contained a null"))
}
impl FromInner<c_int> for File {
fn from_inner(fd: c_int) -> File {
File(FileDesc::new(fd))
}
}
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[cfg(target_os = "linux")]
fn get_path(fd: c_int) -> Option<PathBuf> {
use string::ToString;
let mut p = PathBuf::from("/proc/self/fd");
p.push(&fd.to_string());
readlink(&p).ok()
}
#[cfg(not(target_os = "linux"))]
fn get_path(_fd: c_int) -> Option<PathBuf> {
// FIXME(#24570): implement this for other Unix platforms
None
}
#[cfg(target_os = "linux")]
fn get_mode(fd: c_int) -> Option<(bool, bool)> {
let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) };
if mode == -1 {
return None;
}
match mode & libc::O_ACCMODE {
libc::O_RDONLY => Some((true, false)),
libc::O_RDWR => Some((true, true)),
libc::O_WRONLY => Some((false, true)),
_ => None
}
}
#[cfg(not(target_os = "linux"))]
fn get_mode(_fd: c_int) -> Option<(bool, bool)> {
// FIXME(#24570): implement this for other Unix platforms
None
}
let fd = self.0.raw();
let mut b = f.debug_struct("File");
b.field("fd", &fd);
if let Some(path) = get_path(fd) {
b.field("path", &path);
}
if let Some((read, write)) = get_mode(fd) {
b.field("read", &read).field("write", &write);
}
b.finish()
}
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
let root = Arc::new(p.to_path_buf());
let p = try!(cstr(p));
unsafe {
let ptr = libc::opendir(p.as_ptr());
if ptr.is_null() {
Err(Error::last_os_error())
} else {
Ok(ReadDir { dirp: Dir(ptr), root: root })
}
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::unlink(p.as_ptr()) }));
Ok(())
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
let old = try!(cstr(old));
let new = try!(cstr(new));
try!(cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) }));
Ok(())
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }));
Ok(())
}
pub fn rmdir(p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::rmdir(p.as_ptr()) }));
Ok(())
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
let c_path = try!(cstr(p));
let p = c_path.as_ptr();
let mut len = unsafe { libc::pathconf(p as *mut _, libc::_PC_NAME_MAX) };
if len < 0 {
len = 1024; // FIXME: read PATH_MAX from C ffi?
}
let mut buf: Vec<u8> = Vec::with_capacity(len as usize);
unsafe {
let n = try!(cvt({
libc::readlink(p, buf.as_ptr() as *mut c_char, len as size_t)
}));
buf.set_len(n as usize);
}
Ok(PathBuf::from(OsString::from_vec(buf)))
}
pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
let src = try!(cstr(src));
let dst = try!(cstr(dst));
try!(cvt(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) }));
Ok(())
}
pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
let src = try!(cstr(src));
let dst = try!(cstr(dst));
try!(cvt(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) }));
Ok(())
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
let p = try!(cstr(p));
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::stat(p.as_ptr(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
let p = try!(cstr(p));
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::lstat(p.as_ptr(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn utimes(p: &Path, atime: u64, mtime: u64) -> io::Result<()> {
let p = try!(cstr(p));
let buf = [super::ms_to_timeval(atime), super::ms_to_timeval(mtime)];
try!(cvt(unsafe { c::utimes(p.as_ptr(), buf.as_ptr()) }));
Ok(())
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
let path = try!(CString::new(p.as_os_str().as_bytes()));
let mut buf = vec![0u8; 16 * 1024];
unsafe {
let r = c::realpath(path.as_ptr(), buf.as_mut_ptr() as *mut _);
if r.is_null() {
return Err(io::Error::last_os_error())
}
}
let p = buf.iter().position(|i| *i == 0).unwrap();
buf.truncate(p);
Ok(PathBuf::from(OsString::from_vec(buf)))
}
|
{
return None
}
|
conditional_block
|
fs.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use io::prelude::*;
use os::unix::prelude::*;
use ffi::{CString, CStr, OsString, OsStr};
use fmt;
use io::{self, Error, SeekFrom};
use libc::{self, c_int, size_t, off_t, c_char, mode_t};
use mem;
use path::{Path, PathBuf};
use ptr;
use sync::Arc;
use sys::fd::FileDesc;
use sys::platform::raw;
use sys::{c, cvt, cvt_r};
use sys_common::{AsInner, FromInner};
use vec::Vec;
pub struct File(FileDesc);
pub struct FileAttr {
stat: raw::stat,
}
pub struct ReadDir {
dirp: Dir,
root: Arc<PathBuf>,
}
struct Dir(*mut libc::DIR);
unsafe impl Send for Dir {}
unsafe impl Sync for Dir {}
pub struct DirEntry {
buf: Vec<u8>, // actually *mut libc::dirent_t
root: Arc<PathBuf>,
}
#[derive(Clone)]
pub struct OpenOptions {
flags: c_int,
read: bool,
write: bool,
mode: mode_t,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FilePermissions { mode: mode_t }
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct FileType { mode: mode_t }
pub struct DirBuilder { mode: mode_t }
impl FileAttr {
pub fn size(&self) -> u64 { self.stat.st_size as u64 }
pub fn perm(&self) -> FilePermissions {
FilePermissions { mode: (self.stat.st_mode as mode_t) & 0o777 }
}
pub fn accessed(&self) -> u64 {
self.mktime(self.stat.st_atime as u64, self.stat.st_atime_nsec as u64)
}
pub fn modified(&self) -> u64 {
self.mktime(self.stat.st_mtime as u64, self.stat.st_mtime_nsec as u64)
}
pub fn file_type(&self) -> FileType {
FileType { mode: self.stat.st_mode as mode_t }
}
pub fn raw(&self) -> &raw::stat { &self.stat }
// times are in milliseconds (currently)
fn mktime(&self, secs: u64, nsecs: u64) -> u64 {
secs * 1000 + nsecs / 1000000
}
}
impl AsInner<raw::stat> for FileAttr {
fn as_inner(&self) -> &raw::stat { &self.stat }
}
#[unstable(feature = "metadata_ext", reason = "recently added API")]
pub trait MetadataExt {
fn as_raw_stat(&self) -> &raw::stat;
}
impl MetadataExt for ::fs::Metadata {
fn as_raw_stat(&self) -> &raw::stat { &self.as_inner().stat }
}
impl MetadataExt for ::os::unix::fs::Metadata {
fn as_raw_stat(&self) -> &raw::stat { self.as_inner() }
}
impl FilePermissions {
pub fn readonly(&self) -> bool { self.mode & 0o222 == 0 }
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.mode &= !0o222;
} else {
self.mode |= 0o222;
}
}
pub fn mode(&self) -> raw::mode_t { self.mode }
}
impl FileType {
pub fn is_dir(&self) -> bool { self.is(libc::S_IFDIR) }
pub fn is_file(&self) -> bool { self.is(libc::S_IFREG) }
pub fn is_symlink(&self) -> bool { self.is(libc::S_IFLNK) }
fn is(&self, mode: mode_t) -> bool { self.mode & libc::S_IFMT == mode }
}
impl FromInner<raw::mode_t> for FilePermissions {
fn from_inner(mode: raw::mode_t) -> FilePermissions {
FilePermissions { mode: mode as mode_t }
}
}
impl Iterator for ReadDir {
type Item = io::Result<DirEntry>;
fn next(&mut self) -> Option<io::Result<DirEntry>> {
extern {
fn rust_dirent_t_size() -> c_int;
}
let mut buf: Vec<u8> = Vec::with_capacity(unsafe {
rust_dirent_t_size() as usize
});
let ptr = buf.as_mut_ptr() as *mut libc::dirent_t;
let mut entry_ptr = ptr::null_mut();
loop {
if unsafe { libc::readdir_r(self.dirp.0, ptr, &mut entry_ptr) != 0 } {
return Some(Err(Error::last_os_error()))
}
if entry_ptr.is_null() {
return None
}
let entry = DirEntry {
buf: buf,
root: self.root.clone()
};
if entry.name_bytes() == b"." || entry.name_bytes() == b".." {
buf = entry.buf;
} else {
return Some(Ok(entry))
}
}
}
}
impl Drop for Dir {
fn drop(&mut self) {
let r = unsafe { libc::closedir(self.0) };
debug_assert_eq!(r, 0);
|
pub fn path(&self) -> PathBuf {
self.root.join(<OsStr as OsStrExt>::from_bytes(self.name_bytes()))
}
pub fn file_name(&self) -> OsString {
OsStr::from_bytes(self.name_bytes()).to_os_string()
}
pub fn metadata(&self) -> io::Result<FileAttr> {
lstat(&self.path())
}
pub fn file_type(&self) -> io::Result<FileType> {
extern {
fn rust_dir_get_mode(ptr: *mut libc::dirent_t) -> c_int;
}
unsafe {
match rust_dir_get_mode(self.dirent()) {
-1 => lstat(&self.path()).map(|m| m.file_type()),
n => Ok(FileType { mode: n as mode_t }),
}
}
}
pub fn ino(&self) -> raw::ino_t {
extern {
fn rust_dir_get_ino(ptr: *mut libc::dirent_t) -> raw::ino_t;
}
unsafe { rust_dir_get_ino(self.dirent()) }
}
fn name_bytes(&self) -> &[u8] {
extern {
fn rust_list_dir_val(ptr: *mut libc::dirent_t) -> *const c_char;
}
unsafe {
CStr::from_ptr(rust_list_dir_val(self.dirent())).to_bytes()
}
}
fn dirent(&self) -> *mut libc::dirent_t {
self.buf.as_ptr() as *mut _
}
}
impl OpenOptions {
pub fn new() -> OpenOptions {
OpenOptions {
flags: 0,
read: false,
write: false,
mode: 0o666,
}
}
pub fn read(&mut self, read: bool) {
self.read = read;
}
pub fn write(&mut self, write: bool) {
self.write = write;
}
pub fn append(&mut self, append: bool) {
self.flag(libc::O_APPEND, append);
}
pub fn truncate(&mut self, truncate: bool) {
self.flag(libc::O_TRUNC, truncate);
}
pub fn create(&mut self, create: bool) {
self.flag(libc::O_CREAT, create);
}
pub fn mode(&mut self, mode: raw::mode_t) {
self.mode = mode as mode_t;
}
fn flag(&mut self, bit: c_int, on: bool) {
if on {
self.flags |= bit;
} else {
self.flags &= !bit;
}
}
}
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
let path = try!(cstr(path));
File::open_c(&path, opts)
}
pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
let flags = opts.flags | match (opts.read, opts.write) {
(true, true) => libc::O_RDWR,
(false, true) => libc::O_WRONLY,
(true, false) |
(false, false) => libc::O_RDONLY,
};
let fd = try!(cvt_r(|| unsafe {
libc::open(path.as_ptr(), flags, opts.mode)
}));
let fd = FileDesc::new(fd);
fd.set_cloexec();
Ok(File(fd))
}
pub fn file_attr(&self) -> io::Result<FileAttr> {
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::fstat(self.0.raw(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn fsync(&self) -> io::Result<()> {
try!(cvt_r(|| unsafe { libc::fsync(self.0.raw()) }));
Ok(())
}
pub fn datasync(&self) -> io::Result<()> {
try!(cvt_r(|| unsafe { os_datasync(self.0.raw()) }));
return Ok(());
#[cfg(any(target_os = "macos", target_os = "ios"))]
unsafe fn os_datasync(fd: c_int) -> c_int {
libc::fcntl(fd, libc::F_FULLFSYNC)
}
#[cfg(target_os = "linux")]
unsafe fn os_datasync(fd: c_int) -> c_int { libc::fdatasync(fd) }
#[cfg(not(any(target_os = "macos",
target_os = "ios",
target_os = "linux")))]
unsafe fn os_datasync(fd: c_int) -> c_int { libc::fsync(fd) }
}
pub fn truncate(&self, size: u64) -> io::Result<()> {
try!(cvt_r(|| unsafe {
libc::ftruncate(self.0.raw(), size as libc::off_t)
}));
Ok(())
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.0.read(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
pub fn flush(&self) -> io::Result<()> { Ok(()) }
pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
let (whence, pos) = match pos {
SeekFrom::Start(off) => (libc::SEEK_SET, off as off_t),
SeekFrom::End(off) => (libc::SEEK_END, off as off_t),
SeekFrom::Current(off) => (libc::SEEK_CUR, off as off_t),
};
let n = try!(cvt(unsafe { libc::lseek(self.0.raw(), pos, whence) }));
Ok(n as u64)
}
pub fn fd(&self) -> &FileDesc { &self.0 }
}
impl DirBuilder {
pub fn new() -> DirBuilder {
DirBuilder { mode: 0o777 }
}
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }));
Ok(())
}
pub fn set_mode(&mut self, mode: mode_t) {
self.mode = mode;
}
}
fn cstr(path: &Path) -> io::Result<CString> {
path.as_os_str().to_cstring().ok_or(
io::Error::new(io::ErrorKind::InvalidInput, "path contained a null"))
}
impl FromInner<c_int> for File {
fn from_inner(fd: c_int) -> File {
File(FileDesc::new(fd))
}
}
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[cfg(target_os = "linux")]
fn get_path(fd: c_int) -> Option<PathBuf> {
use string::ToString;
let mut p = PathBuf::from("/proc/self/fd");
p.push(&fd.to_string());
readlink(&p).ok()
}
#[cfg(not(target_os = "linux"))]
fn get_path(_fd: c_int) -> Option<PathBuf> {
// FIXME(#24570): implement this for other Unix platforms
None
}
#[cfg(target_os = "linux")]
fn get_mode(fd: c_int) -> Option<(bool, bool)> {
let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) };
if mode == -1 {
return None;
}
match mode & libc::O_ACCMODE {
libc::O_RDONLY => Some((true, false)),
libc::O_RDWR => Some((true, true)),
libc::O_WRONLY => Some((false, true)),
_ => None
}
}
#[cfg(not(target_os = "linux"))]
fn get_mode(_fd: c_int) -> Option<(bool, bool)> {
// FIXME(#24570): implement this for other Unix platforms
None
}
let fd = self.0.raw();
let mut b = f.debug_struct("File");
b.field("fd", &fd);
if let Some(path) = get_path(fd) {
b.field("path", &path);
}
if let Some((read, write)) = get_mode(fd) {
b.field("read", &read).field("write", &write);
}
b.finish()
}
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
let root = Arc::new(p.to_path_buf());
let p = try!(cstr(p));
unsafe {
let ptr = libc::opendir(p.as_ptr());
if ptr.is_null() {
Err(Error::last_os_error())
} else {
Ok(ReadDir { dirp: Dir(ptr), root: root })
}
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::unlink(p.as_ptr()) }));
Ok(())
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
let old = try!(cstr(old));
let new = try!(cstr(new));
try!(cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) }));
Ok(())
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }));
Ok(())
}
pub fn rmdir(p: &Path) -> io::Result<()> {
let p = try!(cstr(p));
try!(cvt(unsafe { libc::rmdir(p.as_ptr()) }));
Ok(())
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
let c_path = try!(cstr(p));
let p = c_path.as_ptr();
let mut len = unsafe { libc::pathconf(p as *mut _, libc::_PC_NAME_MAX) };
if len < 0 {
len = 1024; // FIXME: read PATH_MAX from C ffi?
}
let mut buf: Vec<u8> = Vec::with_capacity(len as usize);
unsafe {
let n = try!(cvt({
libc::readlink(p, buf.as_ptr() as *mut c_char, len as size_t)
}));
buf.set_len(n as usize);
}
Ok(PathBuf::from(OsString::from_vec(buf)))
}
pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
let src = try!(cstr(src));
let dst = try!(cstr(dst));
try!(cvt(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) }));
Ok(())
}
pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
let src = try!(cstr(src));
let dst = try!(cstr(dst));
try!(cvt(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) }));
Ok(())
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
let p = try!(cstr(p));
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::stat(p.as_ptr(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
let p = try!(cstr(p));
let mut stat: raw::stat = unsafe { mem::zeroed() };
try!(cvt(unsafe {
libc::lstat(p.as_ptr(), &mut stat as *mut _ as *mut _)
}));
Ok(FileAttr { stat: stat })
}
pub fn utimes(p: &Path, atime: u64, mtime: u64) -> io::Result<()> {
let p = try!(cstr(p));
let buf = [super::ms_to_timeval(atime), super::ms_to_timeval(mtime)];
try!(cvt(unsafe { c::utimes(p.as_ptr(), buf.as_ptr()) }));
Ok(())
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
let path = try!(CString::new(p.as_os_str().as_bytes()));
let mut buf = vec![0u8; 16 * 1024];
unsafe {
let r = c::realpath(path.as_ptr(), buf.as_mut_ptr() as *mut _);
if r.is_null() {
return Err(io::Error::last_os_error())
}
}
let p = buf.iter().position(|i| *i == 0).unwrap();
buf.truncate(p);
Ok(PathBuf::from(OsString::from_vec(buf)))
}
|
}
}
impl DirEntry {
|
random_line_split
|
mod.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Networking I/O
use io::{IoError, IoResult, InvalidInput};
use option::None;
use result::{Ok, Err};
use self::ip::{SocketAddr, ToSocketAddr};
pub use self::addrinfo::get_host_addresses;
pub mod addrinfo;
pub mod tcp;
pub mod udp;
pub mod ip;
pub mod pipe;
fn with_addresses<A: ToSocketAddr, T>(addr: A, action: |SocketAddr| -> IoResult<T>)
-> IoResult<T> {
const DEFAULT_ERROR: IoError = IoError {
kind: InvalidInput,
desc: "no addresses found for hostname",
detail: None
};
let addresses = try!(addr.to_socket_addr_all());
let mut err = DEFAULT_ERROR;
|
match action(addr) {
Ok(r) => return Ok(r),
Err(e) => err = e
}
}
Err(err)
}
|
for addr in addresses.into_iter() {
|
random_line_split
|
_version.py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = ""
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "datapoint/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
|
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # strategy 1: expanded git-archive keywords
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    # strategy 2: ask git itself; derive the repo root from __file__
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # strategy 3: parse a version out of the parent directory name
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
# commit: short hex revision ID
|
random_line_split
|
_version.py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames,
            "full": git_full,
            "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    A plain attribute bag; get_config() creates an instance and assigns
    VCS, style, tag_prefix, parentdir_prefix, versionfile_source and
    verbose onto it.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these values are baked in when 'setup.py versioneer' writes
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "datapoint/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow by get_versions(): each version-extraction
    strategy raises this to mean "try the next strategy".
    """
# NOTE(review): appears unused within this module — presumably filled in by
# versioneer.py when it embeds the long version template; confirm upstream.
LONG_VERSION_PY = {}
# Registry of VCS handlers: vcs name -> {method name -> handler function},
# populated by the @register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # create the per-VCS sub-dict on first use, then register f
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable in `commands` until one launches,
    then runs it with `args`. Returns (stdout, returncode); returns
    (None, None) when no candidate could be launched, and
    (None, returncode) when the command ran but exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]  # py2/py3-compatible access to the exception
            if e.errno == errno.ENOENT:
                continue  # executable not found: try the next candidate
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # the for-loop never hit `break`: no candidate could be launched
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()  # bytes -> str on py3
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.
    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            # prefix matched: the remainder of the directory name is the
            # version string
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    # map each assignment prefix to the keyword name it populates
    targets = {"git_refnames =": "refnames",
               "git_full =": "full",
               "git_date =": "date"}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            stripped = line.strip()
            for prefix, key in targets.items():
                if stripped.startswith(prefix):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[key] = mo.group(1)
        f.close()
    except EnvironmentError:
        # unreadable/missing file: return whatever was collected (likely {})
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    `keywords` holds the $Format$ substitutions expanded by git-archive;
    raises NotThisMethod when they are missing or unexpanded.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # keyword was never substituted: this is not a git-archive tarball
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys long, short, closest-tag, distance,
    dirty, error and date, consumed by the render_* functions.  Raises
    NotThisMethod when `root` is not a git checkout or git cannot be run.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    PEP 440 local version identifiers may contain only one "+"; if the
    closest tag already carries a local segment, further components must
    be joined with ".".

    Args:
        pieces: dict from git_pieces_from_vcs(); only "closest-tag" is
            consulted (may be absent or None).

    Returns:
        "." if the closest tag already contains "+", else "+".
    """
    # "closest-tag" can be present but None (no tag found); coalesce to ""
    # so the membership test never raises TypeError on None.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
|
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.
    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    if not tag:
        # exception #1: nothing tagged yet
        return "0.post.dev%d" % distance
    if distance:
        return tag + ".post.dev%d" % distance
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .
    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    dirty = pieces["dirty"]
    if not tag:
        # exception #1: no tag found at all
        out = "0.post%d" % pieces["distance"]
        if dirty:
            out += ".dev0"
        out += "+g%s" % pieces["short"]
        return out
    out = tag
    if pieces["distance"] or dirty:
        out += ".post%d" % pieces["distance"]
        if dirty:
            out += ".dev0"
        out += plus_or_dot(pieces)
        out += "g%s" % pieces["short"]
    return out
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .
    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            # past the tag (or unclean): append a post-release segment
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"  # .dev0 marks an unclean working tree
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            # short form only mentions distance/hash when past the tag
            out = "%s-%d-g%s" % (out, pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hash
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # long form always appends distance and hash, even at distance 0
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        out = pieces["short"]  # exception #1
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # upstream parsing failed; report the error instead of a version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # strategy 1: expanded git-archive keywords
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    # strategy 2: ask git itself; derive the repo root from __file__
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # strategy 3: parse a version out of the parent directory name
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
|
identifier_body
|
_version.py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames,
            "full": git_full,
            "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    A plain attribute bag; get_config() creates an instance and assigns
    VCS, style, tag_prefix, parentdir_prefix, versionfile_source and
    verbose onto it.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these values are baked in when 'setup.py versioneer' writes
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "datapoint/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow by get_versions(): each version-extraction
    strategy raises this to mean "try the next strategy".
    """
# NOTE(review): appears unused within this module — presumably filled in by
# versioneer.py when it embeds the long version template; confirm upstream.
LONG_VERSION_PY = {}
# Registry of VCS handlers: vcs name -> {method name -> handler function},
# populated by the @register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # create the per-VCS sub-dict on first use, then register f
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable in `commands` until one launches,
    then runs it with `args`. Returns (stdout, returncode); returns
    (None, None) when no candidate could be launched, and
    (None, returncode) when the command ran but exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]  # py2/py3-compatible access to the exception
            if e.errno == errno.ENOENT:
                continue  # executable not found: try the next candidate
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # the for-loop never hit `break`: no candidate could be launched
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()  # bytes -> str on py3
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.
    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            # prefix matched: the remainder of the directory name is the
            # version string
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    # map each assignment prefix to the keyword name it populates
    targets = {"git_refnames =": "refnames",
               "git_full =": "full",
               "git_date =": "date"}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            stripped = line.strip()
            for prefix, key in targets.items():
                if stripped.startswith(prefix):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[key] = mo.group(1)
        f.close()
    except EnvironmentError:
        # unreadable/missing file: return whatever was collected (likely {})
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    `keywords` holds the $Format$ substitutions expanded by git-archive;
    raises NotThisMethod when they are missing or unexpanded.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # keyword was never substituted: this is not a git-archive tarball
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys long, short, closest-tag, distance,
    dirty, error and date, consumed by the render_* functions.  Raises
    NotThisMethod when `root` is not a git checkout or git cannot be run.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    PEP 440 local version identifiers may contain only one "+"; if the
    closest tag already carries a local segment, further components must
    be joined with ".".

    Args:
        pieces: dict from git_pieces_from_vcs(); only "closest-tag" is
            consulted (may be absent or None).

    Returns:
        "." if the closest tag already contains "+", else "+".
    """
    # "closest-tag" can be present but None (no tag found); coalesce to ""
    # so the membership test never raises TypeError on None.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".
    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    dirty = pieces["dirty"]
    if not tag:
        # exception #1: nothing tagged, build an explicit "untagged" local id
        out = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if dirty:
            out += ".dirty"
        return out
    out = tag
    if pieces["distance"] or dirty:
        out += plus_or_dot(pieces)
        out += "%d.g%s" % (pieces["distance"], pieces["short"])
        if dirty:
            out += ".dirty"
    return out
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.
    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    if not tag:
        # exception #1: nothing tagged yet
        return "0.post.dev%d" % distance
    if distance:
        return tag + ".post.dev%d" % distance
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .
    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    dirty = pieces["dirty"]
    if not tag:
        # exception #1: no tag found at all
        out = "0.post%d" % pieces["distance"]
        if dirty:
            out += ".dev0"
        out += "+g%s" % pieces["short"]
        return out
    out = tag
    if pieces["distance"] or dirty:
        out += ".post%d" % pieces["distance"]
        if dirty:
            out += ".dev0"
        out += plus_or_dot(pieces)
        out += "g%s" % pieces["short"]
    return out
def
|
(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            # short form only mentions distance/hash when past the tag
            out = "%s-%d-g%s" % (out, pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hash
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # long form always appends distance and hash, even at distance 0
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        out = pieces["short"]  # exception #1
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # upstream parsing failed; report the error instead of a version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # strategy 1: expanded git-archive keywords
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    # strategy 2: ask git itself; derive the repo root from __file__
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # strategy 3: parse a version out of the parent directory name
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
render_pep440_old
|
identifier_name
|
_version.py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = ""
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "datapoint/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
|
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
|
conditional_block
|
translation_construction.rs
|
#[cfg(feature = "arbitrary")]
use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use num::{One, Zero};
use rand::distributions::{Distribution, Standard};
use rand::Rng;
use simba::scalar::ClosedAdd;
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, U1, U2, U3, U4, U5, U6};
use crate::base::{DefaultAllocator, Scalar, VectorN};
use crate::geometry::Translation;
impl<N: Scalar + Zero, D: DimName> Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new identity translation.
///
/// # Example
/// ```
/// # use nalgebra::{Point2, Point3, Translation2, Translation3};
/// let t = Translation2::identity();
/// let p = Point2::new(1.0, 2.0);
/// assert_eq!(t * p, p);
///
/// // Works in all dimensions.
/// let t = Translation3::identity();
/// let p = Point3::new(1.0, 2.0, 3.0);
/// assert_eq!(t * p, p);
/// ```
#[inline]
pub fn identity() -> Translation<N, D> {
Self::from(VectorN::<N, D>::from_element(N::zero()))
}
}
impl<N: Scalar + Zero + ClosedAdd, D: DimName> One for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn one() -> Self {
Self::identity()
}
}
impl<N: Scalar, D: DimName> Distribution<Translation<N, D>> for Standard
where
DefaultAllocator: Allocator<N, D>,
Standard: Distribution<N>,
{
#[inline]
fn
|
<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> Translation<N, D> {
Translation::from(rng.gen::<VectorN<N, D>>())
}
}
#[cfg(feature = "arbitrary")]
impl<N: Scalar + Arbitrary, D: DimName> Arbitrary for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Send,
{
#[inline]
fn arbitrary<G: Gen>(rng: &mut G) -> Self {
let v: VectorN<N, D> = Arbitrary::arbitrary(rng);
Self::from(v)
}
}
/*
*
* Small translation construction from components.
*
*/
macro_rules! componentwise_constructors_impl(
($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$(
impl<N: Scalar> Translation<N, $D>
where DefaultAllocator: Allocator<N, $D> {
#[doc = "Initializes this translation from its components."]
#[doc = "# Example\n```"]
#[doc = $doc]
#[doc = "```"]
#[inline]
pub fn new($($args: N),*) -> Self {
Self::from(VectorN::<N, $D>::new($($args),*))
}
}
)*}
);
componentwise_constructors_impl!(
"# use nalgebra::Translation1;\nlet t = Translation1::new(1.0);\nassert!(t.vector.x == 1.0);";
U1, x:0;
"# use nalgebra::Translation2;\nlet t = Translation2::new(1.0, 2.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0);";
U2, x:0, y:1;
"# use nalgebra::Translation3;\nlet t = Translation3::new(1.0, 2.0, 3.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0);";
U3, x:0, y:1, z:2;
"# use nalgebra::Translation4;\nlet t = Translation4::new(1.0, 2.0, 3.0, 4.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0);";
U4, x:0, y:1, z:2, w:3;
"# use nalgebra::Translation5;\nlet t = Translation5::new(1.0, 2.0, 3.0, 4.0, 5.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0 && t.vector.a == 5.0);";
U5, x:0, y:1, z:2, w:3, a:4;
"# use nalgebra::Translation6;\nlet t = Translation6::new(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0 && t.vector.a == 5.0 && t.vector.b == 6.0);";
U6, x:0, y:1, z:2, w:3, a:4, b:5;
);
|
sample
|
identifier_name
|
translation_construction.rs
|
#[cfg(feature = "arbitrary")]
use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
|
use simba::scalar::ClosedAdd;
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, U1, U2, U3, U4, U5, U6};
use crate::base::{DefaultAllocator, Scalar, VectorN};
use crate::geometry::Translation;
impl<N: Scalar + Zero, D: DimName> Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new identity translation.
///
/// # Example
/// ```
/// # use nalgebra::{Point2, Point3, Translation2, Translation3};
/// let t = Translation2::identity();
/// let p = Point2::new(1.0, 2.0);
/// assert_eq!(t * p, p);
///
/// // Works in all dimensions.
/// let t = Translation3::identity();
/// let p = Point3::new(1.0, 2.0, 3.0);
/// assert_eq!(t * p, p);
/// ```
#[inline]
pub fn identity() -> Translation<N, D> {
Self::from(VectorN::<N, D>::from_element(N::zero()))
}
}
impl<N: Scalar + Zero + ClosedAdd, D: DimName> One for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn one() -> Self {
Self::identity()
}
}
impl<N: Scalar, D: DimName> Distribution<Translation<N, D>> for Standard
where
DefaultAllocator: Allocator<N, D>,
Standard: Distribution<N>,
{
#[inline]
fn sample<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> Translation<N, D> {
Translation::from(rng.gen::<VectorN<N, D>>())
}
}
#[cfg(feature = "arbitrary")]
impl<N: Scalar + Arbitrary, D: DimName> Arbitrary for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Send,
{
#[inline]
fn arbitrary<G: Gen>(rng: &mut G) -> Self {
let v: VectorN<N, D> = Arbitrary::arbitrary(rng);
Self::from(v)
}
}
/*
*
* Small translation construction from components.
*
*/
macro_rules! componentwise_constructors_impl(
($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$(
impl<N: Scalar> Translation<N, $D>
where DefaultAllocator: Allocator<N, $D> {
#[doc = "Initializes this translation from its components."]
#[doc = "# Example\n```"]
#[doc = $doc]
#[doc = "```"]
#[inline]
pub fn new($($args: N),*) -> Self {
Self::from(VectorN::<N, $D>::new($($args),*))
}
}
)*}
);
componentwise_constructors_impl!(
"# use nalgebra::Translation1;\nlet t = Translation1::new(1.0);\nassert!(t.vector.x == 1.0);";
U1, x:0;
"# use nalgebra::Translation2;\nlet t = Translation2::new(1.0, 2.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0);";
U2, x:0, y:1;
"# use nalgebra::Translation3;\nlet t = Translation3::new(1.0, 2.0, 3.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0);";
U3, x:0, y:1, z:2;
"# use nalgebra::Translation4;\nlet t = Translation4::new(1.0, 2.0, 3.0, 4.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0);";
U4, x:0, y:1, z:2, w:3;
"# use nalgebra::Translation5;\nlet t = Translation5::new(1.0, 2.0, 3.0, 4.0, 5.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0 && t.vector.a == 5.0);";
U5, x:0, y:1, z:2, w:3, a:4;
"# use nalgebra::Translation6;\nlet t = Translation6::new(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0 && t.vector.a == 5.0 && t.vector.b == 6.0);";
U6, x:0, y:1, z:2, w:3, a:4, b:5;
);
|
use quickcheck::{Arbitrary, Gen};
use num::{One, Zero};
use rand::distributions::{Distribution, Standard};
use rand::Rng;
|
random_line_split
|
translation_construction.rs
|
#[cfg(feature = "arbitrary")]
use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use num::{One, Zero};
use rand::distributions::{Distribution, Standard};
use rand::Rng;
use simba::scalar::ClosedAdd;
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, U1, U2, U3, U4, U5, U6};
use crate::base::{DefaultAllocator, Scalar, VectorN};
use crate::geometry::Translation;
impl<N: Scalar + Zero, D: DimName> Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new identity translation.
///
/// # Example
/// ```
/// # use nalgebra::{Point2, Point3, Translation2, Translation3};
/// let t = Translation2::identity();
/// let p = Point2::new(1.0, 2.0);
/// assert_eq!(t * p, p);
///
/// // Works in all dimensions.
/// let t = Translation3::identity();
/// let p = Point3::new(1.0, 2.0, 3.0);
/// assert_eq!(t * p, p);
/// ```
#[inline]
pub fn identity() -> Translation<N, D> {
Self::from(VectorN::<N, D>::from_element(N::zero()))
}
}
impl<N: Scalar + Zero + ClosedAdd, D: DimName> One for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn one() -> Self
|
}
impl<N: Scalar, D: DimName> Distribution<Translation<N, D>> for Standard
where
DefaultAllocator: Allocator<N, D>,
Standard: Distribution<N>,
{
#[inline]
fn sample<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> Translation<N, D> {
Translation::from(rng.gen::<VectorN<N, D>>())
}
}
#[cfg(feature = "arbitrary")]
impl<N: Scalar + Arbitrary, D: DimName> Arbitrary for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Send,
{
#[inline]
fn arbitrary<G: Gen>(rng: &mut G) -> Self {
let v: VectorN<N, D> = Arbitrary::arbitrary(rng);
Self::from(v)
}
}
/*
*
* Small translation construction from components.
*
*/
macro_rules! componentwise_constructors_impl(
($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$(
impl<N: Scalar> Translation<N, $D>
where DefaultAllocator: Allocator<N, $D> {
#[doc = "Initializes this translation from its components."]
#[doc = "# Example\n```"]
#[doc = $doc]
#[doc = "```"]
#[inline]
pub fn new($($args: N),*) -> Self {
Self::from(VectorN::<N, $D>::new($($args),*))
}
}
)*}
);
componentwise_constructors_impl!(
"# use nalgebra::Translation1;\nlet t = Translation1::new(1.0);\nassert!(t.vector.x == 1.0);";
U1, x:0;
"# use nalgebra::Translation2;\nlet t = Translation2::new(1.0, 2.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0);";
U2, x:0, y:1;
"# use nalgebra::Translation3;\nlet t = Translation3::new(1.0, 2.0, 3.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0);";
U3, x:0, y:1, z:2;
"# use nalgebra::Translation4;\nlet t = Translation4::new(1.0, 2.0, 3.0, 4.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0);";
U4, x:0, y:1, z:2, w:3;
"# use nalgebra::Translation5;\nlet t = Translation5::new(1.0, 2.0, 3.0, 4.0, 5.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0 && t.vector.a == 5.0);";
U5, x:0, y:1, z:2, w:3, a:4;
"# use nalgebra::Translation6;\nlet t = Translation6::new(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0 && t.vector.a == 5.0 && t.vector.b == 6.0);";
U6, x:0, y:1, z:2, w:3, a:4, b:5;
);
|
{
Self::identity()
}
|
identifier_body
|
stream.rs
|
// Copyright © 2017-2018 Mozilla Foundation
//
// This program is made available under an ISC-style license. See the
// accompanying file LICENSE for details.
use callbacks::cubeb_device_changed_callback;
use channel::cubeb_channel_layout;
use device::cubeb_device;
use format::cubeb_sample_format;
use std::{fmt, mem};
use std::os::raw::{c_float, c_int, c_uint, c_void, c_char};
cubeb_enum! {
pub enum cubeb_stream_prefs {
CUBEB_STREAM_PREF_NONE = 0x00,
CUBEB_STREAM_PREF_LOOPBACK = 0x01,
CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING = 0x02,
CUBEB_STREAM_PREF_VOICE = 0x04,
}
}
cubeb_enum! {
pub enum cubeb_state {
CUBEB_STATE_STARTED,
CUBEB_STATE_STOPPED,
CUBEB_STATE_DRAINED,
CUBEB_STATE_ERROR,
}
}
pub enum c
|
{}
#[repr(C)]
#[derive(Clone, Copy)]
pub struct cubeb_stream_params {
pub format: cubeb_sample_format,
pub rate: c_uint,
pub channels: c_uint,
pub layout: cubeb_channel_layout,
pub prefs: cubeb_stream_prefs,
}
impl Default for cubeb_stream_params {
fn default() -> Self {
unsafe { mem::zeroed() }
}
}
// Explicit Debug impl to work around bug in ctest
impl fmt::Debug for cubeb_stream_params {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("cubeb_stream_params")
.field("format", &self.format)
.field("rate", &self.rate)
.field("channels", &self.channels)
.field("layout", &self.layout)
.field("prefs", &self.prefs)
.finish()
}
}
extern "C" {
pub fn cubeb_stream_destroy(stream: *mut cubeb_stream);
pub fn cubeb_stream_start(stream: *mut cubeb_stream) -> c_int;
pub fn cubeb_stream_stop(stream: *mut cubeb_stream) -> c_int;
pub fn cubeb_stream_reset_default_device(stream: *mut cubeb_stream) -> c_int;
pub fn cubeb_stream_get_position(stream: *mut cubeb_stream, position: *mut u64) -> c_int;
pub fn cubeb_stream_get_latency(stream: *mut cubeb_stream, latency: *mut c_uint) -> c_int;
pub fn cubeb_stream_get_input_latency(stream: *mut cubeb_stream, latency: *mut c_uint) -> c_int;
pub fn cubeb_stream_set_volume(stream: *mut cubeb_stream, volume: c_float) -> c_int;
pub fn cubeb_stream_set_name(stream: *mut cubeb_stream, name: *const c_char) -> c_int;
pub fn cubeb_stream_get_current_device(
stream: *mut cubeb_stream,
device: *mut *mut cubeb_device,
) -> c_int;
pub fn cubeb_stream_device_destroy(
stream: *mut cubeb_stream,
devices: *mut cubeb_device,
) -> c_int;
pub fn cubeb_stream_register_device_changed_callback(
stream: *mut cubeb_stream,
device_changed_callback: cubeb_device_changed_callback,
) -> c_int;
pub fn cubeb_stream_user_ptr(stream: *mut cubeb_stream) -> *mut c_void;
}
|
ubeb_stream
|
identifier_name
|
stream.rs
|
// Copyright © 2017-2018 Mozilla Foundation
//
// This program is made available under an ISC-style license. See the
// accompanying file LICENSE for details.
use callbacks::cubeb_device_changed_callback;
use channel::cubeb_channel_layout;
use device::cubeb_device;
use format::cubeb_sample_format;
use std::{fmt, mem};
use std::os::raw::{c_float, c_int, c_uint, c_void, c_char};
cubeb_enum! {
pub enum cubeb_stream_prefs {
|
CUBEB_STREAM_PREF_NONE = 0x00,
CUBEB_STREAM_PREF_LOOPBACK = 0x01,
CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING = 0x02,
CUBEB_STREAM_PREF_VOICE = 0x04,
}
}
cubeb_enum! {
pub enum cubeb_state {
CUBEB_STATE_STARTED,
CUBEB_STATE_STOPPED,
CUBEB_STATE_DRAINED,
CUBEB_STATE_ERROR,
}
}
pub enum cubeb_stream {}
#[repr(C)]
#[derive(Clone, Copy)]
pub struct cubeb_stream_params {
pub format: cubeb_sample_format,
pub rate: c_uint,
pub channels: c_uint,
pub layout: cubeb_channel_layout,
pub prefs: cubeb_stream_prefs,
}
impl Default for cubeb_stream_params {
fn default() -> Self {
unsafe { mem::zeroed() }
}
}
// Explicit Debug impl to work around bug in ctest
impl fmt::Debug for cubeb_stream_params {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("cubeb_stream_params")
.field("format", &self.format)
.field("rate", &self.rate)
.field("channels", &self.channels)
.field("layout", &self.layout)
.field("prefs", &self.prefs)
.finish()
}
}
extern "C" {
pub fn cubeb_stream_destroy(stream: *mut cubeb_stream);
pub fn cubeb_stream_start(stream: *mut cubeb_stream) -> c_int;
pub fn cubeb_stream_stop(stream: *mut cubeb_stream) -> c_int;
pub fn cubeb_stream_reset_default_device(stream: *mut cubeb_stream) -> c_int;
pub fn cubeb_stream_get_position(stream: *mut cubeb_stream, position: *mut u64) -> c_int;
pub fn cubeb_stream_get_latency(stream: *mut cubeb_stream, latency: *mut c_uint) -> c_int;
pub fn cubeb_stream_get_input_latency(stream: *mut cubeb_stream, latency: *mut c_uint) -> c_int;
pub fn cubeb_stream_set_volume(stream: *mut cubeb_stream, volume: c_float) -> c_int;
pub fn cubeb_stream_set_name(stream: *mut cubeb_stream, name: *const c_char) -> c_int;
pub fn cubeb_stream_get_current_device(
stream: *mut cubeb_stream,
device: *mut *mut cubeb_device,
) -> c_int;
pub fn cubeb_stream_device_destroy(
stream: *mut cubeb_stream,
devices: *mut cubeb_device,
) -> c_int;
pub fn cubeb_stream_register_device_changed_callback(
stream: *mut cubeb_stream,
device_changed_callback: cubeb_device_changed_callback,
) -> c_int;
pub fn cubeb_stream_user_ptr(stream: *mut cubeb_stream) -> *mut c_void;
}
|
random_line_split
|
|
build.rs
|
extern crate bindgen;
use std::fs::{File, create_dir_all, metadata};
use std::path::Path;
use std::io::{ErrorKind, Write};
const HEADERS : &'static [&'static str] = &["ssl", "entropy", "ctr_drbg"];
const HEADER_BASE : &'static str = "/usr/local/include/mbedtls/";
const MOD_FILE : &'static str = r#"
#[allow(dead_code, non_camel_case_types, non_snake_case, non_upper_case_globals)]
mod bindings;
"#;
fn main() {
for header in HEADERS.iter() {
gen(header);
}
}
fn gen(header: &str) {
let dir = "src/mbed/".to_string() + header + "/";
let file = dir.clone() + "bindings.rs";
create_dir_all(&dir).unwrap();
let bindings_file = File::create(file).unwrap();
bindgen::Builder::default()
.header(HEADER_BASE.to_string() + header + ".h")
.link("mbedtls")
.link("mbedx509")
.link("mbedcrypto")
.emit_builtins()
.generate().unwrap()
.write(Box::new(bindings_file))
.unwrap();
;
let mod_file_str = dir.clone() + "/mod.rs";
let metadata = metadata(Path::new(&mod_file_str));
if let Err(e) = metadata {
if let ErrorKind::NotFound = e.kind()
|
}
}
|
{
let mut mod_file = File::create(mod_file_str).unwrap();
mod_file.write(MOD_FILE.as_bytes()).unwrap();
}
|
conditional_block
|
build.rs
|
extern crate bindgen;
use std::fs::{File, create_dir_all, metadata};
|
use std::io::{ErrorKind, Write};
const HEADERS : &'static [&'static str] = &["ssl", "entropy", "ctr_drbg"];
const HEADER_BASE : &'static str = "/usr/local/include/mbedtls/";
const MOD_FILE : &'static str = r#"
#[allow(dead_code, non_camel_case_types, non_snake_case, non_upper_case_globals)]
mod bindings;
"#;
fn main() {
for header in HEADERS.iter() {
gen(header);
}
}
fn gen(header: &str) {
let dir = "src/mbed/".to_string() + header + "/";
let file = dir.clone() + "bindings.rs";
create_dir_all(&dir).unwrap();
let bindings_file = File::create(file).unwrap();
bindgen::Builder::default()
.header(HEADER_BASE.to_string() + header + ".h")
.link("mbedtls")
.link("mbedx509")
.link("mbedcrypto")
.emit_builtins()
.generate().unwrap()
.write(Box::new(bindings_file))
.unwrap();
;
let mod_file_str = dir.clone() + "/mod.rs";
let metadata = metadata(Path::new(&mod_file_str));
if let Err(e) = metadata {
if let ErrorKind::NotFound = e.kind() {
let mut mod_file = File::create(mod_file_str).unwrap();
mod_file.write(MOD_FILE.as_bytes()).unwrap();
}
}
}
|
use std::path::Path;
|
random_line_split
|
build.rs
|
extern crate bindgen;
use std::fs::{File, create_dir_all, metadata};
use std::path::Path;
use std::io::{ErrorKind, Write};
const HEADERS : &'static [&'static str] = &["ssl", "entropy", "ctr_drbg"];
const HEADER_BASE : &'static str = "/usr/local/include/mbedtls/";
const MOD_FILE : &'static str = r#"
#[allow(dead_code, non_camel_case_types, non_snake_case, non_upper_case_globals)]
mod bindings;
"#;
fn main()
|
fn gen(header: &str) {
let dir = "src/mbed/".to_string() + header + "/";
let file = dir.clone() + "bindings.rs";
create_dir_all(&dir).unwrap();
let bindings_file = File::create(file).unwrap();
bindgen::Builder::default()
.header(HEADER_BASE.to_string() + header + ".h")
.link("mbedtls")
.link("mbedx509")
.link("mbedcrypto")
.emit_builtins()
.generate().unwrap()
.write(Box::new(bindings_file))
.unwrap();
;
let mod_file_str = dir.clone() + "/mod.rs";
let metadata = metadata(Path::new(&mod_file_str));
if let Err(e) = metadata {
if let ErrorKind::NotFound = e.kind() {
let mut mod_file = File::create(mod_file_str).unwrap();
mod_file.write(MOD_FILE.as_bytes()).unwrap();
}
}
}
|
{
for header in HEADERS.iter() {
gen(header);
}
}
|
identifier_body
|
build.rs
|
extern crate bindgen;
use std::fs::{File, create_dir_all, metadata};
use std::path::Path;
use std::io::{ErrorKind, Write};
const HEADERS : &'static [&'static str] = &["ssl", "entropy", "ctr_drbg"];
const HEADER_BASE : &'static str = "/usr/local/include/mbedtls/";
const MOD_FILE : &'static str = r#"
#[allow(dead_code, non_camel_case_types, non_snake_case, non_upper_case_globals)]
mod bindings;
"#;
fn
|
() {
for header in HEADERS.iter() {
gen(header);
}
}
fn gen(header: &str) {
let dir = "src/mbed/".to_string() + header + "/";
let file = dir.clone() + "bindings.rs";
create_dir_all(&dir).unwrap();
let bindings_file = File::create(file).unwrap();
bindgen::Builder::default()
.header(HEADER_BASE.to_string() + header + ".h")
.link("mbedtls")
.link("mbedx509")
.link("mbedcrypto")
.emit_builtins()
.generate().unwrap()
.write(Box::new(bindings_file))
.unwrap();
;
let mod_file_str = dir.clone() + "/mod.rs";
let metadata = metadata(Path::new(&mod_file_str));
if let Err(e) = metadata {
if let ErrorKind::NotFound = e.kind() {
let mut mod_file = File::create(mod_file_str).unwrap();
mod_file.write(MOD_FILE.as_bytes()).unwrap();
}
}
}
|
main
|
identifier_name
|
generate.rs
|
//! Generate valid parse trees.
use grammar::repr::*;
use rand::{self, Rng};
use std::iter::Iterator;
#[derive(PartialEq, Eq)]
pub enum ParseTree {
Nonterminal(NonterminalString, Vec<ParseTree>),
Terminal(TerminalString),
}
pub fn random_parse_tree(grammar: &Grammar, symbol: NonterminalString) -> ParseTree {
|
let mut gen = Generator {
grammar: grammar,
rng: rand::thread_rng(),
depth: 0,
};
loop {
// sometimes, the random walk overflows the stack, so we have a max, and if
// it is exceeded, we just try again
if let Some(result) = gen.nonterminal(symbol.clone()) {
return result;
}
gen.depth = 0;
}
}
struct Generator<'grammar> {
grammar: &'grammar Grammar,
rng: rand::rngs::ThreadRng,
depth: u32,
}
const MAX_DEPTH: u32 = 10000;
impl<'grammar> Generator<'grammar> {
fn nonterminal(&mut self, nt: NonterminalString) -> Option<ParseTree> {
if self.depth > MAX_DEPTH {
return None;
}
self.depth += 1;
let productions = self.grammar.productions_for(&nt);
let index: usize = self.rng.gen_range(0, productions.len());
let production = &productions[index];
let trees: Option<Vec<_>> = production
.symbols
.iter()
.map(|sym| self.symbol(sym.clone()))
.collect();
trees.map(|trees| ParseTree::Nonterminal(nt, trees))
}
fn symbol(&mut self, symbol: Symbol) -> Option<ParseTree> {
match symbol {
Symbol::Nonterminal(nt) => self.nonterminal(nt),
Symbol::Terminal(t) => Some(ParseTree::Terminal(t)),
}
}
}
impl ParseTree {
pub fn terminals(&self) -> Vec<TerminalString> {
let mut vec = vec![];
self.push_terminals(&mut vec);
vec
}
fn push_terminals(&self, vec: &mut Vec<TerminalString>) {
match *self {
ParseTree::Terminal(ref s) => vec.push(s.clone()),
ParseTree::Nonterminal(_, ref trees) => {
for tree in trees {
tree.push_terminals(vec);
}
}
}
}
}
|
random_line_split
|
|
generate.rs
|
//! Generate valid parse trees.
use grammar::repr::*;
use rand::{self, Rng};
use std::iter::Iterator;
#[derive(PartialEq, Eq)]
pub enum ParseTree {
Nonterminal(NonterminalString, Vec<ParseTree>),
Terminal(TerminalString),
}
pub fn random_parse_tree(grammar: &Grammar, symbol: NonterminalString) -> ParseTree
|
struct Generator<'grammar> {
grammar: &'grammar Grammar,
rng: rand::rngs::ThreadRng,
depth: u32,
}
const MAX_DEPTH: u32 = 10000;
impl<'grammar> Generator<'grammar> {
fn nonterminal(&mut self, nt: NonterminalString) -> Option<ParseTree> {
if self.depth > MAX_DEPTH {
return None;
}
self.depth += 1;
let productions = self.grammar.productions_for(&nt);
let index: usize = self.rng.gen_range(0, productions.len());
let production = &productions[index];
let trees: Option<Vec<_>> = production
.symbols
.iter()
.map(|sym| self.symbol(sym.clone()))
.collect();
trees.map(|trees| ParseTree::Nonterminal(nt, trees))
}
fn symbol(&mut self, symbol: Symbol) -> Option<ParseTree> {
match symbol {
Symbol::Nonterminal(nt) => self.nonterminal(nt),
Symbol::Terminal(t) => Some(ParseTree::Terminal(t)),
}
}
}
impl ParseTree {
pub fn terminals(&self) -> Vec<TerminalString> {
let mut vec = vec![];
self.push_terminals(&mut vec);
vec
}
fn push_terminals(&self, vec: &mut Vec<TerminalString>) {
match *self {
ParseTree::Terminal(ref s) => vec.push(s.clone()),
ParseTree::Nonterminal(_, ref trees) => {
for tree in trees {
tree.push_terminals(vec);
}
}
}
}
}
|
{
let mut gen = Generator {
grammar: grammar,
rng: rand::thread_rng(),
depth: 0,
};
loop {
// sometimes, the random walk overflows the stack, so we have a max, and if
// it is exceeded, we just try again
if let Some(result) = gen.nonterminal(symbol.clone()) {
return result;
}
gen.depth = 0;
}
}
|
identifier_body
|
generate.rs
|
//! Generate valid parse trees.
use grammar::repr::*;
use rand::{self, Rng};
use std::iter::Iterator;
#[derive(PartialEq, Eq)]
pub enum
|
{
Nonterminal(NonterminalString, Vec<ParseTree>),
Terminal(TerminalString),
}
pub fn random_parse_tree(grammar: &Grammar, symbol: NonterminalString) -> ParseTree {
let mut gen = Generator {
grammar: grammar,
rng: rand::thread_rng(),
depth: 0,
};
loop {
// sometimes, the random walk overflows the stack, so we have a max, and if
// it is exceeded, we just try again
if let Some(result) = gen.nonterminal(symbol.clone()) {
return result;
}
gen.depth = 0;
}
}
struct Generator<'grammar> {
grammar: &'grammar Grammar,
rng: rand::rngs::ThreadRng,
depth: u32,
}
const MAX_DEPTH: u32 = 10000;
impl<'grammar> Generator<'grammar> {
fn nonterminal(&mut self, nt: NonterminalString) -> Option<ParseTree> {
if self.depth > MAX_DEPTH {
return None;
}
self.depth += 1;
let productions = self.grammar.productions_for(&nt);
let index: usize = self.rng.gen_range(0, productions.len());
let production = &productions[index];
let trees: Option<Vec<_>> = production
.symbols
.iter()
.map(|sym| self.symbol(sym.clone()))
.collect();
trees.map(|trees| ParseTree::Nonterminal(nt, trees))
}
fn symbol(&mut self, symbol: Symbol) -> Option<ParseTree> {
match symbol {
Symbol::Nonterminal(nt) => self.nonterminal(nt),
Symbol::Terminal(t) => Some(ParseTree::Terminal(t)),
}
}
}
impl ParseTree {
pub fn terminals(&self) -> Vec<TerminalString> {
let mut vec = vec![];
self.push_terminals(&mut vec);
vec
}
fn push_terminals(&self, vec: &mut Vec<TerminalString>) {
match *self {
ParseTree::Terminal(ref s) => vec.push(s.clone()),
ParseTree::Nonterminal(_, ref trees) => {
for tree in trees {
tree.push_terminals(vec);
}
}
}
}
}
|
ParseTree
|
identifier_name
|
udp_connection.rs
|
/*
* Copyright (C) 2017 Genymobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::cell::RefCell;
use std::io;
use std::net::{Ipv4Addr, SocketAddr};
use std::rc::{Rc, Weak};
use std::time::Instant;
use log::Level;
use mio::{Event, PollOpt, Ready, Token};
use mio::net::UdpSocket;
use super::binary;
use super::client::{Client, ClientChannel};
use super::connection::{Connection, ConnectionId};
use super::datagram_buffer::DatagramBuffer;
use super::ipv4_header::Ipv4Header;
use super::ipv4_packet::{Ipv4Packet, MAX_PACKET_LENGTH};
use super::packetizer::Packetizer;
use super::selector::Selector;
use super::transport_header::TransportHeader;
const TAG: &'static str = "UdpConnection";
pub const IDLE_TIMEOUT_SECONDS: u64 = 2 * 60;
pub struct UdpConnection {
id: ConnectionId,
client: Weak<RefCell<Client>>,
socket: UdpSocket,
interests: Ready,
token: Token,
client_to_network: DatagramBuffer,
network_to_client: Packetizer,
closed: bool,
idle_since: Instant,
}
impl UdpConnection {
pub fn new(
selector: &mut Selector,
id: ConnectionId,
client: Weak<RefCell<Client>>,
ipv4_header: Ipv4Header,
transport_header: TransportHeader,
) -> io::Result<Rc<RefCell<Self>>>
|
fn create_socket(id: &ConnectionId) -> io::Result<UdpSocket> {
let autobind_addr = SocketAddr::new(Ipv4Addr::new(0, 0, 0, 0).into(), 0);
let udp_socket = UdpSocket::bind(&autobind_addr)?;
udp_socket.connect(id.rewritten_destination().into())?;
Ok(udp_socket)
}
fn remove_from_router(&self) {
// route is embedded in router which is embedded in client: the client necessarily exists
let client_rc = self.client.upgrade().expect("Expected client not found");
let mut client = client_rc.borrow_mut();
client.router().remove(self);
}
fn on_ready(&mut self, selector: &mut Selector, event: Event) {
match self.process(selector, event) {
Ok(_) => (),
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
cx_debug!(target: TAG, self.id, "Spurious event, ignoring")
}
Err(_) => panic!("Unexpected unhandled error"),
}
}
// return Err(err) with err.kind() == io::ErrorKind::WouldBlock on spurious event
fn process(&mut self, selector: &mut Selector, event: Event) -> io::Result<()> {
if !self.closed {
self.touch();
let ready = event.readiness();
if ready.is_readable() || ready.is_writable() {
if ready.is_writable() {
self.process_send(selector)?;
}
if !self.closed && ready.is_readable() {
self.process_receive(selector)?;
}
if !self.closed {
self.update_interests(selector);
}
} else {
// error or hup
self.close(selector);
}
if self.closed {
// on_ready is not called from the router, so the connection must remove itself
self.remove_from_router();
}
}
Ok(())
}
// return Err(err) with err.kind() == io::ErrorKind::WouldBlock on spurious event
fn process_send(&mut self, selector: &mut Selector) -> io::Result<()> {
match self.write() {
Ok(_) => (),
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
cx_debug!(target: TAG, self.id, "Spurious event, ignoring")
}
Err(err) => {
if err.kind() == io::ErrorKind::WouldBlock {
// rethrow
return Err(err);
}
cx_error!(
target: TAG,
self.id,
"Cannot write: [{:?}] {}",
err.kind(),
err
);
self.close(selector);
}
}
Ok(())
}
// return Err(err) with err.kind() == io::ErrorKind::WouldBlock on spurious event
fn process_receive(&mut self, selector: &mut Selector) -> io::Result<()> {
match self.read(selector) {
Ok(_) => (),
Err(err) => {
if err.kind() == io::ErrorKind::WouldBlock {
// rethrow
return Err(err);
}
cx_error!(
target: TAG,
self.id,
"Cannot read: [{:?}] {}",
err.kind(),
err
);
self.close(selector);
}
}
Ok(())
}
fn read(&mut self, selector: &mut Selector) -> io::Result<()> {
let ipv4_packet = self.network_to_client.packetize(&mut self.socket)?;
let client_rc = self.client.upgrade().expect("Expected client not found");
match client_rc.borrow_mut().send_to_client(
selector,
&ipv4_packet,
) {
Ok(_) => {
cx_debug!(
target: TAG,
self.id,
"Packet ({} bytes) sent to client",
ipv4_packet.length()
);
if log_enabled!(target: TAG, Level::Trace) {
cx_trace!(
target: TAG,
self.id,
"{}",
binary::to_string(ipv4_packet.raw())
);
}
}
Err(_) => cx_warn!(target: TAG, self.id, "Cannot send to client, drop packet"),
}
Ok(())
}
fn write(&mut self) -> io::Result<()> {
self.client_to_network.write_to(&mut self.socket)?;
Ok(())
}
fn update_interests(&mut self, selector: &mut Selector) {
let ready = if self.client_to_network.is_empty() {
Ready::readable()
} else {
Ready::readable() | Ready::writable()
};
cx_debug!(target: TAG, self.id, "interests: {:?}", ready);
if self.interests != ready {
// interests must be changed
self.interests = ready;
selector
.reregister(&self.socket, self.token, ready, PollOpt::level())
.expect("Cannot register on poll");
}
}
fn touch(&mut self) {
self.idle_since = Instant::now();
}
}
impl Connection for UdpConnection {
fn id(&self) -> &ConnectionId {
&self.id
}
fn send_to_network(
&mut self,
selector: &mut Selector,
_: &mut ClientChannel,
ipv4_packet: &Ipv4Packet,
) {
match self.client_to_network.read_from(
ipv4_packet.payload().expect(
"No payload",
),
) {
Ok(_) => {
self.update_interests(selector);
}
Err(err) => {
cx_warn!(
target: TAG,
self.id,
"Cannot send to network, drop packet: {}",
err
)
}
}
}
fn close(&mut self, selector: &mut Selector) {
cx_info!(target: TAG, self.id, "Close");
self.closed = true;
selector.deregister(&self.socket, self.token).unwrap();
// socket will be closed by RAII
}
fn is_expired(&self) -> bool {
self.idle_since.elapsed().as_secs() > IDLE_TIMEOUT_SECONDS
}
fn is_closed(&self) -> bool {
self.closed
}
}
|
{
cx_info!(target: TAG, id, "Open");
let socket = Self::create_socket(&id)?;
let packetizer = Packetizer::new(&ipv4_header, &transport_header);
let interests = Ready::readable();
let rc = Rc::new(RefCell::new(Self {
id: id,
client: client,
socket: socket,
interests: interests,
token: Token(0), // default value, will be set afterwards
client_to_network: DatagramBuffer::new(4 * MAX_PACKET_LENGTH),
network_to_client: packetizer,
closed: false,
idle_since: Instant::now(),
}));
{
let mut self_ref = rc.borrow_mut();
let rc2 = rc.clone();
// must anotate selector type: https://stackoverflow.com/a/44004103/1987178
let handler =
move |selector: &mut Selector, event| rc2.borrow_mut().on_ready(selector, event);
let token = selector.register(
&self_ref.socket,
handler,
interests,
PollOpt::level(),
)?;
self_ref.token = token;
}
Ok(rc)
}
|
identifier_body
|
udp_connection.rs
|
/*
* Copyright (C) 2017 Genymobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::cell::RefCell;
use std::io;
use std::net::{Ipv4Addr, SocketAddr};
use std::rc::{Rc, Weak};
use std::time::Instant;
use log::Level;
use mio::{Event, PollOpt, Ready, Token};
use mio::net::UdpSocket;
use super::binary;
use super::client::{Client, ClientChannel};
use super::connection::{Connection, ConnectionId};
use super::datagram_buffer::DatagramBuffer;
use super::ipv4_header::Ipv4Header;
use super::ipv4_packet::{Ipv4Packet, MAX_PACKET_LENGTH};
use super::packetizer::Packetizer;
use super::selector::Selector;
use super::transport_header::TransportHeader;
const TAG: &'static str = "UdpConnection";
pub const IDLE_TIMEOUT_SECONDS: u64 = 2 * 60;
pub struct UdpConnection {
id: ConnectionId,
client: Weak<RefCell<Client>>,
socket: UdpSocket,
interests: Ready,
token: Token,
client_to_network: DatagramBuffer,
network_to_client: Packetizer,
closed: bool,
idle_since: Instant,
}
impl UdpConnection {
pub fn new(
selector: &mut Selector,
id: ConnectionId,
client: Weak<RefCell<Client>>,
ipv4_header: Ipv4Header,
transport_header: TransportHeader,
) -> io::Result<Rc<RefCell<Self>>> {
cx_info!(target: TAG, id, "Open");
let socket = Self::create_socket(&id)?;
let packetizer = Packetizer::new(&ipv4_header, &transport_header);
let interests = Ready::readable();
let rc = Rc::new(RefCell::new(Self {
id: id,
client: client,
socket: socket,
interests: interests,
token: Token(0), // default value, will be set afterwards
client_to_network: DatagramBuffer::new(4 * MAX_PACKET_LENGTH),
network_to_client: packetizer,
closed: false,
idle_since: Instant::now(),
}));
{
let mut self_ref = rc.borrow_mut();
let rc2 = rc.clone();
// must anotate selector type: https://stackoverflow.com/a/44004103/1987178
let handler =
move |selector: &mut Selector, event| rc2.borrow_mut().on_ready(selector, event);
let token = selector.register(
&self_ref.socket,
handler,
interests,
PollOpt::level(),
)?;
self_ref.token = token;
}
Ok(rc)
}
fn create_socket(id: &ConnectionId) -> io::Result<UdpSocket> {
let autobind_addr = SocketAddr::new(Ipv4Addr::new(0, 0, 0, 0).into(), 0);
let udp_socket = UdpSocket::bind(&autobind_addr)?;
udp_socket.connect(id.rewritten_destination().into())?;
Ok(udp_socket)
}
fn remove_from_router(&self) {
// route is embedded in router which is embedded in client: the client necessarily exists
let client_rc = self.client.upgrade().expect("Expected client not found");
let mut client = client_rc.borrow_mut();
client.router().remove(self);
}
fn on_ready(&mut self, selector: &mut Selector, event: Event) {
match self.process(selector, event) {
Ok(_) => (),
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
cx_debug!(target: TAG, self.id, "Spurious event, ignoring")
}
Err(_) => panic!("Unexpected unhandled error"),
}
}
// return Err(err) with err.kind() == io::ErrorKind::WouldBlock on spurious event
fn process(&mut self, selector: &mut Selector, event: Event) -> io::Result<()> {
if !self.closed {
self.touch();
let ready = event.readiness();
if ready.is_readable() || ready.is_writable() {
if ready.is_writable() {
self.process_send(selector)?;
}
if !self.closed && ready.is_readable() {
self.process_receive(selector)?;
}
if !self.closed {
self.update_interests(selector);
}
} else {
// error or hup
self.close(selector);
}
if self.closed {
// on_ready is not called from the router, so the connection must remove itself
self.remove_from_router();
}
}
Ok(())
}
// return Err(err) with err.kind() == io::ErrorKind::WouldBlock on spurious event
fn process_send(&mut self, selector: &mut Selector) -> io::Result<()> {
match self.write() {
Ok(_) => (),
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
cx_debug!(target: TAG, self.id, "Spurious event, ignoring")
}
Err(err) => {
if err.kind() == io::ErrorKind::WouldBlock {
// rethrow
return Err(err);
}
cx_error!(
target: TAG,
self.id,
"Cannot write: [{:?}] {}",
err.kind(),
err
);
self.close(selector);
}
}
Ok(())
}
// return Err(err) with err.kind() == io::ErrorKind::WouldBlock on spurious event
fn process_receive(&mut self, selector: &mut Selector) -> io::Result<()> {
match self.read(selector) {
Ok(_) => (),
Err(err) => {
if err.kind() == io::ErrorKind::WouldBlock {
// rethrow
return Err(err);
}
cx_error!(
target: TAG,
self.id,
"Cannot read: [{:?}] {}",
err.kind(),
err
);
self.close(selector);
}
}
Ok(())
}
fn read(&mut self, selector: &mut Selector) -> io::Result<()> {
let ipv4_packet = self.network_to_client.packetize(&mut self.socket)?;
let client_rc = self.client.upgrade().expect("Expected client not found");
match client_rc.borrow_mut().send_to_client(
selector,
&ipv4_packet,
) {
Ok(_) => {
cx_debug!(
target: TAG,
self.id,
"Packet ({} bytes) sent to client",
ipv4_packet.length()
);
if log_enabled!(target: TAG, Level::Trace) {
cx_trace!(
target: TAG,
self.id,
"{}",
binary::to_string(ipv4_packet.raw())
);
}
}
Err(_) => cx_warn!(target: TAG, self.id, "Cannot send to client, drop packet"),
}
Ok(())
}
fn write(&mut self) -> io::Result<()> {
self.client_to_network.write_to(&mut self.socket)?;
Ok(())
}
fn update_interests(&mut self, selector: &mut Selector) {
let ready = if self.client_to_network.is_empty() {
Ready::readable()
} else {
Ready::readable() | Ready::writable()
};
cx_debug!(target: TAG, self.id, "interests: {:?}", ready);
if self.interests != ready {
// interests must be changed
self.interests = ready;
selector
.reregister(&self.socket, self.token, ready, PollOpt::level())
.expect("Cannot register on poll");
}
}
fn touch(&mut self) {
self.idle_since = Instant::now();
}
}
impl Connection for UdpConnection {
fn id(&self) -> &ConnectionId {
&self.id
}
fn send_to_network(
&mut self,
selector: &mut Selector,
_: &mut ClientChannel,
ipv4_packet: &Ipv4Packet,
) {
match self.client_to_network.read_from(
ipv4_packet.payload().expect(
"No payload",
),
|
Err(err) => {
cx_warn!(
target: TAG,
self.id,
"Cannot send to network, drop packet: {}",
err
)
}
}
}
fn close(&mut self, selector: &mut Selector) {
cx_info!(target: TAG, self.id, "Close");
self.closed = true;
selector.deregister(&self.socket, self.token).unwrap();
// socket will be closed by RAII
}
fn is_expired(&self) -> bool {
self.idle_since.elapsed().as_secs() > IDLE_TIMEOUT_SECONDS
}
fn is_closed(&self) -> bool {
self.closed
}
}
|
) {
Ok(_) => {
self.update_interests(selector);
}
|
random_line_split
|
udp_connection.rs
|
/*
* Copyright (C) 2017 Genymobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::cell::RefCell;
use std::io;
use std::net::{Ipv4Addr, SocketAddr};
use std::rc::{Rc, Weak};
use std::time::Instant;
use log::Level;
use mio::{Event, PollOpt, Ready, Token};
use mio::net::UdpSocket;
use super::binary;
use super::client::{Client, ClientChannel};
use super::connection::{Connection, ConnectionId};
use super::datagram_buffer::DatagramBuffer;
use super::ipv4_header::Ipv4Header;
use super::ipv4_packet::{Ipv4Packet, MAX_PACKET_LENGTH};
use super::packetizer::Packetizer;
use super::selector::Selector;
use super::transport_header::TransportHeader;
const TAG: &'static str = "UdpConnection";
pub const IDLE_TIMEOUT_SECONDS: u64 = 2 * 60;
pub struct UdpConnection {
id: ConnectionId,
client: Weak<RefCell<Client>>,
socket: UdpSocket,
interests: Ready,
token: Token,
client_to_network: DatagramBuffer,
network_to_client: Packetizer,
closed: bool,
idle_since: Instant,
}
impl UdpConnection {
pub fn new(
selector: &mut Selector,
id: ConnectionId,
client: Weak<RefCell<Client>>,
ipv4_header: Ipv4Header,
transport_header: TransportHeader,
) -> io::Result<Rc<RefCell<Self>>> {
cx_info!(target: TAG, id, "Open");
let socket = Self::create_socket(&id)?;
let packetizer = Packetizer::new(&ipv4_header, &transport_header);
let interests = Ready::readable();
let rc = Rc::new(RefCell::new(Self {
id: id,
client: client,
socket: socket,
interests: interests,
token: Token(0), // default value, will be set afterwards
client_to_network: DatagramBuffer::new(4 * MAX_PACKET_LENGTH),
network_to_client: packetizer,
closed: false,
idle_since: Instant::now(),
}));
{
let mut self_ref = rc.borrow_mut();
let rc2 = rc.clone();
// must anotate selector type: https://stackoverflow.com/a/44004103/1987178
let handler =
move |selector: &mut Selector, event| rc2.borrow_mut().on_ready(selector, event);
let token = selector.register(
&self_ref.socket,
handler,
interests,
PollOpt::level(),
)?;
self_ref.token = token;
}
Ok(rc)
}
fn create_socket(id: &ConnectionId) -> io::Result<UdpSocket> {
let autobind_addr = SocketAddr::new(Ipv4Addr::new(0, 0, 0, 0).into(), 0);
let udp_socket = UdpSocket::bind(&autobind_addr)?;
udp_socket.connect(id.rewritten_destination().into())?;
Ok(udp_socket)
}
fn remove_from_router(&self) {
// route is embedded in router which is embedded in client: the client necessarily exists
let client_rc = self.client.upgrade().expect("Expected client not found");
let mut client = client_rc.borrow_mut();
client.router().remove(self);
}
fn on_ready(&mut self, selector: &mut Selector, event: Event) {
match self.process(selector, event) {
Ok(_) => (),
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
cx_debug!(target: TAG, self.id, "Spurious event, ignoring")
}
Err(_) => panic!("Unexpected unhandled error"),
}
}
// return Err(err) with err.kind() == io::ErrorKind::WouldBlock on spurious event
fn process(&mut self, selector: &mut Selector, event: Event) -> io::Result<()> {
if !self.closed {
self.touch();
let ready = event.readiness();
if ready.is_readable() || ready.is_writable() {
if ready.is_writable() {
self.process_send(selector)?;
}
if !self.closed && ready.is_readable() {
self.process_receive(selector)?;
}
if !self.closed {
self.update_interests(selector);
}
} else {
// error or hup
self.close(selector);
}
if self.closed {
// on_ready is not called from the router, so the connection must remove itself
self.remove_from_router();
}
}
Ok(())
}
// return Err(err) with err.kind() == io::ErrorKind::WouldBlock on spurious event
fn process_send(&mut self, selector: &mut Selector) -> io::Result<()> {
match self.write() {
Ok(_) => (),
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
cx_debug!(target: TAG, self.id, "Spurious event, ignoring")
}
Err(err) => {
if err.kind() == io::ErrorKind::WouldBlock {
// rethrow
return Err(err);
}
cx_error!(
target: TAG,
self.id,
"Cannot write: [{:?}] {}",
err.kind(),
err
);
self.close(selector);
}
}
Ok(())
}
// return Err(err) with err.kind() == io::ErrorKind::WouldBlock on spurious event
fn process_receive(&mut self, selector: &mut Selector) -> io::Result<()> {
match self.read(selector) {
Ok(_) => (),
Err(err) => {
if err.kind() == io::ErrorKind::WouldBlock {
// rethrow
return Err(err);
}
cx_error!(
target: TAG,
self.id,
"Cannot read: [{:?}] {}",
err.kind(),
err
);
self.close(selector);
}
}
Ok(())
}
fn read(&mut self, selector: &mut Selector) -> io::Result<()> {
let ipv4_packet = self.network_to_client.packetize(&mut self.socket)?;
let client_rc = self.client.upgrade().expect("Expected client not found");
match client_rc.borrow_mut().send_to_client(
selector,
&ipv4_packet,
) {
Ok(_) => {
cx_debug!(
target: TAG,
self.id,
"Packet ({} bytes) sent to client",
ipv4_packet.length()
);
if log_enabled!(target: TAG, Level::Trace) {
cx_trace!(
target: TAG,
self.id,
"{}",
binary::to_string(ipv4_packet.raw())
);
}
}
Err(_) => cx_warn!(target: TAG, self.id, "Cannot send to client, drop packet"),
}
Ok(())
}
fn write(&mut self) -> io::Result<()> {
self.client_to_network.write_to(&mut self.socket)?;
Ok(())
}
fn update_interests(&mut self, selector: &mut Selector) {
let ready = if self.client_to_network.is_empty() {
Ready::readable()
} else {
Ready::readable() | Ready::writable()
};
cx_debug!(target: TAG, self.id, "interests: {:?}", ready);
if self.interests != ready {
// interests must be changed
self.interests = ready;
selector
.reregister(&self.socket, self.token, ready, PollOpt::level())
.expect("Cannot register on poll");
}
}
fn touch(&mut self) {
self.idle_since = Instant::now();
}
}
impl Connection for UdpConnection {
fn id(&self) -> &ConnectionId {
&self.id
}
fn send_to_network(
&mut self,
selector: &mut Selector,
_: &mut ClientChannel,
ipv4_packet: &Ipv4Packet,
) {
match self.client_to_network.read_from(
ipv4_packet.payload().expect(
"No payload",
),
) {
Ok(_) => {
self.update_interests(selector);
}
Err(err) => {
cx_warn!(
target: TAG,
self.id,
"Cannot send to network, drop packet: {}",
err
)
}
}
}
fn close(&mut self, selector: &mut Selector) {
cx_info!(target: TAG, self.id, "Close");
self.closed = true;
selector.deregister(&self.socket, self.token).unwrap();
// socket will be closed by RAII
}
fn is_expired(&self) -> bool {
self.idle_since.elapsed().as_secs() > IDLE_TIMEOUT_SECONDS
}
fn
|
(&self) -> bool {
self.closed
}
}
|
is_closed
|
identifier_name
|
ListDescription.js
|
import _extends from 'babel-runtime/helpers/extends';
import cx from 'classnames';
import PropTypes from 'prop-types';
import React from 'react';
import { childrenUtils, createShorthandFactory, customPropTypes, getElementType, getUnhandledProps, META } from '../../lib';
/**
* A list item can contain a description.
*/
function ListDescription(props)
|
ListDescription.handledProps = ['as', 'children', 'className', 'content'];
ListDescription._meta = {
name: 'ListDescription',
parent: 'List',
type: META.TYPES.ELEMENT
};
ListDescription.propTypes = process.env.NODE_ENV !== "production" ? {
/** An element type to render as (string or function). */
as: customPropTypes.as,
/** Primary content. */
children: PropTypes.node,
/** Additional classes. */
className: PropTypes.string,
/** Shorthand for primary content. */
content: customPropTypes.contentShorthand
} : {};
ListDescription.create = createShorthandFactory(ListDescription, function (content) {
return { content: content };
});
export default ListDescription;
|
{
var children = props.children,
className = props.className,
content = props.content;
var classes = cx(className, 'description');
var rest = getUnhandledProps(ListDescription, props);
var ElementType = getElementType(ListDescription, props);
return React.createElement(
ElementType,
_extends({}, rest, { className: classes }),
childrenUtils.isNil(children) ? content : children
);
}
|
identifier_body
|
ListDescription.js
|
import _extends from 'babel-runtime/helpers/extends';
import cx from 'classnames';
import PropTypes from 'prop-types';
import React from 'react';
import { childrenUtils, createShorthandFactory, customPropTypes, getElementType, getUnhandledProps, META } from '../../lib';
/**
* A list item can contain a description.
*/
function
|
(props) {
var children = props.children,
className = props.className,
content = props.content;
var classes = cx(className, 'description');
var rest = getUnhandledProps(ListDescription, props);
var ElementType = getElementType(ListDescription, props);
return React.createElement(
ElementType,
_extends({}, rest, { className: classes }),
childrenUtils.isNil(children) ? content : children
);
}
ListDescription.handledProps = ['as', 'children', 'className', 'content'];
ListDescription._meta = {
name: 'ListDescription',
parent: 'List',
type: META.TYPES.ELEMENT
};
ListDescription.propTypes = process.env.NODE_ENV !== "production" ? {
/** An element type to render as (string or function). */
as: customPropTypes.as,
/** Primary content. */
children: PropTypes.node,
/** Additional classes. */
className: PropTypes.string,
/** Shorthand for primary content. */
content: customPropTypes.contentShorthand
} : {};
ListDescription.create = createShorthandFactory(ListDescription, function (content) {
return { content: content };
});
export default ListDescription;
|
ListDescription
|
identifier_name
|
ListDescription.js
|
import _extends from 'babel-runtime/helpers/extends';
import cx from 'classnames';
import PropTypes from 'prop-types';
import React from 'react';
import { childrenUtils, createShorthandFactory, customPropTypes, getElementType, getUnhandledProps, META } from '../../lib';
/**
* A list item can contain a description.
*/
function ListDescription(props) {
var children = props.children,
className = props.className,
content = props.content;
var classes = cx(className, 'description');
var rest = getUnhandledProps(ListDescription, props);
var ElementType = getElementType(ListDescription, props);
return React.createElement(
ElementType,
_extends({}, rest, { className: classes }),
childrenUtils.isNil(children) ? content : children
|
}
ListDescription.handledProps = ['as', 'children', 'className', 'content'];
ListDescription._meta = {
name: 'ListDescription',
parent: 'List',
type: META.TYPES.ELEMENT
};
ListDescription.propTypes = process.env.NODE_ENV !== "production" ? {
/** An element type to render as (string or function). */
as: customPropTypes.as,
/** Primary content. */
children: PropTypes.node,
/** Additional classes. */
className: PropTypes.string,
/** Shorthand for primary content. */
content: customPropTypes.contentShorthand
} : {};
ListDescription.create = createShorthandFactory(ListDescription, function (content) {
return { content: content };
});
export default ListDescription;
|
);
|
random_line_split
|
scatter.py
|
from __future__ import division
import numpy as np
from bokeh.plotting import figure, HBox, output_file, show, VBox
from bokeh.models import Range1d
# create some data using python lists
x1 = [1, 2, 5, 7, -8, 5, 2, 7, 1, -3, -5, 1.7, 5.4, -5]
y1 = [5, 6, -3, 1.5, 2, 1, 1, 9, 2.4, -3, 6, 8, 2, 4]
# create some data using numpy arrays
x2 = np.random.random(size=100) * 20 - 10
y2 = np.random.random(size=100) * 20 - 10
# EXERCISE: create some data for x3 and y3 however you like
# EXERCISE: output static HTML file
TOOLS="pan,wheel_zoom,box_zoom,reset,save"
# EXERCISE: create two Range1d objects to reuse in the plots. Use the [-10, 10]
# for the bounds. Note: Range1d's are initialized like: Range1d(start=0, end=1)
# EXERCISE: Plot all the sets of points on different plots p1, p2, p3. Use the
# ranges above for `x_range` and `y_range` for each figure. Set different colors
# as well. Try setting line_color and fill_color instead of just color. You can
# also set alpha, line_alpha, and fill_alpha if you like. Set tools to TOOLS on
# the figures. Change the value of the 'marker' parameter, "circle", "square",
# "triangle", etc. One example is given
p1 = figure(x_range=xr, y_range=yr, tools=TOOLS, plot_width=300, plot_height=300)
p1.scatter(x1, y1, size=12, color="red", alpha=0.5)
# EXERCISE: Try panning and zooming one of the plots with another one visible!
# Set the plot_width and plot_height to smaller if necessary
# EXERCISE: create a new figure p4
# Lets plot 4000 circles, you can play around with this if you like
N = 4000
# Create a bunch of random points, radii and colors for plotting
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = [
"#%02x%02x%02x" % (r, g, 150) for r, g in zip(np.floor(50+2*x), np.floor(30+2*y))
]
# EXERCISE: use the `circle` renderer to scatter all the circles. Set the
# 'fill_color' to the colors above, the 'line_color' to None, and the 'radius'
# to the radii. Also try setting the fill_alpha to something less than one.
# Use TOOLS from above to set a tools parameter.
# NOTE: since we are passing 'radius' as a parameter, the size of the circles
# is computed in **data** space, not in pixels. If you'd like to specify
# radii in pixels, use: radius_units="screen"
# show the plots arrayed in a layout
|
show(VBox(HBox(p1, p2, p3), p4))
|
random_line_split
|
|
pan.rs
|
use traits::{SoundSample, SampleValue, stereo_value};
use params::*;
use super::Efx;
// 0. => all right 1. => all left
declare_params!(PanParams { pan: 0.5 });
pub struct Pan {
params: PanParams,
}
impl Parametrized for Pan {
fn get_parameters(&mut self) -> &mut Parameters {
&mut self.params
}
}
impl Pan {
pub fn new(params: PanParams) -> Pan {
Pan { params: params }
}
}
impl Efx for Pan {
fn sample(&mut self, sample: SampleValue) -> SoundSample {
let right_v = self.params.pan.value();
let left_v = 1. - right_v;
match sample {
SampleValue::Mono(x) => stereo_value(x * right_v, x * left_v),
SampleValue::Stereo(r, l) => stereo_value(right_v * r, left_v * l), // ??
}
|
}
|
}
|
random_line_split
|
pan.rs
|
use traits::{SoundSample, SampleValue, stereo_value};
use params::*;
use super::Efx;
// 0. => all right 1. => all left
declare_params!(PanParams { pan: 0.5 });
pub struct
|
{
params: PanParams,
}
impl Parametrized for Pan {
fn get_parameters(&mut self) -> &mut Parameters {
&mut self.params
}
}
impl Pan {
pub fn new(params: PanParams) -> Pan {
Pan { params: params }
}
}
impl Efx for Pan {
fn sample(&mut self, sample: SampleValue) -> SoundSample {
let right_v = self.params.pan.value();
let left_v = 1. - right_v;
match sample {
SampleValue::Mono(x) => stereo_value(x * right_v, x * left_v),
SampleValue::Stereo(r, l) => stereo_value(right_v * r, left_v * l), // ??
}
}
}
|
Pan
|
identifier_name
|
pan.rs
|
use traits::{SoundSample, SampleValue, stereo_value};
use params::*;
use super::Efx;
// 0. => all right 1. => all left
declare_params!(PanParams { pan: 0.5 });
pub struct Pan {
params: PanParams,
}
impl Parametrized for Pan {
fn get_parameters(&mut self) -> &mut Parameters {
&mut self.params
}
}
impl Pan {
pub fn new(params: PanParams) -> Pan
|
}
impl Efx for Pan {
fn sample(&mut self, sample: SampleValue) -> SoundSample {
let right_v = self.params.pan.value();
let left_v = 1. - right_v;
match sample {
SampleValue::Mono(x) => stereo_value(x * right_v, x * left_v),
SampleValue::Stereo(r, l) => stereo_value(right_v * r, left_v * l), // ??
}
}
}
|
{
Pan { params: params }
}
|
identifier_body
|
device_tracker.py
|
"""Device tracker support for OPNSense routers."""
from homeassistant.components.device_tracker import DeviceScanner
from . import CONF_TRACKER_INTERFACE, OPNSENSE_DATA
async def async_get_scanner(hass, config, discovery_info=None):
"""Configure the OPNSense device_tracker."""
interface_client = hass.data[OPNSENSE_DATA]["interfaces"]
scanner = OPNSenseDeviceScanner(
interface_client, hass.data[OPNSENSE_DATA][CONF_TRACKER_INTERFACE]
)
return scanner
class OPNSenseDeviceScanner(DeviceScanner):
"""This class queries a router running OPNsense."""
def __init__(self, client, interfaces):
"""Initialize the scanner."""
self.last_results = {}
self.client = client
self.interfaces = interfaces
def _get_mac_addrs(self, devices):
"""Create dict with mac address keys from list of devices."""
out_devices = {}
for device in devices:
if not self.interfaces:
out_devices[device["mac"]] = device
elif device["intf_description"] in self.interfaces:
out_devices[device["mac"]] = device
return out_devices
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self.update_info()
return list(self.last_results)
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if device not in self.last_results:
|
hostname = self.last_results[device].get("hostname") or None
return hostname
def update_info(self):
"""Ensure the information from the OPNSense router is up to date.
Return boolean if scanning successful.
"""
devices = self.client.get_arp()
self.last_results = self._get_mac_addrs(devices)
def get_extra_attributes(self, device):
"""Return the extra attrs of the given device."""
if device not in self.last_results:
return None
if not (mfg := self.last_results[device].get("manufacturer")):
return {}
return {"manufacturer": mfg}
|
return None
|
conditional_block
|
device_tracker.py
|
"""Device tracker support for OPNSense routers."""
from homeassistant.components.device_tracker import DeviceScanner
from . import CONF_TRACKER_INTERFACE, OPNSENSE_DATA
async def async_get_scanner(hass, config, discovery_info=None):
|
class OPNSenseDeviceScanner(DeviceScanner):
"""This class queries a router running OPNsense."""
def __init__(self, client, interfaces):
"""Initialize the scanner."""
self.last_results = {}
self.client = client
self.interfaces = interfaces
def _get_mac_addrs(self, devices):
"""Create dict with mac address keys from list of devices."""
out_devices = {}
for device in devices:
if not self.interfaces:
out_devices[device["mac"]] = device
elif device["intf_description"] in self.interfaces:
out_devices[device["mac"]] = device
return out_devices
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self.update_info()
return list(self.last_results)
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if device not in self.last_results:
return None
hostname = self.last_results[device].get("hostname") or None
return hostname
def update_info(self):
"""Ensure the information from the OPNSense router is up to date.
Return boolean if scanning successful.
"""
devices = self.client.get_arp()
self.last_results = self._get_mac_addrs(devices)
def get_extra_attributes(self, device):
"""Return the extra attrs of the given device."""
if device not in self.last_results:
return None
if not (mfg := self.last_results[device].get("manufacturer")):
return {}
return {"manufacturer": mfg}
|
"""Configure the OPNSense device_tracker."""
interface_client = hass.data[OPNSENSE_DATA]["interfaces"]
scanner = OPNSenseDeviceScanner(
interface_client, hass.data[OPNSENSE_DATA][CONF_TRACKER_INTERFACE]
)
return scanner
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.