file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
document.py | 0000
SCHEMA = 'Document'
SCHEMA_FOLDER = 'Folder'
SCHEMA_PACKAGE = 'Package'
SCHEMA_WORKBOOK = 'Workbook'
SCHEMA_TEXT = 'PlainText'
SCHEMA_HTML = 'HyperText'
SCHEMA_PDF = 'Pages'
SCHEMA_IMAGE = 'Image'
SCHEMA_AUDIO = 'Audio'
SCHEMA_VIDEO = 'Video'
SCHEMA_TABLE = 'Table'
SCHEMA_EMAIL = 'Email'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True, index=True)
schema = db.Column(db.String(255), nullable=False)
status = db.Column(db.Unicode(10), nullable=True)
meta = db.Column(JSONB, default={})
error_message = db.Column(db.Unicode(), nullable=True)
body_text = db.Column(db.Unicode(), nullable=True)
body_raw = db.Column(db.Unicode(), nullable=True)
uploader_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True) # noqa
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True, index=True) # noqa
children = db.relationship('Document', lazy='dynamic', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
@property
def model(self):
return model.get(self.schema)
@property
def name(self):
if self.title is not None:
return self.title
if self.file_name is not None:
return self.file_name
if self.source_url is not None:
return self.source_url
@property
def | (self):
# Slightly unintuitive naming: this just checks the document type,
# not if there actually are any records.
return self.schema in [self.SCHEMA_PDF, self.SCHEMA_TABLE]
@property
def supports_pages(self):
return self.schema == self.SCHEMA_PDF
@property
def supports_nlp(self):
structural = [
Document.SCHEMA,
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_WORKBOOK,
Document.SCHEMA_VIDEO,
Document.SCHEMA_AUDIO,
]
return self.schema not in structural
@property
def ancestors(self):
if self.parent_id is None:
return []
key = cache.key('ancestors', self.id)
ancestors = cache.get_list(key)
if len(ancestors):
return ancestors
parent_key = cache.key('ancestors', self.parent_id)
ancestors = cache.get_list(parent_key)
if not len(ancestors):
ancestors = []
parent = Document.by_id(self.parent_id)
if parent is not None:
ancestors = parent.ancestors
ancestors.append(self.parent_id)
if self.model.is_a(model.get(self.SCHEMA_FOLDER)):
cache.set_list(key, ancestors, expire=cache.EXPIRE)
return ancestors
def update(self, data):
props = ('title', 'summary', 'author', 'crawler', 'source_url',
'file_name', 'mime_type', 'headers', 'date', 'authored_at',
'modified_at', 'published_at', 'retrieved_at', 'languages',
'countries', 'keywords')
for prop in props:
value = data.get(prop, self.meta.get(prop))
setattr(self, prop, value)
db.session.add(self)
def update_meta(self):
flag_modified(self, 'meta')
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_records()
self.delete_tags()
db.session.delete(self)
@classmethod
def delete_by_collection(cls, collection_id, deleted_at=None):
documents = db.session.query(cls.id)
documents = documents.filter(cls.collection_id == collection_id)
documents = documents.subquery()
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
def raw_texts(self):
yield self.title
yield self.file_name
yield self.source_url
yield self.summary
yield self.author
if self.status != self.STATUS_SUCCESS:
return
yield self.body_text
if self.supports_records:
# iterate over all the associated records.
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq = pq.order_by(DocumentRecord.index.asc())
for record in pq.yield_per(10000):
yield from record.raw_texts()
@property
def texts(self):
yield from filter_texts(self.raw_texts())
@classmethod
def by_keys(cls, parent_id=None, collection_id=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
q = q.filter(Document.collection_id == collection_id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.schema = cls.SCHEMA
document.collection_id = collection_id
if parent_id is not None:
document.parent_id = parent_id
if foreign_id is not None:
document.foreign_id = foreign_id
if content_hash is not None:
document.content_hash = content_hash
db.session.add(document)
return document
@classmethod
def by_id(cls, id, collection_id=None):
if id is None:
return
q = cls.all()
q = q.filter(cls.id == id)
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
return q.first()
@classmethod
def by_collection(cls, collection_id=None):
q = cls.all()
q = q.filter(cls.collection_id == collection_id)
return q
@classmethod
def find_ids(cls, collection_id=None, failed_only=False):
q = cls.all_ids()
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
if failed_only:
q = q.filter(cls.status != cls.STATUS_SUCCESS)
q = q.order_by(cls.id.asc())
return q
def to_proxy(self):
meta = dict(self.meta)
headers = meta.pop('headers', {})
headers = {slugify(k, sep='_'): v for k, v in headers.items()}
proxy = model.get_proxy({
'id': str(self.id),
'schema': self.model,
'properties': meta
})
proxy.set('contentHash', self.content_hash)
proxy.set('parent', self.parent_id)
proxy.set('ancestors', self.ancestors)
proxy.set('processingStatus', self.status)
proxy.set('processingError', self.error_message)
proxy.set('fileSize', meta.get('file_size'))
proxy.set('fileName', meta.get('file_name'))
if not proxy.has('fileName'):
disposition = headers.get('content_disposition')
if disposition is not None:
_, attrs = cgi.parse_header(disposition)
proxy.set('fileName', attrs.get('filename'))
proxy.set('mimeType', meta.get('mime_type'))
if not proxy.has('mimeType'):
proxy.set('mimeType', headers.get('content_type'))
proxy.set('language', meta.get('languages'))
proxy.set('country', meta.get('countries'))
proxy.set('authoredAt', meta.get('authored_at'))
proxy.set('modifiedAt', meta.get('modified_at'))
proxy.set('publishedAt', meta.get('published_at'))
proxy.set('retrievedAt', meta.get('retrieved_at'))
proxy.set('sourceUrl', meta.get('source_url'))
proxy.set('messageId', meta.get('message_id'), quiet=True)
proxy.set('inReplyTo', meta.get('in_reply_to'), quiet=True)
proxy.set('bodyText', self.body_text, quiet=True)
proxy.set('bodyHtml', self.body_raw, quiet=True)
columns = meta.get('columns')
| supports_records | identifier_name |
document.py | 0000
SCHEMA = 'Document'
SCHEMA_FOLDER = 'Folder'
SCHEMA_PACKAGE = 'Package'
SCHEMA_WORKBOOK = 'Workbook'
SCHEMA_TEXT = 'PlainText'
SCHEMA_HTML = 'HyperText'
SCHEMA_PDF = 'Pages'
SCHEMA_IMAGE = 'Image'
SCHEMA_AUDIO = 'Audio'
SCHEMA_VIDEO = 'Video'
SCHEMA_TABLE = 'Table'
SCHEMA_EMAIL = 'Email'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True, index=True)
schema = db.Column(db.String(255), nullable=False)
status = db.Column(db.Unicode(10), nullable=True)
meta = db.Column(JSONB, default={})
error_message = db.Column(db.Unicode(), nullable=True)
body_text = db.Column(db.Unicode(), nullable=True)
body_raw = db.Column(db.Unicode(), nullable=True)
uploader_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True) # noqa
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True, index=True) # noqa
children = db.relationship('Document', lazy='dynamic', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
@property
def model(self):
return model.get(self.schema)
@property
def name(self):
if self.title is not None:
return self.title
if self.file_name is not None:
return self.file_name
if self.source_url is not None:
return self.source_url
@property
def supports_records(self):
# Slightly unintuitive naming: this just checks the document type,
# not if there actually are any records.
return self.schema in [self.SCHEMA_PDF, self.SCHEMA_TABLE]
@property
def supports_pages(self):
return self.schema == self.SCHEMA_PDF
@property
def supports_nlp(self):
structural = [
Document.SCHEMA,
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_WORKBOOK,
Document.SCHEMA_VIDEO,
Document.SCHEMA_AUDIO,
]
return self.schema not in structural
@property
def ancestors(self):
if self.parent_id is None:
return []
key = cache.key('ancestors', self.id)
ancestors = cache.get_list(key)
if len(ancestors):
return ancestors
parent_key = cache.key('ancestors', self.parent_id)
ancestors = cache.get_list(parent_key)
if not len(ancestors):
ancestors = []
parent = Document.by_id(self.parent_id)
if parent is not None:
ancestors = parent.ancestors
ancestors.append(self.parent_id)
if self.model.is_a(model.get(self.SCHEMA_FOLDER)):
cache.set_list(key, ancestors, expire=cache.EXPIRE)
return ancestors
def update(self, data):
props = ('title', 'summary', 'author', 'crawler', 'source_url',
'file_name', 'mime_type', 'headers', 'date', 'authored_at',
'modified_at', 'published_at', 'retrieved_at', 'languages',
'countries', 'keywords')
for prop in props:
|
db.session.add(self)
def update_meta(self):
flag_modified(self, 'meta')
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_records()
self.delete_tags()
db.session.delete(self)
@classmethod
def delete_by_collection(cls, collection_id, deleted_at=None):
documents = db.session.query(cls.id)
documents = documents.filter(cls.collection_id == collection_id)
documents = documents.subquery()
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
def raw_texts(self):
yield self.title
yield self.file_name
yield self.source_url
yield self.summary
yield self.author
if self.status != self.STATUS_SUCCESS:
return
yield self.body_text
if self.supports_records:
# iterate over all the associated records.
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq = pq.order_by(DocumentRecord.index.asc())
for record in pq.yield_per(10000):
yield from record.raw_texts()
@property
def texts(self):
yield from filter_texts(self.raw_texts())
@classmethod
def by_keys(cls, parent_id=None, collection_id=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
q = q.filter(Document.collection_id == collection_id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.schema = cls.SCHEMA
document.collection_id = collection_id
if parent_id is not None:
document.parent_id = parent_id
if foreign_id is not None:
document.foreign_id = foreign_id
if content_hash is not None:
document.content_hash = content_hash
db.session.add(document)
return document
@classmethod
def by_id(cls, id, collection_id=None):
if id is None:
return
q = cls.all()
q = q.filter(cls.id == id)
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
return q.first()
@classmethod
def by_collection(cls, collection_id=None):
q = cls.all()
q = q.filter(cls.collection_id == collection_id)
return q
@classmethod
def find_ids(cls, collection_id=None, failed_only=False):
q = cls.all_ids()
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
if failed_only:
q = q.filter(cls.status != cls.STATUS_SUCCESS)
q = q.order_by(cls.id.asc())
return q
def to_proxy(self):
meta = dict(self.meta)
headers = meta.pop('headers', {})
headers = {slugify(k, sep='_'): v for k, v in headers.items()}
proxy = model.get_proxy({
'id': str(self.id),
'schema': self.model,
'properties': meta
})
proxy.set('contentHash', self.content_hash)
proxy.set('parent', self.parent_id)
proxy.set('ancestors', self.ancestors)
proxy.set('processingStatus', self.status)
proxy.set('processingError', self.error_message)
proxy.set('fileSize', meta.get('file_size'))
proxy.set('fileName', meta.get('file_name'))
if not proxy.has('fileName'):
disposition = headers.get('content_disposition')
if disposition is not None:
_, attrs = cgi.parse_header(disposition)
proxy.set('fileName', attrs.get('filename'))
proxy.set('mimeType', meta.get('mime_type'))
if not proxy.has('mimeType'):
proxy.set('mimeType', headers.get('content_type'))
proxy.set('language', meta.get('languages'))
proxy.set('country', meta.get('countries'))
proxy.set('authoredAt', meta.get('authored_at'))
proxy.set('modifiedAt', meta.get('modified_at'))
proxy.set('publishedAt', meta.get('published_at'))
proxy.set('retrievedAt', meta.get('retrieved_at'))
proxy.set('sourceUrl', meta.get('source_url'))
proxy.set('messageId', meta.get('message_id'), quiet=True)
proxy.set('inReplyTo', meta.get('in_reply_to'), quiet=True)
proxy.set('bodyText', self.body_text, quiet=True)
proxy.set('bodyHtml', self.body_raw, quiet=True)
columns = meta.get('columns')
| value = data.get(prop, self.meta.get(prop))
setattr(self, prop, value) | conditional_block |
document.py | 0000
SCHEMA = 'Document'
SCHEMA_FOLDER = 'Folder'
SCHEMA_PACKAGE = 'Package'
SCHEMA_WORKBOOK = 'Workbook'
SCHEMA_TEXT = 'PlainText'
SCHEMA_HTML = 'HyperText'
SCHEMA_PDF = 'Pages'
SCHEMA_IMAGE = 'Image'
SCHEMA_AUDIO = 'Audio'
SCHEMA_VIDEO = 'Video'
SCHEMA_TABLE = 'Table'
SCHEMA_EMAIL = 'Email'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True, index=True)
schema = db.Column(db.String(255), nullable=False)
status = db.Column(db.Unicode(10), nullable=True)
meta = db.Column(JSONB, default={})
error_message = db.Column(db.Unicode(), nullable=True)
body_text = db.Column(db.Unicode(), nullable=True)
body_raw = db.Column(db.Unicode(), nullable=True)
uploader_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True) # noqa
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True, index=True) # noqa
children = db.relationship('Document', lazy='dynamic', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
@property
def model(self):
return model.get(self.schema)
@property
def name(self):
if self.title is not None:
return self.title
if self.file_name is not None:
return self.file_name
if self.source_url is not None:
return self.source_url
@property
def supports_records(self):
# Slightly unintuitive naming: this just checks the document type,
# not if there actually are any records.
return self.schema in [self.SCHEMA_PDF, self.SCHEMA_TABLE]
@property
def supports_pages(self):
return self.schema == self.SCHEMA_PDF
@property
def supports_nlp(self):
structural = [
Document.SCHEMA,
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_WORKBOOK,
Document.SCHEMA_VIDEO,
Document.SCHEMA_AUDIO,
]
return self.schema not in structural
@property
def ancestors(self):
if self.parent_id is None:
return []
key = cache.key('ancestors', self.id)
ancestors = cache.get_list(key)
if len(ancestors):
return ancestors
parent_key = cache.key('ancestors', self.parent_id)
ancestors = cache.get_list(parent_key)
if not len(ancestors):
ancestors = []
parent = Document.by_id(self.parent_id)
if parent is not None:
ancestors = parent.ancestors
ancestors.append(self.parent_id)
if self.model.is_a(model.get(self.SCHEMA_FOLDER)):
cache.set_list(key, ancestors, expire=cache.EXPIRE)
return ancestors
def update(self, data):
props = ('title', 'summary', 'author', 'crawler', 'source_url',
'file_name', 'mime_type', 'headers', 'date', 'authored_at',
'modified_at', 'published_at', 'retrieved_at', 'languages',
'countries', 'keywords')
for prop in props:
value = data.get(prop, self.meta.get(prop))
setattr(self, prop, value)
db.session.add(self)
def update_meta(self):
|
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_records()
self.delete_tags()
db.session.delete(self)
@classmethod
def delete_by_collection(cls, collection_id, deleted_at=None):
documents = db.session.query(cls.id)
documents = documents.filter(cls.collection_id == collection_id)
documents = documents.subquery()
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
def raw_texts(self):
yield self.title
yield self.file_name
yield self.source_url
yield self.summary
yield self.author
if self.status != self.STATUS_SUCCESS:
return
yield self.body_text
if self.supports_records:
# iterate over all the associated records.
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq = pq.order_by(DocumentRecord.index.asc())
for record in pq.yield_per(10000):
yield from record.raw_texts()
@property
def texts(self):
yield from filter_texts(self.raw_texts())
@classmethod
def by_keys(cls, parent_id=None, collection_id=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
q = q.filter(Document.collection_id == collection_id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.schema = cls.SCHEMA
document.collection_id = collection_id
if parent_id is not None:
document.parent_id = parent_id
if foreign_id is not None:
document.foreign_id = foreign_id
if content_hash is not None:
document.content_hash = content_hash
db.session.add(document)
return document
@classmethod
def by_id(cls, id, collection_id=None):
if id is None:
return
q = cls.all()
q = q.filter(cls.id == id)
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
return q.first()
@classmethod
def by_collection(cls, collection_id=None):
q = cls.all()
q = q.filter(cls.collection_id == collection_id)
return q
@classmethod
def find_ids(cls, collection_id=None, failed_only=False):
q = cls.all_ids()
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
if failed_only:
q = q.filter(cls.status != cls.STATUS_SUCCESS)
q = q.order_by(cls.id.asc())
return q
def to_proxy(self):
meta = dict(self.meta)
headers = meta.pop('headers', {})
headers = {slugify(k, sep='_'): v for k, v in headers.items()}
proxy = model.get_proxy({
'id': str(self.id),
'schema': self.model,
'properties': meta
})
proxy.set('contentHash', self.content_hash)
proxy.set('parent', self.parent_id)
proxy.set('ancestors', self.ancestors)
proxy.set('processingStatus', self.status)
proxy.set('processingError', self.error_message)
proxy.set('fileSize', meta.get('file_size'))
proxy.set('fileName', meta.get('file_name'))
if not proxy.has('fileName'):
disposition = headers.get('content_disposition')
if disposition is not None:
_, attrs = cgi.parse_header(disposition)
proxy.set('fileName', attrs.get('filename'))
proxy.set('mimeType', meta.get('mime_type'))
if not proxy.has('mimeType'):
proxy.set('mimeType', headers.get('content_type'))
proxy.set('language', meta.get('languages'))
proxy.set('country', meta.get('countries'))
proxy.set('authoredAt', meta.get('authored_at'))
proxy.set('modifiedAt', meta.get('modified_at'))
proxy.set('publishedAt', meta.get('published_at'))
proxy.set('retrievedAt', meta.get('retrieved_at'))
proxy.set('sourceUrl', meta.get('source_url'))
proxy.set('messageId', meta.get('message_id'), quiet=True)
proxy.set('inReplyTo', meta.get('in_reply_to'), quiet=True)
proxy.set('bodyText', self.body_text, quiet=True)
proxy.set('bodyHtml', self.body_raw, quiet=True)
columns = meta.get('columns')
| flag_modified(self, 'meta') | identifier_body |
document.py | 0000
SCHEMA = 'Document'
SCHEMA_FOLDER = 'Folder'
SCHEMA_PACKAGE = 'Package'
SCHEMA_WORKBOOK = 'Workbook'
SCHEMA_TEXT = 'PlainText'
SCHEMA_HTML = 'HyperText'
SCHEMA_PDF = 'Pages'
SCHEMA_IMAGE = 'Image'
SCHEMA_AUDIO = 'Audio'
SCHEMA_VIDEO = 'Video'
SCHEMA_TABLE = 'Table'
SCHEMA_EMAIL = 'Email'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True, index=True)
schema = db.Column(db.String(255), nullable=False)
status = db.Column(db.Unicode(10), nullable=True)
meta = db.Column(JSONB, default={})
error_message = db.Column(db.Unicode(), nullable=True)
body_text = db.Column(db.Unicode(), nullable=True)
body_raw = db.Column(db.Unicode(), nullable=True)
uploader_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True) # noqa
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True, index=True) # noqa
children = db.relationship('Document', lazy='dynamic', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
@property
def model(self):
return model.get(self.schema)
@property
def name(self):
if self.title is not None:
return self.title
if self.file_name is not None:
return self.file_name |
@property
def supports_records(self):
# Slightly unintuitive naming: this just checks the document type,
# not if there actually are any records.
return self.schema in [self.SCHEMA_PDF, self.SCHEMA_TABLE]
@property
def supports_pages(self):
return self.schema == self.SCHEMA_PDF
@property
def supports_nlp(self):
structural = [
Document.SCHEMA,
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_WORKBOOK,
Document.SCHEMA_VIDEO,
Document.SCHEMA_AUDIO,
]
return self.schema not in structural
@property
def ancestors(self):
if self.parent_id is None:
return []
key = cache.key('ancestors', self.id)
ancestors = cache.get_list(key)
if len(ancestors):
return ancestors
parent_key = cache.key('ancestors', self.parent_id)
ancestors = cache.get_list(parent_key)
if not len(ancestors):
ancestors = []
parent = Document.by_id(self.parent_id)
if parent is not None:
ancestors = parent.ancestors
ancestors.append(self.parent_id)
if self.model.is_a(model.get(self.SCHEMA_FOLDER)):
cache.set_list(key, ancestors, expire=cache.EXPIRE)
return ancestors
def update(self, data):
props = ('title', 'summary', 'author', 'crawler', 'source_url',
'file_name', 'mime_type', 'headers', 'date', 'authored_at',
'modified_at', 'published_at', 'retrieved_at', 'languages',
'countries', 'keywords')
for prop in props:
value = data.get(prop, self.meta.get(prop))
setattr(self, prop, value)
db.session.add(self)
def update_meta(self):
flag_modified(self, 'meta')
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_records()
self.delete_tags()
db.session.delete(self)
@classmethod
def delete_by_collection(cls, collection_id, deleted_at=None):
documents = db.session.query(cls.id)
documents = documents.filter(cls.collection_id == collection_id)
documents = documents.subquery()
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
def raw_texts(self):
yield self.title
yield self.file_name
yield self.source_url
yield self.summary
yield self.author
if self.status != self.STATUS_SUCCESS:
return
yield self.body_text
if self.supports_records:
# iterate over all the associated records.
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq = pq.order_by(DocumentRecord.index.asc())
for record in pq.yield_per(10000):
yield from record.raw_texts()
@property
def texts(self):
yield from filter_texts(self.raw_texts())
@classmethod
def by_keys(cls, parent_id=None, collection_id=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
q = q.filter(Document.collection_id == collection_id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.schema = cls.SCHEMA
document.collection_id = collection_id
if parent_id is not None:
document.parent_id = parent_id
if foreign_id is not None:
document.foreign_id = foreign_id
if content_hash is not None:
document.content_hash = content_hash
db.session.add(document)
return document
@classmethod
def by_id(cls, id, collection_id=None):
if id is None:
return
q = cls.all()
q = q.filter(cls.id == id)
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
return q.first()
@classmethod
def by_collection(cls, collection_id=None):
q = cls.all()
q = q.filter(cls.collection_id == collection_id)
return q
@classmethod
def find_ids(cls, collection_id=None, failed_only=False):
q = cls.all_ids()
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
if failed_only:
q = q.filter(cls.status != cls.STATUS_SUCCESS)
q = q.order_by(cls.id.asc())
return q
def to_proxy(self):
meta = dict(self.meta)
headers = meta.pop('headers', {})
headers = {slugify(k, sep='_'): v for k, v in headers.items()}
proxy = model.get_proxy({
'id': str(self.id),
'schema': self.model,
'properties': meta
})
proxy.set('contentHash', self.content_hash)
proxy.set('parent', self.parent_id)
proxy.set('ancestors', self.ancestors)
proxy.set('processingStatus', self.status)
proxy.set('processingError', self.error_message)
proxy.set('fileSize', meta.get('file_size'))
proxy.set('fileName', meta.get('file_name'))
if not proxy.has('fileName'):
disposition = headers.get('content_disposition')
if disposition is not None:
_, attrs = cgi.parse_header(disposition)
proxy.set('fileName', attrs.get('filename'))
proxy.set('mimeType', meta.get('mime_type'))
if not proxy.has('mimeType'):
proxy.set('mimeType', headers.get('content_type'))
proxy.set('language', meta.get('languages'))
proxy.set('country', meta.get('countries'))
proxy.set('authoredAt', meta.get('authored_at'))
proxy.set('modifiedAt', meta.get('modified_at'))
proxy.set('publishedAt', meta.get('published_at'))
proxy.set('retrievedAt', meta.get('retrieved_at'))
proxy.set('sourceUrl', meta.get('source_url'))
proxy.set('messageId', meta.get('message_id'), quiet=True)
proxy.set('inReplyTo', meta.get('in_reply_to'), quiet=True)
proxy.set('bodyText', self.body_text, quiet=True)
proxy.set('bodyHtml', self.body_raw, quiet=True)
columns = meta.get('columns')
proxy.set | if self.source_url is not None:
return self.source_url | random_line_split |
util.go | /src/client/pps"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
etcd "github.com/coreos/etcd/clientv3"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kube "k8s.io/client-go/kubernetes"
)
// PipelineRepo creates a pfs repo for a given pipeline.
func PipelineRepo(pipeline *ppsclient.Pipeline) *pfs.Repo {
return &pfs.Repo{Name: pipeline.Name}
}
// PipelineRcName generates the name of the k8s replication controller that
// manages a pipeline's workers
func PipelineRcName(name string, version uint64) string {
// k8s won't allow RC names that contain upper-case letters
// or underscores
// TODO: deal with name collision
name = strings.Replace(name, "_", "-", -1)
return fmt.Sprintf("pipeline-%s-v%d", strings.ToLower(name), version)
}
// GetRequestsResourceListFromPipeline returns a list of resources that the pipeline,
// minimally requires.
func GetRequestsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceRequests, pipelineInfo.CacheSize)
}
func getResourceListFromSpec(resources *pps.ResourceSpec, cacheSize string) (*v1.ResourceList, error) {
var result v1.ResourceList = make(map[v1.ResourceName]resource.Quantity)
cpuStr := fmt.Sprintf("%f", resources.Cpu)
cpuQuantity, err := resource.ParseQuantity(cpuStr)
if err != nil {
log.Warnf("error parsing cpu string: %s: %+v", cpuStr, err)
} else {
result[v1.ResourceCPU] = cpuQuantity
}
memQuantity, err := resource.ParseQuantity(resources.Memory)
if err != nil {
log.Warnf("error parsing memory string: %s: %+v", resources.Memory, err)
} else {
result[v1.ResourceMemory] = memQuantity
}
// Here we are sanity checking. A pipeline should request at least
// as much memory as it needs for caching.
cacheQuantity, err := resource.ParseQuantity(cacheSize)
if err != nil {
log.Warnf("error parsing cache string: %s: %+v", cacheSize, err)
} else if cacheQuantity.Cmp(memQuantity) > 0 {
result[v1.ResourceMemory] = cacheQuantity
}
if resources.Gpu != 0 {
gpuStr := fmt.Sprintf("%d", resources.Gpu)
gpuQuantity, err := resource.ParseQuantity(gpuStr)
if err != nil {
log.Warnf("error parsing gpu string: %s: %+v", gpuStr, err)
} else {
result[v1.ResourceNvidiaGPU] = gpuQuantity
}
}
return &result, nil
}
// GetLimitsResourceListFromPipeline returns a list of resources that the pipeline,
// maximally is limited to.
func GetLimitsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceLimits, pipelineInfo.CacheSize)
}
// getNumNodes attempts to retrieve the number of nodes in the current k8s
// cluster
func getNumNodes(kubeClient *kube.Clientset) (int, error) {
nodeList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("unable to retrieve node list from k8s to determine parallelism: %v", err)
}
if len(nodeList.Items) == 0 {
return 0, fmt.Errorf("pachyderm.pps.jobserver: no k8s nodes found")
}
return len(nodeList.Items), nil
}
// GetExpectedNumWorkers computes the expected number of workers that
// pachyderm will start given the ParallelismSpec 'spec'.
//
// This is only exported for testing
func GetExpectedNumWorkers(kubeClient *kube.Clientset, spec *ppsclient.ParallelismSpec) (int, error) {
if spec == nil || (spec.Constant == 0 && spec.Coefficient == 0) {
return 1, nil
} else if spec.Constant > 0 && spec.Coefficient == 0 {
return int(spec.Constant), nil
} else if spec.Constant == 0 && spec.Coefficient > 0 {
// Start ('coefficient' * 'nodes') workers. Determine number of workers
numNodes, err := getNumNodes(kubeClient)
if err != nil {
return 0, err
}
result := math.Floor(spec.Coefficient * float64(numNodes))
return int(math.Max(result, 1)), nil
}
return 0, fmt.Errorf("Unable to interpret ParallelismSpec %+v", spec)
}
// GetPipelineInfo retrieves and returns a valid PipelineInfo from PFS. It does
// the PFS read/unmarshalling of bytes as well as filling in missing fields
func GetPipelineInfo(pachClient *client.APIClient, ptr *pps.EtcdPipelineInfo) (*pps.PipelineInfo, error) {
buf := bytes.Buffer{}
if err := pachClient.GetFile(ppsconsts.SpecRepo, ptr.SpecCommit.ID, ppsconsts.SpecFile, 0, 0, &buf); err != nil {
return nil, fmt.Errorf("could not read existing PipelineInfo from PFS: %v", err)
}
result := &pps.PipelineInfo{}
if err := result.Unmarshal(buf.Bytes()); err != nil {
return nil, fmt.Errorf("could not unmarshal PipelineInfo bytes from PFS: %v", err)
}
result.State = ptr.State
result.Reason = ptr.Reason
result.JobCounts = ptr.JobCounts
result.SpecCommit = ptr.SpecCommit
return result, nil
}
// FailPipeline updates the pipeline's state to failed and sets the failure reason
func FailPipeline(ctx context.Context, etcdClient *etcd.Client, pipelinesCollection col.Collection, pipelineName string, reason string) error {
_, err := col.NewSTM(ctx, etcdClient, func(stm col.STM) error {
pipelines := pipelinesCollection.ReadWrite(stm)
pipelinePtr := new(pps.EtcdPipelineInfo)
if err := pipelines.Get(pipelineName, pipelinePtr); err != nil {
return err
}
pipelinePtr.State = pps.PipelineState_PIPELINE_FAILURE
pipelinePtr.Reason = reason
pipelines.Put(pipelineName, pipelinePtr)
return nil
})
return err
}
// JobInput fills in the commits for a JobInfo
func JobInput(pipelineInfo *pps.PipelineInfo, outputCommitInfo *pfs.CommitInfo) *pps.Input {
// branchToCommit maps strings of the form "<repo>/<branch>" to PFS commits | jobInput := proto.Clone(pipelineInfo.Input).(*pps.Input)
pps.VisitInput(jobInput, func(input *pps.Input) {
if input.Atom != nil {
if commit, ok := branchToCommit[key(input.Atom.Repo, input.Atom.Branch)]; ok {
input.Atom.Commit = commit.ID
}
}
if input.Cron != nil {
if commit, ok := branchToCommit[key(input.Cron.Repo, "master")]; ok {
input.Cron.Commit = commit.ID
}
}
if input.Git != nil {
if commit, ok := branchToCommit[key(input.Git.Name, input.Git.Branch)]; ok {
input.Git.Commit = commit.ID
}
}
})
return jobInput
}
// PipelineReqFromInfo converts a PipelineInfo into a CreatePipelineRequest.
func PipelineReqFromInfo(pipelineInfo *ppsclient.PipelineInfo) *ppsclient.CreatePipelineRequest {
return &ppsclient.CreatePipelineRequest{
Pipeline: pipelineInfo.Pipeline,
Transform: pipelineInfo.Transform,
ParallelismSpec: pipelineInfo.ParallelismSpec,
Egress: pipelineInfo.Egress,
OutputBranch: pipelineInfo.OutputBranch,
ScaleDownThreshold: pipelineInfo.ScaleDownThreshold,
ResourceRequests: pipelineInfo.ResourceRequests,
ResourceLimits: pipelineInfo.ResourceLimits,
Input: pipelineInfo.Input,
Description: pipelineInfo.Description,
Incremental: pipelineInfo.Incremental,
CacheSize: pipelineInfo.CacheSize,
EnableStats: pipelineInfo.EnableStats,
Batch: pipelineInfo.Batch,
MaxQueueSize: pipelineInfo.MaxQueueSize,
Service: pipelineInfo.Service,
ChunkSpec: pipelineInfo.ChunkSpec,
DatumTimeout: pipelineInfo.DatumTimeout,
JobTimeout: pipelineInfo.JobTimeout,
Salt | branchToCommit := make(map[string]*pfs.Commit)
key := path.Join
for i, provCommit := range outputCommitInfo.Provenance {
branchToCommit[key(provCommit.Repo.Name, outputCommitInfo.BranchProvenance[i].Name)] = provCommit
} | random_line_split |
util.go | /client/pps"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
etcd "github.com/coreos/etcd/clientv3"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kube "k8s.io/client-go/kubernetes"
)
// PipelineRepo creates a pfs repo for a given pipeline.
func PipelineRepo(pipeline *ppsclient.Pipeline) *pfs.Repo {
return &pfs.Repo{Name: pipeline.Name}
}
// PipelineRcName generates the name of the k8s replication controller that
// manages a pipeline's workers
func PipelineRcName(name string, version uint64) string {
// k8s won't allow RC names that contain upper-case letters
// or underscores
// TODO: deal with name collision
name = strings.Replace(name, "_", "-", -1)
return fmt.Sprintf("pipeline-%s-v%d", strings.ToLower(name), version)
}
// GetRequestsResourceListFromPipeline returns a list of resources that the pipeline,
// minimally requires.
func GetRequestsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceRequests, pipelineInfo.CacheSize)
}
func getResourceListFromSpec(resources *pps.ResourceSpec, cacheSize string) (*v1.ResourceList, error) {
var result v1.ResourceList = make(map[v1.ResourceName]resource.Quantity)
cpuStr := fmt.Sprintf("%f", resources.Cpu)
cpuQuantity, err := resource.ParseQuantity(cpuStr)
if err != nil {
log.Warnf("error parsing cpu string: %s: %+v", cpuStr, err)
} else {
result[v1.ResourceCPU] = cpuQuantity
}
memQuantity, err := resource.ParseQuantity(resources.Memory)
if err != nil {
log.Warnf("error parsing memory string: %s: %+v", resources.Memory, err)
} else {
result[v1.ResourceMemory] = memQuantity
}
// Here we are sanity checking. A pipeline should request at least
// as much memory as it needs for caching.
cacheQuantity, err := resource.ParseQuantity(cacheSize)
if err != nil {
log.Warnf("error parsing cache string: %s: %+v", cacheSize, err)
} else if cacheQuantity.Cmp(memQuantity) > 0 {
result[v1.ResourceMemory] = cacheQuantity
}
if resources.Gpu != 0 {
gpuStr := fmt.Sprintf("%d", resources.Gpu)
gpuQuantity, err := resource.ParseQuantity(gpuStr)
if err != nil {
log.Warnf("error parsing gpu string: %s: %+v", gpuStr, err)
} else {
result[v1.ResourceNvidiaGPU] = gpuQuantity
}
}
return &result, nil
}
// GetLimitsResourceListFromPipeline returns a list of resources that the pipeline,
// maximally is limited to.
func GetLimitsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceLimits, pipelineInfo.CacheSize)
}
// getNumNodes attempts to retrieve the number of nodes in the current k8s
// cluster
func getNumNodes(kubeClient *kube.Clientset) (int, error) {
nodeList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("unable to retrieve node list from k8s to determine parallelism: %v", err)
}
if len(nodeList.Items) == 0 {
return 0, fmt.Errorf("pachyderm.pps.jobserver: no k8s nodes found")
}
return len(nodeList.Items), nil
}
// GetExpectedNumWorkers computes the expected number of workers that
// pachyderm will start given the ParallelismSpec 'spec'.
//
// This is only exported for testing
func GetExpectedNumWorkers(kubeClient *kube.Clientset, spec *ppsclient.ParallelismSpec) (int, error) {
if spec == nil || (spec.Constant == 0 && spec.Coefficient == 0) {
return 1, nil
} else if spec.Constant > 0 && spec.Coefficient == 0 {
return int(spec.Constant), nil
} else if spec.Constant == 0 && spec.Coefficient > 0 {
// Start ('coefficient' * 'nodes') workers. Determine number of workers
numNodes, err := getNumNodes(kubeClient)
if err != nil {
return 0, err
}
result := math.Floor(spec.Coefficient * float64(numNodes))
return int(math.Max(result, 1)), nil
}
return 0, fmt.Errorf("Unable to interpret ParallelismSpec %+v", spec)
}
// GetPipelineInfo retrieves and returns a valid PipelineInfo from PFS. It does
// the PFS read/unmarshalling of bytes as well as filling in missing fields
func GetPipelineInfo(pachClient *client.APIClient, ptr *pps.EtcdPipelineInfo) (*pps.PipelineInfo, error) |
// FailPipeline updates the pipeline's state to failed and sets the failure reason
func FailPipeline(ctx context.Context, etcdClient *etcd.Client, pipelinesCollection col.Collection, pipelineName string, reason string) error {
_, err := col.NewSTM(ctx, etcdClient, func(stm col.STM) error {
pipelines := pipelinesCollection.ReadWrite(stm)
pipelinePtr := new(pps.EtcdPipelineInfo)
if err := pipelines.Get(pipelineName, pipelinePtr); err != nil {
return err
}
pipelinePtr.State = pps.PipelineState_PIPELINE_FAILURE
pipelinePtr.Reason = reason
pipelines.Put(pipelineName, pipelinePtr)
return nil
})
return err
}
// JobInput fills in the commits for a JobInfo
func JobInput(pipelineInfo *pps.PipelineInfo, outputCommitInfo *pfs.CommitInfo) *pps.Input {
// branchToCommit maps strings of the form "<repo>/<branch>" to PFS commits
branchToCommit := make(map[string]*pfs.Commit)
key := path.Join
for i, provCommit := range outputCommitInfo.Provenance {
branchToCommit[key(provCommit.Repo.Name, outputCommitInfo.BranchProvenance[i].Name)] = provCommit
}
jobInput := proto.Clone(pipelineInfo.Input).(*pps.Input)
pps.VisitInput(jobInput, func(input *pps.Input) {
if input.Atom != nil {
if commit, ok := branchToCommit[key(input.Atom.Repo, input.Atom.Branch)]; ok {
input.Atom.Commit = commit.ID
}
}
if input.Cron != nil {
if commit, ok := branchToCommit[key(input.Cron.Repo, "master")]; ok {
input.Cron.Commit = commit.ID
}
}
if input.Git != nil {
if commit, ok := branchToCommit[key(input.Git.Name, input.Git.Branch)]; ok {
input.Git.Commit = commit.ID
}
}
})
return jobInput
}
// PipelineReqFromInfo converts a PipelineInfo into a CreatePipelineRequest.
func PipelineReqFromInfo(pipelineInfo *ppsclient.PipelineInfo) *ppsclient.CreatePipelineRequest {
return &ppsclient.CreatePipelineRequest{
Pipeline: pipelineInfo.Pipeline,
Transform: pipelineInfo.Transform,
ParallelismSpec: pipelineInfo.ParallelismSpec,
Egress: pipelineInfo.Egress,
OutputBranch: pipelineInfo.OutputBranch,
ScaleDownThreshold: pipelineInfo.ScaleDownThreshold,
ResourceRequests: pipelineInfo.ResourceRequests,
ResourceLimits: pipelineInfo.ResourceLimits,
Input: pipelineInfo.Input,
Description: pipelineInfo.Description,
Incremental: pipelineInfo.Incremental,
CacheSize: pipelineInfo.CacheSize,
EnableStats: pipelineInfo.EnableStats,
Batch: pipelineInfo.Batch,
MaxQueueSize: pipelineInfo.MaxQueueSize,
Service: pipelineInfo.Service,
ChunkSpec: pipelineInfo.ChunkSpec,
DatumTimeout: pipelineInfo.DatumTimeout,
JobTimeout: pipelineInfo.JobTimeout,
| {
buf := bytes.Buffer{}
if err := pachClient.GetFile(ppsconsts.SpecRepo, ptr.SpecCommit.ID, ppsconsts.SpecFile, 0, 0, &buf); err != nil {
return nil, fmt.Errorf("could not read existing PipelineInfo from PFS: %v", err)
}
result := &pps.PipelineInfo{}
if err := result.Unmarshal(buf.Bytes()); err != nil {
return nil, fmt.Errorf("could not unmarshal PipelineInfo bytes from PFS: %v", err)
}
result.State = ptr.State
result.Reason = ptr.Reason
result.JobCounts = ptr.JobCounts
result.SpecCommit = ptr.SpecCommit
return result, nil
} | identifier_body |
util.go | /client/pps"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
etcd "github.com/coreos/etcd/clientv3"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kube "k8s.io/client-go/kubernetes"
)
// PipelineRepo creates a pfs repo for a given pipeline.
func PipelineRepo(pipeline *ppsclient.Pipeline) *pfs.Repo {
return &pfs.Repo{Name: pipeline.Name}
}
// PipelineRcName generates the name of the k8s replication controller that
// manages a pipeline's workers
func PipelineRcName(name string, version uint64) string {
// k8s won't allow RC names that contain upper-case letters
// or underscores
// TODO: deal with name collision
name = strings.Replace(name, "_", "-", -1)
return fmt.Sprintf("pipeline-%s-v%d", strings.ToLower(name), version)
}
// GetRequestsResourceListFromPipeline returns a list of resources that the pipeline,
// minimally requires.
func GetRequestsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceRequests, pipelineInfo.CacheSize)
}
func getResourceListFromSpec(resources *pps.ResourceSpec, cacheSize string) (*v1.ResourceList, error) {
var result v1.ResourceList = make(map[v1.ResourceName]resource.Quantity)
cpuStr := fmt.Sprintf("%f", resources.Cpu)
cpuQuantity, err := resource.ParseQuantity(cpuStr)
if err != nil {
log.Warnf("error parsing cpu string: %s: %+v", cpuStr, err)
} else {
result[v1.ResourceCPU] = cpuQuantity
}
memQuantity, err := resource.ParseQuantity(resources.Memory)
if err != nil {
log.Warnf("error parsing memory string: %s: %+v", resources.Memory, err)
} else {
result[v1.ResourceMemory] = memQuantity
}
// Here we are sanity checking. A pipeline should request at least
// as much memory as it needs for caching.
cacheQuantity, err := resource.ParseQuantity(cacheSize)
if err != nil {
log.Warnf("error parsing cache string: %s: %+v", cacheSize, err)
} else if cacheQuantity.Cmp(memQuantity) > 0 {
result[v1.ResourceMemory] = cacheQuantity
}
if resources.Gpu != 0 {
gpuStr := fmt.Sprintf("%d", resources.Gpu)
gpuQuantity, err := resource.ParseQuantity(gpuStr)
if err != nil {
log.Warnf("error parsing gpu string: %s: %+v", gpuStr, err)
} else {
result[v1.ResourceNvidiaGPU] = gpuQuantity
}
}
return &result, nil
}
// GetLimitsResourceListFromPipeline returns a list of resources that the pipeline,
// maximally is limited to.
func GetLimitsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceLimits, pipelineInfo.CacheSize)
}
// getNumNodes attempts to retrieve the number of nodes in the current k8s
// cluster
func getNumNodes(kubeClient *kube.Clientset) (int, error) {
nodeList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("unable to retrieve node list from k8s to determine parallelism: %v", err)
}
if len(nodeList.Items) == 0 {
return 0, fmt.Errorf("pachyderm.pps.jobserver: no k8s nodes found")
}
return len(nodeList.Items), nil
}
// GetExpectedNumWorkers computes the expected number of workers that
// pachyderm will start given the ParallelismSpec 'spec'.
//
// This is only exported for testing
func GetExpectedNumWorkers(kubeClient *kube.Clientset, spec *ppsclient.ParallelismSpec) (int, error) {
if spec == nil || (spec.Constant == 0 && spec.Coefficient == 0) {
return 1, nil
} else if spec.Constant > 0 && spec.Coefficient == 0 {
return int(spec.Constant), nil
} else if spec.Constant == 0 && spec.Coefficient > 0 {
// Start ('coefficient' * 'nodes') workers. Determine number of workers
numNodes, err := getNumNodes(kubeClient)
if err != nil {
return 0, err
}
result := math.Floor(spec.Coefficient * float64(numNodes))
return int(math.Max(result, 1)), nil
}
return 0, fmt.Errorf("Unable to interpret ParallelismSpec %+v", spec)
}
// GetPipelineInfo retrieves and returns a valid PipelineInfo from PFS. It does
// the PFS read/unmarshalling of bytes as well as filling in missing fields
func GetPipelineInfo(pachClient *client.APIClient, ptr *pps.EtcdPipelineInfo) (*pps.PipelineInfo, error) {
buf := bytes.Buffer{}
if err := pachClient.GetFile(ppsconsts.SpecRepo, ptr.SpecCommit.ID, ppsconsts.SpecFile, 0, 0, &buf); err != nil {
return nil, fmt.Errorf("could not read existing PipelineInfo from PFS: %v", err)
}
result := &pps.PipelineInfo{}
if err := result.Unmarshal(buf.Bytes()); err != nil {
return nil, fmt.Errorf("could not unmarshal PipelineInfo bytes from PFS: %v", err)
}
result.State = ptr.State
result.Reason = ptr.Reason
result.JobCounts = ptr.JobCounts
result.SpecCommit = ptr.SpecCommit
return result, nil
}
// FailPipeline updates the pipeline's state to failed and sets the failure reason
func FailPipeline(ctx context.Context, etcdClient *etcd.Client, pipelinesCollection col.Collection, pipelineName string, reason string) error {
_, err := col.NewSTM(ctx, etcdClient, func(stm col.STM) error {
pipelines := pipelinesCollection.ReadWrite(stm)
pipelinePtr := new(pps.EtcdPipelineInfo)
if err := pipelines.Get(pipelineName, pipelinePtr); err != nil {
return err
}
pipelinePtr.State = pps.PipelineState_PIPELINE_FAILURE
pipelinePtr.Reason = reason
pipelines.Put(pipelineName, pipelinePtr)
return nil
})
return err
}
// JobInput fills in the commits for a JobInfo
func JobInput(pipelineInfo *pps.PipelineInfo, outputCommitInfo *pfs.CommitInfo) *pps.Input {
// branchToCommit maps strings of the form "<repo>/<branch>" to PFS commits
branchToCommit := make(map[string]*pfs.Commit)
key := path.Join
for i, provCommit := range outputCommitInfo.Provenance {
branchToCommit[key(provCommit.Repo.Name, outputCommitInfo.BranchProvenance[i].Name)] = provCommit
}
jobInput := proto.Clone(pipelineInfo.Input).(*pps.Input)
pps.VisitInput(jobInput, func(input *pps.Input) {
if input.Atom != nil {
if commit, ok := branchToCommit[key(input.Atom.Repo, input.Atom.Branch)]; ok {
input.Atom.Commit = commit.ID
}
}
if input.Cron != nil {
if commit, ok := branchToCommit[key(input.Cron.Repo, "master")]; ok |
}
if input.Git != nil {
if commit, ok := branchToCommit[key(input.Git.Name, input.Git.Branch)]; ok {
input.Git.Commit = commit.ID
}
}
})
return jobInput
}
// PipelineReqFromInfo converts a PipelineInfo into a CreatePipelineRequest.
func PipelineReqFromInfo(pipelineInfo *ppsclient.PipelineInfo) *ppsclient.CreatePipelineRequest {
return &ppsclient.CreatePipelineRequest{
Pipeline: pipelineInfo.Pipeline,
Transform: pipelineInfo.Transform,
ParallelismSpec: pipelineInfo.ParallelismSpec,
Egress: pipelineInfo.Egress,
OutputBranch: pipelineInfo.OutputBranch,
ScaleDownThreshold: pipelineInfo.ScaleDownThreshold,
ResourceRequests: pipelineInfo.ResourceRequests,
ResourceLimits: pipelineInfo.ResourceLimits,
Input: pipelineInfo.Input,
Description: pipelineInfo.Description,
Incremental: pipelineInfo.Incremental,
CacheSize: pipelineInfo.CacheSize,
EnableStats: pipelineInfo.EnableStats,
Batch: pipelineInfo.Batch,
MaxQueueSize: pipelineInfo.MaxQueueSize,
Service: pipelineInfo.Service,
ChunkSpec: pipelineInfo.ChunkSpec,
DatumTimeout: pipelineInfo.DatumTimeout,
JobTimeout: pipelineInfo.JobTimeout,
| {
input.Cron.Commit = commit.ID
} | conditional_block |
util.go | /client/pps"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
etcd "github.com/coreos/etcd/clientv3"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kube "k8s.io/client-go/kubernetes"
)
// PipelineRepo creates a pfs repo for a given pipeline.
func PipelineRepo(pipeline *ppsclient.Pipeline) *pfs.Repo {
return &pfs.Repo{Name: pipeline.Name}
}
// PipelineRcName generates the name of the k8s replication controller that
// manages a pipeline's workers
func PipelineRcName(name string, version uint64) string {
// k8s won't allow RC names that contain upper-case letters
// or underscores
// TODO: deal with name collision
name = strings.Replace(name, "_", "-", -1)
return fmt.Sprintf("pipeline-%s-v%d", strings.ToLower(name), version)
}
// GetRequestsResourceListFromPipeline returns a list of resources that the pipeline,
// minimally requires.
func GetRequestsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceRequests, pipelineInfo.CacheSize)
}
func | (resources *pps.ResourceSpec, cacheSize string) (*v1.ResourceList, error) {
var result v1.ResourceList = make(map[v1.ResourceName]resource.Quantity)
cpuStr := fmt.Sprintf("%f", resources.Cpu)
cpuQuantity, err := resource.ParseQuantity(cpuStr)
if err != nil {
log.Warnf("error parsing cpu string: %s: %+v", cpuStr, err)
} else {
result[v1.ResourceCPU] = cpuQuantity
}
memQuantity, err := resource.ParseQuantity(resources.Memory)
if err != nil {
log.Warnf("error parsing memory string: %s: %+v", resources.Memory, err)
} else {
result[v1.ResourceMemory] = memQuantity
}
// Here we are sanity checking. A pipeline should request at least
// as much memory as it needs for caching.
cacheQuantity, err := resource.ParseQuantity(cacheSize)
if err != nil {
log.Warnf("error parsing cache string: %s: %+v", cacheSize, err)
} else if cacheQuantity.Cmp(memQuantity) > 0 {
result[v1.ResourceMemory] = cacheQuantity
}
if resources.Gpu != 0 {
gpuStr := fmt.Sprintf("%d", resources.Gpu)
gpuQuantity, err := resource.ParseQuantity(gpuStr)
if err != nil {
log.Warnf("error parsing gpu string: %s: %+v", gpuStr, err)
} else {
result[v1.ResourceNvidiaGPU] = gpuQuantity
}
}
return &result, nil
}
// GetLimitsResourceListFromPipeline returns a list of resources that the pipeline,
// maximally is limited to.
func GetLimitsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceLimits, pipelineInfo.CacheSize)
}
// getNumNodes attempts to retrieve the number of nodes in the current k8s
// cluster
func getNumNodes(kubeClient *kube.Clientset) (int, error) {
nodeList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("unable to retrieve node list from k8s to determine parallelism: %v", err)
}
if len(nodeList.Items) == 0 {
return 0, fmt.Errorf("pachyderm.pps.jobserver: no k8s nodes found")
}
return len(nodeList.Items), nil
}
// GetExpectedNumWorkers computes the expected number of workers that
// pachyderm will start given the ParallelismSpec 'spec'.
//
// This is only exported for testing
func GetExpectedNumWorkers(kubeClient *kube.Clientset, spec *ppsclient.ParallelismSpec) (int, error) {
if spec == nil || (spec.Constant == 0 && spec.Coefficient == 0) {
return 1, nil
} else if spec.Constant > 0 && spec.Coefficient == 0 {
return int(spec.Constant), nil
} else if spec.Constant == 0 && spec.Coefficient > 0 {
// Start ('coefficient' * 'nodes') workers. Determine number of workers
numNodes, err := getNumNodes(kubeClient)
if err != nil {
return 0, err
}
result := math.Floor(spec.Coefficient * float64(numNodes))
return int(math.Max(result, 1)), nil
}
return 0, fmt.Errorf("Unable to interpret ParallelismSpec %+v", spec)
}
// GetPipelineInfo retrieves and returns a valid PipelineInfo from PFS. It does
// the PFS read/unmarshalling of bytes as well as filling in missing fields
func GetPipelineInfo(pachClient *client.APIClient, ptr *pps.EtcdPipelineInfo) (*pps.PipelineInfo, error) {
buf := bytes.Buffer{}
if err := pachClient.GetFile(ppsconsts.SpecRepo, ptr.SpecCommit.ID, ppsconsts.SpecFile, 0, 0, &buf); err != nil {
return nil, fmt.Errorf("could not read existing PipelineInfo from PFS: %v", err)
}
result := &pps.PipelineInfo{}
if err := result.Unmarshal(buf.Bytes()); err != nil {
return nil, fmt.Errorf("could not unmarshal PipelineInfo bytes from PFS: %v", err)
}
result.State = ptr.State
result.Reason = ptr.Reason
result.JobCounts = ptr.JobCounts
result.SpecCommit = ptr.SpecCommit
return result, nil
}
// FailPipeline updates the pipeline's state to failed and sets the failure reason
func FailPipeline(ctx context.Context, etcdClient *etcd.Client, pipelinesCollection col.Collection, pipelineName string, reason string) error {
_, err := col.NewSTM(ctx, etcdClient, func(stm col.STM) error {
pipelines := pipelinesCollection.ReadWrite(stm)
pipelinePtr := new(pps.EtcdPipelineInfo)
if err := pipelines.Get(pipelineName, pipelinePtr); err != nil {
return err
}
pipelinePtr.State = pps.PipelineState_PIPELINE_FAILURE
pipelinePtr.Reason = reason
pipelines.Put(pipelineName, pipelinePtr)
return nil
})
return err
}
// JobInput fills in the commits for a JobInfo
func JobInput(pipelineInfo *pps.PipelineInfo, outputCommitInfo *pfs.CommitInfo) *pps.Input {
// branchToCommit maps strings of the form "<repo>/<branch>" to PFS commits
branchToCommit := make(map[string]*pfs.Commit)
key := path.Join
for i, provCommit := range outputCommitInfo.Provenance {
branchToCommit[key(provCommit.Repo.Name, outputCommitInfo.BranchProvenance[i].Name)] = provCommit
}
jobInput := proto.Clone(pipelineInfo.Input).(*pps.Input)
pps.VisitInput(jobInput, func(input *pps.Input) {
if input.Atom != nil {
if commit, ok := branchToCommit[key(input.Atom.Repo, input.Atom.Branch)]; ok {
input.Atom.Commit = commit.ID
}
}
if input.Cron != nil {
if commit, ok := branchToCommit[key(input.Cron.Repo, "master")]; ok {
input.Cron.Commit = commit.ID
}
}
if input.Git != nil {
if commit, ok := branchToCommit[key(input.Git.Name, input.Git.Branch)]; ok {
input.Git.Commit = commit.ID
}
}
})
return jobInput
}
// PipelineReqFromInfo converts a PipelineInfo into a CreatePipelineRequest.
func PipelineReqFromInfo(pipelineInfo *ppsclient.PipelineInfo) *ppsclient.CreatePipelineRequest {
return &ppsclient.CreatePipelineRequest{
Pipeline: pipelineInfo.Pipeline,
Transform: pipelineInfo.Transform,
ParallelismSpec: pipelineInfo.ParallelismSpec,
Egress: pipelineInfo.Egress,
OutputBranch: pipelineInfo.OutputBranch,
ScaleDownThreshold: pipelineInfo.ScaleDownThreshold,
ResourceRequests: pipelineInfo.ResourceRequests,
ResourceLimits: pipelineInfo.ResourceLimits,
Input: pipelineInfo.Input,
Description: pipelineInfo.Description,
Incremental: pipelineInfo.Incremental,
CacheSize: pipelineInfo.CacheSize,
EnableStats: pipelineInfo.EnableStats,
Batch: pipelineInfo.Batch,
MaxQueueSize: pipelineInfo.MaxQueueSize,
Service: pipelineInfo.Service,
ChunkSpec: pipelineInfo.ChunkSpec,
DatumTimeout: pipelineInfo.DatumTimeout,
JobTimeout: pipelineInfo.JobTimeout,
S | getResourceListFromSpec | identifier_name |
condition_strategy_generators.rs | resenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
}
}
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct HillClimbSeedInfo<'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds |
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println | {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
} | conditional_block |
condition_strategy_generators.rs | rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct HillClimbSeedInfo<'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
}
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", strategy.strategy.annotation);
}
result
}
}
impl ConditionStrategy {
pub fn bunch_of_random_changes(
&self,
state: &CombatState,
rng: &mut impl Rng,
promising_strategies: &[&ConditionStrategy],
) -> ConditionStrategy {
fn tweak_rules(
rules: &mut Vec<Rule>,
state: &CombatState,
rng: &mut impl Rng,
promising_conditions: &[Condition],
) {
let remove_chance = 0.05f64.min(2.0 / rules.len() as f64);
rules.retain(|_| rng.gen::<f64>() > remove_chance);
for rule in rules.iter_mut() {
if rng.gen() {
if rule.flat_reward != 0.0 {
rule.flat_reward += rng.sample::<f64, _>(StandardNormal) * 0.2;
}
if rule.block_per_energy_reward != 0.0 {
rule.block_per_energy_reward += rng.sample::<f64, _>(StandardNormal) * 0.02;
}
for value in &mut rule.unblocked_damage_per_energy_rewards {
if *value != 0.0 {
*value += rng.sample::<f64, _>(StandardNormal) * 0.01;
}
}
}
}
for _ in 0..rng.gen_range(10..30) {
let condition;
if rng.gen() || promising_conditions.is_empty() {
condition = Condition::random_generally_relevant_state_distinguisher(state, rng);
} else {
condition = promising_conditions.choose(rng).unwrap().clone();
}
if rng.gen() || rules.is_empty() {
rules.push(Rule {
conditions: vec![
Condition::random_generally_relevant_choice_distinguisher(state, rng),
condition,
],
flat_reward: rng.sample(StandardNormal),
..Default::default()
})
} else {
rules.choose_mut(rng).unwrap().conditions.push(condition);
}
}
}
let promising_conditions: Vec<_> = promising_strategies
.iter()
.flat_map(|s| {
s.rules
.iter()
.flat_map(|rule| &rule.conditions)
.filter(|c| {
!matches!(
c.kind,
ConditionKind::PlayCardId(_) | ConditionKind::UsePotionId(_)
)
})
.cloned()
}) | .collect();
let mut result = self.clone(); | random_line_split |
|
condition_strategy_generators.rs | resenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds | }
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct HillClimbSeedInfo<'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
}
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", | {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
} | identifier_body |
condition_strategy_generators.rs | resenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
}
}
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct | <'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
}
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println | HillClimbSeedInfo | identifier_name |
helicorder.ts | else {
seismograph.shadowRoot?.querySelector("style.selection")?.remove();
}
});
});
}
get heliConfig(): HelicorderConfig {
return this.seismographConfig as HelicorderConfig;
}
set heliConfig(config: HelicorderConfig) {
this.seismographConfig = config;
}
get width(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.width;
}
get height(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.height;
}
appendSegment(segment: SeismogramSegment) {
const segMinMax = segment.findMinMax();
const origMinMax = this.heliConfig.fixedAmplitudeScale;
const heliTimeRange = this.heliConfig.fixedTimeScale;
if (!heliTimeRange) { throw new Error("Heli is not fixedTimeScale");}
if (heliTimeRange.end < segment.timeRange.end) {
const lineDuration = Duration.fromMillis(
heliTimeRange.toDuration().toMillis() / this.heliConfig.numLines);
this.heliConfig.fixedTimeScale =
Interval.fromDateTimes(
heliTimeRange.start.plus(lineDuration),
heliTimeRange.end.plus(lineDuration)
);
this.draw();
}
if (this.seisData && this.seisData.length > 0) {
const singleSeisData = this.seisData[0];
singleSeisData.append(segment);
if (heliTimeRange.end < segment.timeRange.end ||
(origMinMax &&
(segMinMax.min < origMinMax[0] ||
origMinMax[1] < segMinMax.max))) {
this.draw(); //redraw because amp changed
} else {
// only redraw overlaping graphs
const seismographList = (this.shadowRoot ? Array.from(this.shadowRoot.querySelectorAll('sp-seismograph')) : []) as Array<Seismograph>;
seismographList.forEach(seisGraph => {
const lineInterval = seisGraph.displayTimeRangeForSeisDisplayData(singleSeisData);
if (segment.timeRange.intersection(lineInterval)) {
// overlaps
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
seisGraph.seisData = [lineSeisData];
}
});
}
} else {
// heli is empty
const sdd = SeismogramDisplayData.fromSeismogram(new Seismogram(segment));
this.seisData = [sdd];
}
}
/**
* draws, or redraws, the helicorder.
*/
draw() {
this.heliConfig.lineSeisConfig.amplitudeMode = this.heliConfig.amplitudeMode;
this.drawSeismograms();
}
/**
* draws or redraws the seismograms in the helicorder
*
* @private
*/
drawSeismograms(): void {
if ( ! this.isConnected) { return; }
const wrapper = (this.getShadowRoot().querySelector('div') as HTMLDivElement);
const timeRange = this.heliConfig.fixedTimeScale;
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
let maxVariation = 1;
let singleSeisData;
if (this.seisData.length !== 0) {
singleSeisData = this.seisData[0];
} else {
singleSeisData = new SeismogramDisplayData(timeRange);
}
if (singleSeisData.seismogram) {
const mul_percent = 1.01;
if (!this.heliConfig.fixedAmplitudeScale || (
this.heliConfig.fixedAmplitudeScale[0] === 0 && this.heliConfig.fixedAmplitudeScale[1] === 0
)) {
if (this.heliConfig.maxVariation === 0) {
if (singleSeisData.seismogram.timeRange.overlaps(timeRange)) {
const minMax = findMinMaxOverTimeRange([singleSeisData],
timeRange,
false,
this.heliConfig.amplitudeMode);
maxVariation = minMax.expandPercentage(mul_percent).fullWidth;
}
} else {
maxVariation = this.heliConfig.maxVariation;
}
}
}
const startTime = timeRange.start;
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
wrapper.querySelectorAll("sp-seismograph").forEach(e => e.remove());
const lineTimes = this.calcTimesForLines(
startTime,
secondsPerLine,
this.heliConfig.numLines,
);
const margin = this.heliConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - margin.top - margin.bottom) /
(nl - (nl - 1) * this.heliConfig.overlap);
for (const lineTime of lineTimes) {
const lineNumber = lineTime.lineNumber;
const lineInterval = lineTime.interval;
let startTime = lineTime.interval.start;
const endTime = lineTime.interval.end;
let height = baseHeight;
const marginTop =
lineNumber === 0
? 0
: Math.round(-1.0 * height * this.heliConfig.overlap);
const lineSeisConfig = this.heliConfig.lineSeisConfig.clone();
// don't title lines past the first
lineSeisConfig.title = null;
if (lineNumber === 0) {
lineSeisConfig.title = this.heliConfig.title;
lineSeisConfig.isXAxisTop = this.heliConfig.isXAxisTop;
lineSeisConfig.margin.top += this.heliConfig.margin.top;
height += this.heliConfig.margin.top;
} else if (lineNumber === nl - 1) {
lineSeisConfig.isXAxis = this.heliConfig.isXAxis;
lineSeisConfig.margin.bottom += this.heliConfig.margin.bottom;
height += this.heliConfig.margin.bottom;
}
lineSeisConfig.fixedTimeScale = lineInterval;
lineSeisConfig.yLabel = `${startTime.toFormat("HH:mm")}`;
lineSeisConfig.yLabelRight = `${endTime.toFormat("HH:mm")}`;
lineSeisConfig.lineColors = [
this.heliConfig.lineColors[
lineNumber % this.heliConfig.lineColors.length
],
];
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
if (this.heliConfig.fixedAmplitudeScale && (
this.heliConfig.fixedAmplitudeScale[0] !== 0 || this.heliConfig.fixedAmplitudeScale[1] !== 0
)) {
lineSeisConfig.fixedAmplitudeScale = this.heliConfig.fixedAmplitudeScale;
} else {
lineSeisConfig.fixedAmplitudeScale = [
-1* maxVariation,
maxVariation,
];
}
const seismograph = new Seismograph([lineSeisData], lineSeisConfig);
seismograph.svg.classed(HELICORDER_SELECTOR, true);
seismograph.setAttribute("class", "heliLine");
seismograph.setAttribute("style", `height: ${height}px;margin-top: ${marginTop}px`);
const seismographWrapper = (seismograph.shadowRoot?.querySelector('div') as HTMLDivElement);
const styleEl= document.createElement('style');
const seismographRoot = seismograph.shadowRoot;
if (seismographRoot) {
const helicss = seismographRoot.insertBefore(styleEl, seismographWrapper);
helicss.textContent = `
.yLabel text {
font-size: x-small;
fill: ${lineSeisConfig.lineColors[0]};
}
.utclabels {
position: relative;
font-size: x-small;
width: 100%;
}
.utclabels div {
display: flex;
position: absolute;
left: 0px;
justify-content: space-between;
width: 100%;
z-index: -1;
}
`;
}
wrapper.appendChild(seismograph);
if (lineNumber === 0) {
const utcDiv = document.createElement('div');
utcDiv.setAttribute("class", "utclabels");
const innerDiv = utcDiv.appendChild(document.createElement('div'));
innerDiv.setAttribute("style", `top: ${lineSeisConfig.margin.top}px;`);
const textEl = innerDiv.appendChild(document.createElement('text'));
textEl.textContent = "UTC";
// and to top right
const rightTextEl = innerDiv | {
let selectedStyle = seismograph.shadowRoot?.querySelector("style.selection");
if ( ! selectedStyle) {
selectedStyle = document.createElement('style');
seismograph.shadowRoot?.insertBefore(selectedStyle, seismograph.shadowRoot?.firstChild);
selectedStyle.setAttribute("class", "selection");
selectedStyle.textContent = `
svg g.yLabel text {
font-weight: bold;
text-decoration: underline;
}
`;
}
} | conditional_block |
|
helicorder.ts | .setAttribute("class", "selection");
selectedStyle.textContent = `
svg g.yLabel text {
font-weight: bold;
text-decoration: underline;
}
`;
}
} else {
seismograph.shadowRoot?.querySelector("style.selection")?.remove();
}
});
});
}
get heliConfig(): HelicorderConfig {
return this.seismographConfig as HelicorderConfig;
}
set heliConfig(config: HelicorderConfig) {
this.seismographConfig = config;
}
get width(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.width;
}
get height(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.height;
}
appendSegment(segment: SeismogramSegment) {
const segMinMax = segment.findMinMax();
const origMinMax = this.heliConfig.fixedAmplitudeScale;
const heliTimeRange = this.heliConfig.fixedTimeScale;
if (!heliTimeRange) { throw new Error("Heli is not fixedTimeScale");}
if (heliTimeRange.end < segment.timeRange.end) {
const lineDuration = Duration.fromMillis(
heliTimeRange.toDuration().toMillis() / this.heliConfig.numLines);
this.heliConfig.fixedTimeScale =
Interval.fromDateTimes(
heliTimeRange.start.plus(lineDuration),
heliTimeRange.end.plus(lineDuration)
);
this.draw();
}
if (this.seisData && this.seisData.length > 0) {
const singleSeisData = this.seisData[0];
singleSeisData.append(segment);
if (heliTimeRange.end < segment.timeRange.end ||
(origMinMax &&
(segMinMax.min < origMinMax[0] ||
origMinMax[1] < segMinMax.max))) {
this.draw(); //redraw because amp changed
} else {
// only redraw overlaping graphs
const seismographList = (this.shadowRoot ? Array.from(this.shadowRoot.querySelectorAll('sp-seismograph')) : []) as Array<Seismograph>;
seismographList.forEach(seisGraph => {
const lineInterval = seisGraph.displayTimeRangeForSeisDisplayData(singleSeisData);
if (segment.timeRange.intersection(lineInterval)) {
// overlaps
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
seisGraph.seisData = [lineSeisData];
}
});
}
} else {
// heli is empty
const sdd = SeismogramDisplayData.fromSeismogram(new Seismogram(segment));
this.seisData = [sdd];
}
}
/**
* draws, or redraws, the helicorder.
*/
draw() {
this.heliConfig.lineSeisConfig.amplitudeMode = this.heliConfig.amplitudeMode;
this.drawSeismograms();
}
/**
* draws or redraws the seismograms in the helicorder
*
* @private
*/
drawSeismograms(): void {
if ( ! this.isConnected) { return; }
const wrapper = (this.getShadowRoot().querySelector('div') as HTMLDivElement);
const timeRange = this.heliConfig.fixedTimeScale;
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
let maxVariation = 1;
let singleSeisData;
if (this.seisData.length !== 0) {
singleSeisData = this.seisData[0];
} else {
singleSeisData = new SeismogramDisplayData(timeRange);
}
if (singleSeisData.seismogram) {
const mul_percent = 1.01;
if (!this.heliConfig.fixedAmplitudeScale || (
this.heliConfig.fixedAmplitudeScale[0] === 0 && this.heliConfig.fixedAmplitudeScale[1] === 0
)) {
if (this.heliConfig.maxVariation === 0) {
if (singleSeisData.seismogram.timeRange.overlaps(timeRange)) {
const minMax = findMinMaxOverTimeRange([singleSeisData],
timeRange,
false,
this.heliConfig.amplitudeMode);
maxVariation = minMax.expandPercentage(mul_percent).fullWidth;
}
} else {
maxVariation = this.heliConfig.maxVariation;
}
}
}
const startTime = timeRange.start;
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
wrapper.querySelectorAll("sp-seismograph").forEach(e => e.remove());
const lineTimes = this.calcTimesForLines(
startTime,
secondsPerLine,
this.heliConfig.numLines,
);
const margin = this.heliConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - margin.top - margin.bottom) /
(nl - (nl - 1) * this.heliConfig.overlap);
for (const lineTime of lineTimes) {
const lineNumber = lineTime.lineNumber;
const lineInterval = lineTime.interval;
let startTime = lineTime.interval.start;
const endTime = lineTime.interval.end;
let height = baseHeight;
const marginTop =
lineNumber === 0
? 0
: Math.round(-1.0 * height * this.heliConfig.overlap);
const lineSeisConfig = this.heliConfig.lineSeisConfig.clone();
// don't title lines past the first
lineSeisConfig.title = null;
if (lineNumber === 0) {
lineSeisConfig.title = this.heliConfig.title;
lineSeisConfig.isXAxisTop = this.heliConfig.isXAxisTop;
lineSeisConfig.margin.top += this.heliConfig.margin.top;
height += this.heliConfig.margin.top;
} else if (lineNumber === nl - 1) {
lineSeisConfig.isXAxis = this.heliConfig.isXAxis;
lineSeisConfig.margin.bottom += this.heliConfig.margin.bottom;
height += this.heliConfig.margin.bottom;
}
lineSeisConfig.fixedTimeScale = lineInterval;
lineSeisConfig.yLabel = `${startTime.toFormat("HH:mm")}`;
lineSeisConfig.yLabelRight = `${endTime.toFormat("HH:mm")}`;
lineSeisConfig.lineColors = [
this.heliConfig.lineColors[
lineNumber % this.heliConfig.lineColors.length
],
];
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
if (this.heliConfig.fixedAmplitudeScale && (
this.heliConfig.fixedAmplitudeScale[0] !== 0 || this.heliConfig.fixedAmplitudeScale[1] !== 0
)) {
lineSeisConfig.fixedAmplitudeScale = this.heliConfig.fixedAmplitudeScale;
} else {
lineSeisConfig.fixedAmplitudeScale = [
-1* maxVariation,
maxVariation,
];
}
const seismograph = new Seismograph([lineSeisData], lineSeisConfig);
seismograph.svg.classed(HELICORDER_SELECTOR, true);
seismograph.setAttribute("class", "heliLine");
seismograph.setAttribute("style", `height: ${height}px;margin-top: ${marginTop}px`);
const seismographWrapper = (seismograph.shadowRoot?.querySelector('div') as HTMLDivElement);
const styleEl= document.createElement('style');
const seismographRoot = seismograph.shadowRoot;
if (seismographRoot) {
const helicss = seismographRoot.insertBefore(styleEl, seismographWrapper);
helicss.textContent = `
.yLabel text {
font-size: x-small;
fill: ${lineSeisConfig.lineColors[0]};
}
.utclabels {
position: relative;
font-size: x-small;
width: 100%;
}
.utclabels div {
display: flex;
position: absolute;
left: 0px; | }
wrapper.appendChild(seismograph);
if (lineNumber === 0) {
const utcDiv = document.createElement('div');
utcDiv.setAttribute("class", "utclabels");
const innerDiv = utcDiv.appendChild(document.createElement('div'));
innerDiv.setAttribute("style", `top: ${lineSeisConfig.margin.top}px;`);
const textEl = innerDiv.appendChild(document.createElement('text'));
textEl.textContent = "UTC";
// and to top right
const rightTextEl = innerDiv.appendChild(document.createElement('text'));
rightTextEl.textContent = "UTC";
seismographWrapper.insertBefore(utcDiv, seismographWrapper.firstChild);
}
startTime = endTime;
}
}
cutForLine(singleSeisData: SeismogramDisplayData, | justify-content: space-between;
width: 100%;
z-index: -1;
}
`; | random_line_split |
helicorder.ts | .setAttribute("class", "selection");
selectedStyle.textContent = `
svg g.yLabel text {
font-weight: bold;
text-decoration: underline;
}
`;
}
} else {
seismograph.shadowRoot?.querySelector("style.selection")?.remove();
}
});
});
}
get heliConfig(): HelicorderConfig {
return this.seismographConfig as HelicorderConfig;
}
set heliConfig(config: HelicorderConfig) {
this.seismographConfig = config;
}
get width(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.width;
}
get | (): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.height;
}
appendSegment(segment: SeismogramSegment) {
const segMinMax = segment.findMinMax();
const origMinMax = this.heliConfig.fixedAmplitudeScale;
const heliTimeRange = this.heliConfig.fixedTimeScale;
if (!heliTimeRange) { throw new Error("Heli is not fixedTimeScale");}
if (heliTimeRange.end < segment.timeRange.end) {
const lineDuration = Duration.fromMillis(
heliTimeRange.toDuration().toMillis() / this.heliConfig.numLines);
this.heliConfig.fixedTimeScale =
Interval.fromDateTimes(
heliTimeRange.start.plus(lineDuration),
heliTimeRange.end.plus(lineDuration)
);
this.draw();
}
if (this.seisData && this.seisData.length > 0) {
const singleSeisData = this.seisData[0];
singleSeisData.append(segment);
if (heliTimeRange.end < segment.timeRange.end ||
(origMinMax &&
(segMinMax.min < origMinMax[0] ||
origMinMax[1] < segMinMax.max))) {
this.draw(); //redraw because amp changed
} else {
// only redraw overlaping graphs
const seismographList = (this.shadowRoot ? Array.from(this.shadowRoot.querySelectorAll('sp-seismograph')) : []) as Array<Seismograph>;
seismographList.forEach(seisGraph => {
const lineInterval = seisGraph.displayTimeRangeForSeisDisplayData(singleSeisData);
if (segment.timeRange.intersection(lineInterval)) {
// overlaps
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
seisGraph.seisData = [lineSeisData];
}
});
}
} else {
// heli is empty
const sdd = SeismogramDisplayData.fromSeismogram(new Seismogram(segment));
this.seisData = [sdd];
}
}
/**
* draws, or redraws, the helicorder.
*/
draw() {
this.heliConfig.lineSeisConfig.amplitudeMode = this.heliConfig.amplitudeMode;
this.drawSeismograms();
}
/**
* draws or redraws the seismograms in the helicorder
*
* @private
*/
drawSeismograms(): void {
if ( ! this.isConnected) { return; }
const wrapper = (this.getShadowRoot().querySelector('div') as HTMLDivElement);
const timeRange = this.heliConfig.fixedTimeScale;
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
let maxVariation = 1;
let singleSeisData;
if (this.seisData.length !== 0) {
singleSeisData = this.seisData[0];
} else {
singleSeisData = new SeismogramDisplayData(timeRange);
}
if (singleSeisData.seismogram) {
const mul_percent = 1.01;
if (!this.heliConfig.fixedAmplitudeScale || (
this.heliConfig.fixedAmplitudeScale[0] === 0 && this.heliConfig.fixedAmplitudeScale[1] === 0
)) {
if (this.heliConfig.maxVariation === 0) {
if (singleSeisData.seismogram.timeRange.overlaps(timeRange)) {
const minMax = findMinMaxOverTimeRange([singleSeisData],
timeRange,
false,
this.heliConfig.amplitudeMode);
maxVariation = minMax.expandPercentage(mul_percent).fullWidth;
}
} else {
maxVariation = this.heliConfig.maxVariation;
}
}
}
const startTime = timeRange.start;
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
wrapper.querySelectorAll("sp-seismograph").forEach(e => e.remove());
const lineTimes = this.calcTimesForLines(
startTime,
secondsPerLine,
this.heliConfig.numLines,
);
const margin = this.heliConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - margin.top - margin.bottom) /
(nl - (nl - 1) * this.heliConfig.overlap);
for (const lineTime of lineTimes) {
const lineNumber = lineTime.lineNumber;
const lineInterval = lineTime.interval;
let startTime = lineTime.interval.start;
const endTime = lineTime.interval.end;
let height = baseHeight;
const marginTop =
lineNumber === 0
? 0
: Math.round(-1.0 * height * this.heliConfig.overlap);
const lineSeisConfig = this.heliConfig.lineSeisConfig.clone();
// don't title lines past the first
lineSeisConfig.title = null;
if (lineNumber === 0) {
lineSeisConfig.title = this.heliConfig.title;
lineSeisConfig.isXAxisTop = this.heliConfig.isXAxisTop;
lineSeisConfig.margin.top += this.heliConfig.margin.top;
height += this.heliConfig.margin.top;
} else if (lineNumber === nl - 1) {
lineSeisConfig.isXAxis = this.heliConfig.isXAxis;
lineSeisConfig.margin.bottom += this.heliConfig.margin.bottom;
height += this.heliConfig.margin.bottom;
}
lineSeisConfig.fixedTimeScale = lineInterval;
lineSeisConfig.yLabel = `${startTime.toFormat("HH:mm")}`;
lineSeisConfig.yLabelRight = `${endTime.toFormat("HH:mm")}`;
lineSeisConfig.lineColors = [
this.heliConfig.lineColors[
lineNumber % this.heliConfig.lineColors.length
],
];
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
if (this.heliConfig.fixedAmplitudeScale && (
this.heliConfig.fixedAmplitudeScale[0] !== 0 || this.heliConfig.fixedAmplitudeScale[1] !== 0
)) {
lineSeisConfig.fixedAmplitudeScale = this.heliConfig.fixedAmplitudeScale;
} else {
lineSeisConfig.fixedAmplitudeScale = [
-1* maxVariation,
maxVariation,
];
}
const seismograph = new Seismograph([lineSeisData], lineSeisConfig);
seismograph.svg.classed(HELICORDER_SELECTOR, true);
seismograph.setAttribute("class", "heliLine");
seismograph.setAttribute("style", `height: ${height}px;margin-top: ${marginTop}px`);
const seismographWrapper = (seismograph.shadowRoot?.querySelector('div') as HTMLDivElement);
const styleEl= document.createElement('style');
const seismographRoot = seismograph.shadowRoot;
if (seismographRoot) {
const helicss = seismographRoot.insertBefore(styleEl, seismographWrapper);
helicss.textContent = `
.yLabel text {
font-size: x-small;
fill: ${lineSeisConfig.lineColors[0]};
}
.utclabels {
position: relative;
font-size: x-small;
width: 100%;
}
.utclabels div {
display: flex;
position: absolute;
left: 0px;
justify-content: space-between;
width: 100%;
z-index: -1;
}
`;
}
wrapper.appendChild(seismograph);
if (lineNumber === 0) {
const utcDiv = document.createElement('div');
utcDiv.setAttribute("class", "utclabels");
const innerDiv = utcDiv.appendChild(document.createElement('div'));
innerDiv.setAttribute("style", `top: ${lineSeisConfig.margin.top}px;`);
const textEl = innerDiv.appendChild(document.createElement('text'));
textEl.textContent = "UTC";
// and to top right
const rightTextEl = innerDiv.appendChild(document.createElement('text'));
rightTextEl.textContent = "UTC";
seismographWrapper.insertBefore(utcDiv, seismographWrapper.firstChild);
}
startTime = endTime;
}
}
cutForLine(singleSeisData: SeismogramDisplayData, | height | identifier_name |
helicorder.ts | - 1) {
lineSeisConfig.isXAxis = this.heliConfig.isXAxis;
lineSeisConfig.margin.bottom += this.heliConfig.margin.bottom;
height += this.heliConfig.margin.bottom;
}
lineSeisConfig.fixedTimeScale = lineInterval;
lineSeisConfig.yLabel = `${startTime.toFormat("HH:mm")}`;
lineSeisConfig.yLabelRight = `${endTime.toFormat("HH:mm")}`;
lineSeisConfig.lineColors = [
this.heliConfig.lineColors[
lineNumber % this.heliConfig.lineColors.length
],
];
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
if (this.heliConfig.fixedAmplitudeScale && (
this.heliConfig.fixedAmplitudeScale[0] !== 0 || this.heliConfig.fixedAmplitudeScale[1] !== 0
)) {
lineSeisConfig.fixedAmplitudeScale = this.heliConfig.fixedAmplitudeScale;
} else {
lineSeisConfig.fixedAmplitudeScale = [
-1* maxVariation,
maxVariation,
];
}
const seismograph = new Seismograph([lineSeisData], lineSeisConfig);
seismograph.svg.classed(HELICORDER_SELECTOR, true);
seismograph.setAttribute("class", "heliLine");
seismograph.setAttribute("style", `height: ${height}px;margin-top: ${marginTop}px`);
const seismographWrapper = (seismograph.shadowRoot?.querySelector('div') as HTMLDivElement);
const styleEl= document.createElement('style');
const seismographRoot = seismograph.shadowRoot;
if (seismographRoot) {
const helicss = seismographRoot.insertBefore(styleEl, seismographWrapper);
helicss.textContent = `
.yLabel text {
font-size: x-small;
fill: ${lineSeisConfig.lineColors[0]};
}
.utclabels {
position: relative;
font-size: x-small;
width: 100%;
}
.utclabels div {
display: flex;
position: absolute;
left: 0px;
justify-content: space-between;
width: 100%;
z-index: -1;
}
`;
}
wrapper.appendChild(seismograph);
if (lineNumber === 0) {
const utcDiv = document.createElement('div');
utcDiv.setAttribute("class", "utclabels");
const innerDiv = utcDiv.appendChild(document.createElement('div'));
innerDiv.setAttribute("style", `top: ${lineSeisConfig.margin.top}px;`);
const textEl = innerDiv.appendChild(document.createElement('text'));
textEl.textContent = "UTC";
// and to top right
const rightTextEl = innerDiv.appendChild(document.createElement('text'));
rightTextEl.textContent = "UTC";
seismographWrapper.insertBefore(utcDiv, seismographWrapper.firstChild);
}
startTime = endTime;
}
}
cutForLine(singleSeisData: SeismogramDisplayData, lineInterval: Interval): SeismogramDisplayData {
let lineCutSeis = null;
let lineSeisData;
if (singleSeisData.seismogram) {
lineCutSeis = singleSeisData.seismogram.cut(lineInterval);
if (lineCutSeis && this.heliConfig.detrendLines) {
lineCutSeis = removeTrend(lineCutSeis);
}
lineSeisData = singleSeisData.cloneWithNewSeismogram(lineCutSeis);
} else {
// no data in window, but keep seisData in case of markers, etc
lineSeisData = singleSeisData.clone();
}
lineSeisData.timeRange = lineInterval;
return lineSeisData;
}
/**
* Calculates the time range covered by each line of the display
*
* @param startTime start of display
* @param secondsPerLine seconds covered by each line
* @param numberOfLines number of lines
* @returns Array of HeliTimeRange, one per line
*/
calcTimesForLines(
startTime: DateTime,
secondsPerLine: number,
numberOfLines: number,
): Array<HeliTimeRange> {
const out = [];
let s = startTime;
const durationPerLine = Duration.fromMillis(secondsPerLine*1000);
for (let lineNum = 0; lineNum < numberOfLines; lineNum++) {
const startEnd = new HeliTimeRange(s, durationPerLine, lineNum);
out.push(startEnd);
s = startEnd.interval.end;
}
return out;
}
calcDetailForEvent(evt: MouseEvent): HeliMouseEventType {
const heliMargin = this.heliConfig.margin;
const margin = this.heliConfig.lineSeisConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - (heliMargin.top+heliMargin.bottom )) /
(nl - (nl - 1) * this.heliConfig.overlap);
let clickLine = 0;
if (evt.offsetY < heliMargin.top+baseHeight*(0.5)) {
clickLine = 0;
} else {
clickLine = Math.round(((evt.offsetY-heliMargin.top)-baseHeight*(0.5))/
(baseHeight*(1-this.heliConfig.overlap)));
}
const timeRange = this.heliConfig.fixedTimeScale;
if ( timeRange ) {
const timeLineFraction = (evt.offsetX-margin.left)/(this.width-margin.left-margin.right);
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
const clickTime = timeRange.start.plus(Duration.fromMillis((clickLine+timeLineFraction)*secondsPerLine*1000));
return {
mouseevent: evt,
time: clickTime,
lineNum: clickLine,
};
} else {
throw new Error("Helicorder must be fixedTimeScale");
}
}
}
export const DEFAULT_MAX_HEIGHT = 600;
/**
* Configuration of the helicorder
*
* Note that setting maxVariation=0 and fixedAmplitudeScale=[0,0] will scale the
* data to max
*
* @param timeRange the time range covered by the helicorder, required
*/
export class HelicorderConfig extends SeismographConfig {
lineSeisConfig: SeismographConfig;
overlap: number;
numLines: number;
maxVariation: number;
detrendLines = false;
constructor(timeRange: Interval) {
super();
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
this.fixedTimeScale = timeRange;
this.maxVariation = 0;
this.maxHeight = DEFAULT_MAX_HEIGHT;
this.xLabel = "";
this.yLabel = "";
this.xSublabel = "";
this.ySublabel = " ";
this.ySublabelIsUnits = false;
this.isXAxis = true;
this.isXAxisTop = true;
this.isYAxis = false;
this.overlap = 0.5;
this.numLines = 12;
this.margin.left = 0;
this.margin.right = 0;
this.margin.top = 40;
this.lineColors = ["skyblue", "olivedrab", "goldenrod"];
this.lineSeisConfig = new SeismographConfig();
this.lineSeisConfig.ySublabel = ` `;
this.lineSeisConfig.xLabel = " ";
this.lineSeisConfig.yLabel = ""; // replace later with `${startTime.toFormat("HH:mm")}`;
this.lineSeisConfig.yLabelOrientation = "horizontal";
this.lineSeisConfig.ySublabelIsUnits = false;
this.lineSeisConfig.isXAxis = false;
this.lineSeisConfig.isYAxis = false;
this.lineSeisConfig.minHeight = 80;
this.lineSeisConfig.margin.top = 0;
this.lineSeisConfig.margin.bottom = 0;
this.lineSeisConfig.margin.left = 37;
this.lineSeisConfig.margin.right = 37;
this.lineSeisConfig.wheelZoom = false;
}
static fromSeismographConfig(seisConfig: SeismographConfig): HelicorderConfig {
if (! seisConfig.fixedTimeScale) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
const heliConfig = new HelicorderConfig(seisConfig.fixedTimeScale);
heliConfig.lineSeisConfig = seisConfig;
heliConfig.lineColors = seisConfig.lineColors;
return heliConfig;
}
}
/**
* Time range for a single line of the helicorder, extends Interval
* to add the line number
*/
export class HeliTimeRange {
lineNumber: number;
interval: Interval;
constructor(
startTime: DateTime,
duration: Duration,
lineNumber: number,
) | {
this.interval = Interval.after(startTime, duration);
this.lineNumber = lineNumber;
} | identifier_body |
|
testkeras.py | ss = stack_arrays([root2array(fpath, tree_name, **kwargs).view(np.recarray) for fpath in files])
try:
return pd.DataFrame(ss)
except Exception:
return pd.DataFrame(ss.data)
def flatten(column):
'''
Args:
-----
column: a column of a pandas df whose entries are lists (or regular entries -- in which case nothing is done)
e.g.: my_df['some_variable']
Returns:
--------
flattened out version of the column.
For example, it will turn:
[1791, 2719, 1891]
[1717, 1, 0, 171, 9181, 537, 12]
[82, 11]
...
into:
1791, 2719, 1891, 1717, 1, 0, 171, 9181, 537, 12, 82, 11, ...
'''
try:
return np.array([v for e in column for v in e])
except (TypeError, ValueError):
return column
########################################################################################
fiSig = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410980*/410980.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._000010.output.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._00001*.root'
fiSig2 = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.41098*/410981.root'
#sig = root2pandas(fiSig,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
#sigFriend = root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend')
SRSelect = '(ejets_2015||ejets_2016||ejets_2017)&&(ph_pt[0]>50000)&&(len(jet_e)>=2)'
sig = pd.concat([root2pandas(fiSig,'nominal'),root2pandas(fiSig2,'nominal')])
sigFriend = pd.concat([root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend'),root2pandas(fiSig2+'FCNCFriend', 'nominalFCNCFriend')])
sig_df = pd.concat([sig,sigFriend], axis=1,join_axes=[sig.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
sig_df = sig_df[sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017'] ==1]
sig_df.reset_index(drop=True)
sig_df = sig_df.assign(ph_e0=pd.Series([i[0] for i in sig_df['ph_e']],index=sig_df.index))
sig_df = sig_df.assign(ph_pt0=pd.Series([i[0] for i in sig_df['ph_pt']],index=sig_df.index))
sig_df = sig_df.assign(ph_eta0=pd.Series([i[0] for i in sig_df['ph_eta']],index=sig_df.index))
sig_df = sig_df.assign(ph_phi0=pd.Series([i[0] for i in sig_df['ph_phi']],index=sig_df.index))
#Not an ideal way to do this but it will work for now, grabs leading photon information
bkg = root2pandas(fiBkg,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
bkgFriend = root2pandas(fiBkg+'FCNCFriend','nominalFCNCFriend')
bkg_df = pd.concat([bkg,bkgFriend], axis=1,join_axes=[bkg.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
bkg_df = bkg_df[bkg_df.loc[:,'ejets_2015']+ bkg_df.loc[:,'ejets_2016']+bkg_df.loc[:,'ejets_2017'] ==1]
bkg_df.reset_index(drop=True)
bkg_df = bkg_df.assign(ph_e0=pd.Series([i[0] for i in bkg_df['ph_e']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_pt0=pd.Series([i[0] for i in bkg_df['ph_pt']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_eta0=pd.Series([i[0] for i in bkg_df['ph_eta']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_phi0=pd.Series([i[0] for i in bkg_df['ph_phi']],index=bkg_df.index))
#Weight placeholders
#bkgw =bkg_df.loc[:,'mujets_2015']+ bkg_df.loc[:,'mujets_2016']+bkg_df.loc[:,'mujets_2017'] #mu just place holder to set bkgweights to 0 for testing!!! Barkeloo
#sigw =sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017']
#w=pd.concat((sigw,bkgw),ignore_index=True).values
w = pd.concat((sig_df['weight_mc'],bkg_df['weight_mc']),ignore_index=True).values
##can run something like b = root2pandas(fiSig,'nominal', selection = 'ejets_2015 >0||ejets_2016>0') for a selection like in http://scikit-hep.org/root_numpy/start.html#a-quick-tutorial
print sig.keys()
## Names of some event-level branches
npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2','ph_phi0','ph_pt0','ph_eta0','ph_e0']#,'m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2']
#npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','ph_phi0','ph_pt0','ph_eta0','ph_e0']
'''
for key in npart: # loop through the event-level branches and plot them on separate histograms
# -- set font and canvas size (optional)
matplotlib.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(8,8), dpi=100)
# -- declare common binning strategy (otherwise every histogram will have its own binning)
bins = np.linspace(min(sig_df[key]), max(sig_df[key]) + 1, 30)
# plot!
_ = plt.hist(sig_df[key], histtype='step', normed=False, bins=bins, label=r'FCNC', linewidth=2)
_ = plt.hist(bkg_df[key], histtype='step', normed=False, bins=bins, label=r'ttbar')
plt.xlabel(key)
plt.yscale('log')
plt.legend(loc='best')
plt.savefig(str(key)+'.png')
plt.clf()
'''
df_full = pd.concat((sig_df,bkg_df), ignore_index=True)
df = pd.concat((sig_df[npart],bkg_df[npart]),ignore_index=True)
X=df.values#as_matrix()
type(X)
X.shape
#w=pd.concat((sig_df['ejets_2015'],sig_df['ejets_2016'],sig_df['ejets_2017'],bkg_df['ejets_2015'],bkg_df['ejets_2016'],bkg['ejets_2017']),ignore_index=True).values
type(w)
| files = glob.glob(files_path)
# -- process ntuples into rec arrays | random_line_split |
|
testkeras.py | 7']
#w=pd.concat((sigw,bkgw),ignore_index=True).values
w = pd.concat((sig_df['weight_mc'],bkg_df['weight_mc']),ignore_index=True).values
##can run something like b = root2pandas(fiSig,'nominal', selection = 'ejets_2015 >0||ejets_2016>0') for a selection like in http://scikit-hep.org/root_numpy/start.html#a-quick-tutorial
print sig.keys()
## Names of some event-level branches
npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2','ph_phi0','ph_pt0','ph_eta0','ph_e0']#,'m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2']
#npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','ph_phi0','ph_pt0','ph_eta0','ph_e0']
'''
for key in npart: # loop through the event-level branches and plot them on separate histograms
# -- set font and canvas size (optional)
matplotlib.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(8,8), dpi=100)
# -- declare common binning strategy (otherwise every histogram will have its own binning)
bins = np.linspace(min(sig_df[key]), max(sig_df[key]) + 1, 30)
# plot!
_ = plt.hist(sig_df[key], histtype='step', normed=False, bins=bins, label=r'FCNC', linewidth=2)
_ = plt.hist(bkg_df[key], histtype='step', normed=False, bins=bins, label=r'ttbar')
plt.xlabel(key)
plt.yscale('log')
plt.legend(loc='best')
plt.savefig(str(key)+'.png')
plt.clf()
'''
df_full = pd.concat((sig_df,bkg_df), ignore_index=True)
df = pd.concat((sig_df[npart],bkg_df[npart]),ignore_index=True)
X=df.values#as_matrix()
type(X)
X.shape
#w=pd.concat((sig_df['ejets_2015'],sig_df['ejets_2016'],sig_df['ejets_2017'],bkg_df['ejets_2015'],bkg_df['ejets_2016'],bkg['ejets_2017']),ignore_index=True).values
type(w)
#Generate an array of truth labels yo distinguish among different classes in the problem
y=[]
for _df, ID in [(sig_df,1),(bkg_df,0)]:
y.extend([ID] * _df.shape[0])
y=np.array(y)
y.shape
ix = range(X.shape[0]) # array of indices, just to keep track of them for safety reasons and future checks
#X_train, X_test, y_train, y_test, w_train, w_test, ix_train, ix_test = train_test_split(X, y, w, ix, train_size=0.8)
######## 80% Train+Validate, 20% test
X_train, X_test, \
y_train, y_test, \
ix_train, ix_test\
= train_test_split(X, y, ix, test_size=0.2)
# 64% train, 16% validate, 20% of original 80%
X_train, X_val,\
y_train, y_val,\
ix_train, ix_val\
=train_test_split(X_train,y_train,ix_train,test_size=0.2)
print "Scaling \n"
from sklearn.preprocessing import StandardScaler, RobustScaler
scaler = StandardScaler()
#scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
from keras.models import Model
from keras.layers import Dense, Dropout, Input
inputs = Input(shape=(X_train.shape[1], )) # placeholder
n = X_train.shape[1]
hidden = Dense(n+1, activation='relu')(inputs)
hidden = Dropout(0.2)(hidden)
hidden = Dense(2*n+2, activation='relu')(hidden)
hidden = Dropout(0.2)(hidden)
#hidden = Dense(4*n+4, activation='relu')(hidden)
#hidden = Dropout(0.2)(hidden)
outputs = Dense(1, activation='sigmoid')(hidden)
#outputs = Dense(2, activation='softmax')(hidden) #needs as many
# last layer has to have the same dimensionality as the number of classes we want to predict, here 2
model = Model(inputs, outputs)
model.summary()
from keras.utils.vis_utils import plot_model
#plot_model(model, 'temp.png', show_shapes=True)
#model.compile('adam','sparse_categorical_crossentropy', metrics=['acc'])
model.compile('adam','binary_crossentropy', metrics=['acc'])
from keras.callbacks import EarlyStopping, ModelCheckpoint
from collections import Counter
print "NEvents to train over: ", Counter(y_train)
print "NEvents to test over: ", Counter(y_test)
print "Training: "
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced',
np.unique(y_train),
y_train)
class_weight_dict = dict(enumerate(class_weights))
try:
model.fit(
X_train, y_train,# class_weight= class_weight_dict,# class_weight={ # rebalance class representation
# 0 : 0.70 * (float(len(y)) / (y == 0).sum()),
# 1 : 0.30 * (float(len(y)) / (y == 1).sum()) #These are some sort of weights. seems weird to have to do this, basically what youre training on I think
### 2 : 0.40 * (float(len(y)) / (y == 2).sum())
# },
callbacks = [
EarlyStopping(verbose=True, patience=15, monitor='val_loss'),
ModelCheckpoint('./models/tutorial-progress.h5', monitor='val_loss', verbose=True, save_best_only=True)
],
epochs=200,
validation_data=(X_val, y_val)
# validation_split = 0.3,
# verbose=True
)
except KeyboardInterrupt:
print 'Training ended early.'
model.load_weights('./models/tutorial-progress.h5')
#################
# Visualization of model history
history = model.history.history
print "history keys: ", history.keys()
#Accuracy plot
plt.plot(100 * np.array(history['acc']), label='training')
plt.plot(100 * np.array(history['val_acc']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('accuracy %')
plt.legend(loc='lower right', fontsize=20)
plt.savefig('accuarcy.png')
plt.close()
#loss plot
plt.plot(100 * np.array(history['loss']), label='training')
plt.plot(100 * np.array(history['val_loss']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='upper right', fontsize=20)
# the line indicate the epoch corresponding to the best performance on the validation set
# plt.vlines(np.argmin(history['val_loss']), 45, 56, linestyle='dashed', linewidth=0.5)
plt.savefig('loss.png')
plt.close()
print 'Loss estimate on unseen examples (from validation set) = {0:.3f}'.format(np.min(history['val_loss']))
############################################################
###############
# -- Save network weights and structure
print 'Saving model...'
model.save_weights('./models/tutorial.h5', overwrite=True)
json_string = model.to_json()
open('./models/tutorial.json', 'w').write(json_string)
print 'Done'
print 'Testing...'
yhat = model.predict(X_test, verbose = True, batch_size = 512)
print "yhat: ", yhat
yhat_cls = np.argmax(yhat, axis=1)
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
| plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black") | conditional_block |
|
testkeras.py |
try:
return np.array([v for e in column for v in e])
except (TypeError, ValueError):
return column
########################################################################################
fiSig = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410980*/410980.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._000010.output.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._00001*.root'
fiSig2 = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.41098*/410981.root'
#sig = root2pandas(fiSig,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
#sigFriend = root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend')
SRSelect = '(ejets_2015||ejets_2016||ejets_2017)&&(ph_pt[0]>50000)&&(len(jet_e)>=2)'
sig = pd.concat([root2pandas(fiSig,'nominal'),root2pandas(fiSig2,'nominal')])
sigFriend = pd.concat([root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend'),root2pandas(fiSig2+'FCNCFriend', 'nominalFCNCFriend')])
sig_df = pd.concat([sig,sigFriend], axis=1,join_axes=[sig.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
sig_df = sig_df[sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017'] ==1]
sig_df.reset_index(drop=True)
sig_df = sig_df.assign(ph_e0=pd.Series([i[0] for i in sig_df['ph_e']],index=sig_df.index))
sig_df = sig_df.assign(ph_pt0=pd.Series([i[0] for i in sig_df['ph_pt']],index=sig_df.index))
sig_df = sig_df.assign(ph_eta0=pd.Series([i[0] for i in sig_df['ph_eta']],index=sig_df.index))
sig_df = sig_df.assign(ph_phi0=pd.Series([i[0] for i in sig_df['ph_phi']],index=sig_df.index))
#Not an ideal way to do this but it will work for now, grabs leading photon information
bkg = root2pandas(fiBkg,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
bkgFriend = root2pandas(fiBkg+'FCNCFriend','nominalFCNCFriend')
bkg_df = pd.concat([bkg,bkgFriend], axis=1,join_axes=[bkg.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
bkg_df = bkg_df[bkg_df.loc[:,'ejets_2015']+ bkg_df.loc[:,'ejets_2016']+bkg_df.loc[:,'ejets_2017'] ==1]
bkg_df.reset_index(drop=True)
bkg_df = bkg_df.assign(ph_e0=pd.Series([i[0] for i in bkg_df['ph_e']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_pt0=pd.Series([i[0] for i in bkg_df['ph_pt']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_eta0=pd.Series([i[0] for i in bkg_df['ph_eta']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_phi0=pd.Series([i[0] for i in bkg_df['ph_phi']],index=bkg_df.index))
#Weight placeholders
#bkgw =bkg_df.loc[:,'mujets_2015']+ bkg_df.loc[:,'mujets_2016']+bkg_df.loc[:,'mujets_2017'] #mu just place holder to set bkgweights to 0 for testing!!! Barkeloo
#sigw =sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017']
#w=pd.concat((sigw,bkgw),ignore_index=True).values
w = pd.concat((sig_df['weight_mc'],bkg_df['weight_mc']),ignore_index=True).values
##can run something like b = root2pandas(fiSig,'nominal', selection = 'ejets_2015 >0||ejets_2016>0') for a selection like in http://scikit-hep.org/root_numpy/start.html#a-quick-tutorial
print sig.keys()
## Names of some event-level branches
npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2','ph_phi0','ph_pt0','ph_eta0','ph_e0']#,'m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2']
#npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','ph_phi0','ph_pt0','ph_eta0','ph_e0']
'''
for key in npart: # loop through the event-level branches and plot them on separate histograms
# -- set font and canvas size (optional)
matplotlib.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(8,8), dpi=100)
# -- declare common binning strategy (otherwise every histogram will have its own binning)
bins = np.linspace(min(sig_df[key]), max(sig_df[key]) + 1, 30)
# plot!
_ = plt.hist(sig_df[key], histtype='step', normed=False, bins=bins, label=r'FCNC', linewidth=2)
_ = plt.hist(bkg_df[key], histtype='step', normed=False, bins=bins, label=r'ttbar')
plt.xlabel(key)
plt.yscale('log')
plt.legend(loc='best')
plt.savefig(str(key)+'.png')
plt.clf()
'''
df_full = pd.concat((sig_df,bkg_df), ignore_index=True)
df = pd.concat((sig_df[npart],bkg_df[npart]),ignore_index=True)
X=df.values#as_matrix()
type(X)
X.shape
#w=pd.concat((sig_df['ejets_2015'],sig_df['ejets_2016'],sig_df['ejets_2017'],bkg_df['ejets_2015'],bkg_df['ejets_2016'],bkg['ejets_2017']),ignore_index=True).values
type(w)
#Generate an array of truth labels yo distinguish among different classes in the problem
y=[]
for _df, ID in [(sig_df,1),(bkg_df,0)]:
y.extend([ID] * _df.shape[0])
y=np.array(y)
y.shape
ix = range(X.shape[0]) # array of indices | '''
Args:
-----
column: a column of a pandas df whose entries are lists (or regular entries -- in which case nothing is done)
e.g.: my_df['some_variable']
Returns:
--------
flattened out version of the column.
For example, it will turn:
[1791, 2719, 1891]
[1717, 1, 0, 171, 9181, 537, 12]
[82, 11]
...
into:
1791, 2719, 1891, 1717, 1, 0, 171, 9181, 537, 12, 82, 11, ...
''' | identifier_body |
|
testkeras.py | _df['ph_e']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_pt0=pd.Series([i[0] for i in bkg_df['ph_pt']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_eta0=pd.Series([i[0] for i in bkg_df['ph_eta']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_phi0=pd.Series([i[0] for i in bkg_df['ph_phi']],index=bkg_df.index))
#Weight placeholders
#bkgw =bkg_df.loc[:,'mujets_2015']+ bkg_df.loc[:,'mujets_2016']+bkg_df.loc[:,'mujets_2017'] #mu just place holder to set bkgweights to 0 for testing!!! Barkeloo
#sigw =sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017']
#w=pd.concat((sigw,bkgw),ignore_index=True).values
w = pd.concat((sig_df['weight_mc'],bkg_df['weight_mc']),ignore_index=True).values
##can run something like b = root2pandas(fiSig,'nominal', selection = 'ejets_2015 >0||ejets_2016>0') for a selection like in http://scikit-hep.org/root_numpy/start.html#a-quick-tutorial
print sig.keys()
## Names of some event-level branches
npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2','ph_phi0','ph_pt0','ph_eta0','ph_e0']#,'m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2']
#npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','ph_phi0','ph_pt0','ph_eta0','ph_e0']
'''
for key in npart: # loop through the event-level branches and plot them on separate histograms
# -- set font and canvas size (optional)
matplotlib.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(8,8), dpi=100)
# -- declare common binning strategy (otherwise every histogram will have its own binning)
bins = np.linspace(min(sig_df[key]), max(sig_df[key]) + 1, 30)
# plot!
_ = plt.hist(sig_df[key], histtype='step', normed=False, bins=bins, label=r'FCNC', linewidth=2)
_ = plt.hist(bkg_df[key], histtype='step', normed=False, bins=bins, label=r'ttbar')
plt.xlabel(key)
plt.yscale('log')
plt.legend(loc='best')
plt.savefig(str(key)+'.png')
plt.clf()
'''
df_full = pd.concat((sig_df,bkg_df), ignore_index=True)
df = pd.concat((sig_df[npart],bkg_df[npart]),ignore_index=True)
X=df.values#as_matrix()
type(X)
X.shape
#w=pd.concat((sig_df['ejets_2015'],sig_df['ejets_2016'],sig_df['ejets_2017'],bkg_df['ejets_2015'],bkg_df['ejets_2016'],bkg['ejets_2017']),ignore_index=True).values
type(w)
#Generate an array of truth labels yo distinguish among different classes in the problem
y=[]
for _df, ID in [(sig_df,1),(bkg_df,0)]:
y.extend([ID] * _df.shape[0])
y=np.array(y)
y.shape
ix = range(X.shape[0]) # array of indices, just to keep track of them for safety reasons and future checks
#X_train, X_test, y_train, y_test, w_train, w_test, ix_train, ix_test = train_test_split(X, y, w, ix, train_size=0.8)
######## 80% Train+Validate, 20% test
X_train, X_test, \
y_train, y_test, \
ix_train, ix_test\
= train_test_split(X, y, ix, test_size=0.2)
# 64% train, 16% validate, 20% of original 80%
X_train, X_val,\
y_train, y_val,\
ix_train, ix_val\
=train_test_split(X_train,y_train,ix_train,test_size=0.2)
print "Scaling \n"
from sklearn.preprocessing import StandardScaler, RobustScaler
scaler = StandardScaler()
#scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
from keras.models import Model
from keras.layers import Dense, Dropout, Input
inputs = Input(shape=(X_train.shape[1], )) # placeholder
n = X_train.shape[1]
hidden = Dense(n+1, activation='relu')(inputs)
hidden = Dropout(0.2)(hidden)
hidden = Dense(2*n+2, activation='relu')(hidden)
hidden = Dropout(0.2)(hidden)
#hidden = Dense(4*n+4, activation='relu')(hidden)
#hidden = Dropout(0.2)(hidden)
outputs = Dense(1, activation='sigmoid')(hidden)
#outputs = Dense(2, activation='softmax')(hidden) #needs as many
# last layer has to have the same dimensionality as the number of classes we want to predict, here 2
model = Model(inputs, outputs)
model.summary()
from keras.utils.vis_utils import plot_model
#plot_model(model, 'temp.png', show_shapes=True)
#model.compile('adam','sparse_categorical_crossentropy', metrics=['acc'])
model.compile('adam','binary_crossentropy', metrics=['acc'])
from keras.callbacks import EarlyStopping, ModelCheckpoint
from collections import Counter
print "NEvents to train over: ", Counter(y_train)
print "NEvents to test over: ", Counter(y_test)
print "Training: "
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced',
np.unique(y_train),
y_train)
class_weight_dict = dict(enumerate(class_weights))
try:
model.fit(
X_train, y_train,# class_weight= class_weight_dict,# class_weight={ # rebalance class representation
# 0 : 0.70 * (float(len(y)) / (y == 0).sum()),
# 1 : 0.30 * (float(len(y)) / (y == 1).sum()) #These are some sort of weights. seems weird to have to do this, basically what youre training on I think
### 2 : 0.40 * (float(len(y)) / (y == 2).sum())
# },
callbacks = [
EarlyStopping(verbose=True, patience=15, monitor='val_loss'),
ModelCheckpoint('./models/tutorial-progress.h5', monitor='val_loss', verbose=True, save_best_only=True)
],
epochs=200,
validation_data=(X_val, y_val)
# validation_split = 0.3,
# verbose=True
)
except KeyboardInterrupt:
print 'Training ended early.'
model.load_weights('./models/tutorial-progress.h5')
#################
# Visualization of model history
history = model.history.history
print "history keys: ", history.keys()
#Accuracy plot
plt.plot(100 * np.array(history['acc']), label='training')
plt.plot(100 * np.array(history['val_acc']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('accuracy %')
plt.legend(loc='lower right', fontsize=20)
plt.savefig('accuarcy.png')
plt.close()
#loss plot
plt.plot(100 * np.array(history['loss']), label='training')
plt.plot(100 * np.array(history['val_loss']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='upper right', fontsize=20)
# the line indicate the epoch corresponding to the best performance on the validation set
# plt.vlines(np.argmin(history['val_loss']), 45, 56, linestyle='dashed', linewidth=0.5)
plt.savefig('loss.png')
plt.close()
print 'Loss estimate on unseen examples (from validation set) = {0:.3f}'.format(np.min(history['val_loss']))
############################################################
###############
# -- Save network weights and structure
print 'Saving model...'
model.save_weights('./models/tutorial.h5', overwrite=True)
json_string = model.to_json()
open('./models/tutorial.json', 'w').write(json_string)
print 'Done'
print 'Testing...'
yhat = model.predict(X_test, verbose = True, batch_size = 512)
print "yhat: ", yhat
yhat_cls = np.argmax(yhat, axis=1)
import itertools
from sklearn.metrics import confusion_matrix
def | plot_confusion_matrix | identifier_name |
|
runner.rs | Cell, sync::mpsc::Receiver};
use skulpin_renderer::{ash, LogicalSize, RendererBuilder};
use ash::vk::Result as VkResult;
use crate::skia::{Color, Matrix, Picture, PictureRecorder, Point, Rect, Size};
use super::input::{EventHandleResult, InputState};
use super::time::TimeState;
use super::Game;
use super::{default_font_set::DefaultFontSet, FontSet};
use sdl2::event::Event as Sdl2Event;
use skulpin_renderer_sdl2::{sdl2, Sdl2Window};
enum Event {
Sdl2Event(Sdl2Event),
Crash(Error),
}
enum FeedbackEvent {
Exit,
}
#[derive(Debug)]
pub enum Error {
RendererError(VkResult),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match self {
Error::RendererError(e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::RendererError(e) => Some(e),
}
}
}
impl From<VkResult> for Error {
fn from(result: VkResult) -> Self {
Error::RendererError(result)
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ID(u64);
impl ID {
pub fn next() -> Self {
Self(State::with_mut(|x| {
let id = x.id_keeper;
x.id_keeper += 1;
id
}))
}
}
pub struct State {
pub input_state: InputState,
pub time_state: TimeState,
pub time_state_draw: TimeState,
pub font_set: Box<dyn FontSet>,
id_keeper: u64,
}
impl State {
const PANIC_MESSAGE: &'static str = "Attempt to get game state while game is uninitialised";
thread_local!(pub static STATE: RefCell<Option<State>> = RefCell::new(None));
#[inline]
pub fn with<F, R>(f: F) -> R
where | #[inline]
pub fn with_mut<F, R>(f: F) -> R
where
F: FnOnce(&mut State) -> R,
{
Self::STATE.with(|x| f(x.borrow_mut().as_mut().expect(Self::PANIC_MESSAGE)))
}
pub fn last_update_time() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.last_update_time()
})
}
pub fn elapsed() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.elapsed()
})
}
pub fn last_update_time_draw() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state_draw
.last_update_time()
})
}
pub fn mouse_position() -> Point {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.input_state
.mouse_position
})
}
}
pub struct Runner;
impl Runner {
pub const PIC_QUEUE_LENGTH: usize = 1;
pub const EVENT_QUEUE_SIZE: usize = 8;
pub const FEEDBACK_QUEUE_SIZE: usize = 1;
pub const BACKGROUND: Color = Color::from_argb(255, 10, 10, 10);
pub fn run<F, T>(
game: F,
inner_size: LogicalSize,
window_title: &str,
renderer_builder: RendererBuilder,
) where
F: 'static + Send + FnOnce() -> T,
T: Game,
{
let sdl_context = sdl2::init().expect("Failed to initialize SDL2");
let video_subsystem = sdl_context
.video()
.expect("Failed to create SDL2 video subsystem");
let sdl_window = video_subsystem
.window(window_title, inner_size.width, inner_size.height)
.resizable()
.build()
.expect("Failed to create game window");
let window = Sdl2Window::new(&sdl_window);
sdl_context.mouse().show_cursor(false);
let (pic_tx, pic_rx) = sync_channel(Self::PIC_QUEUE_LENGTH);
let (event_tx, event_rx) = sync_channel(Self::EVENT_QUEUE_SIZE);
let (feedback_tx, feedback_rx) = sync_channel(Self::FEEDBACK_QUEUE_SIZE);
spawn(move || {
gstreamer::init().expect("Failed to initialize GStreamer");
let input_state = InputState::new(inner_size);
let time_state = TimeState::new();
let time_state_draw = TimeState::new();
State::STATE.with(|x| {
*x.borrow_mut() = Some(State {
input_state,
time_state,
time_state_draw,
font_set: Box::new(DefaultFontSet::new()),
id_keeper: 0,
});
});
let mut game = game();
game.set_size(
State::STATE.with(|x| x.borrow().as_ref().unwrap().input_state.window_size),
);
Self::game_thread(game, event_rx, pic_tx, feedback_tx);
});
let mut renderer = renderer_builder
.build(&window)
.expect("Failed to create renderer");
let mut event_pump = sdl_context
.event_pump()
.expect("Failed to create SDL2 event pump");
'events: loop {
match feedback_rx.try_recv() {
Ok(event) => match event {
FeedbackEvent::Exit => {
break 'events;
}
},
Err(e) => match e {
TryRecvError::Empty => {
for event in event_pump.poll_iter() {
if event_tx.send(Event::Sdl2Event(event)).is_err() {
break 'events;
}
}
match pic_rx.try_recv() {
Ok(pic) => {
if let Err(e) = renderer.draw(&window, |canvas, _| {
canvas.clear(Self::BACKGROUND);
canvas.draw_picture(pic, Some(&Matrix::default()), None);
}) {
let _ = event_tx.send(Event::Crash(e.into()));
break 'events;
}
}
Err(e) => match e {
TryRecvError::Empty => sleep(Duration::MILLISECOND),
TryRecvError::Disconnected => break 'events,
},
}
}
TryRecvError::Disconnected => break 'events,
},
}
}
}
fn game_thread(
mut game: impl Game,
event_rx: Receiver<Event>,
pic_tx: SyncSender<Picture>,
feedback_tx: SyncSender<FeedbackEvent>,
) {
let target_update_time = Duration::MILLISECOND; // 1000 fps
let target_frame_time = Duration::MILLISECOND * 8; // 120 fps
let mut last_frame = Instant::now();
loop {
game.update();
let mut is_redraw = false;
// TODO: is this loop the cause of bad VSync?
loop {
match event_rx.try_recv() {
Ok(event) => {
if Self::handle_event(&mut game, event, &feedback_tx) {
return;
}
}
Err(e) => match e {
TryRecvError::Empty => break,
TryRecvError::Disconnected => return,
},
}
}
let frame_time = last_frame.elapsed();
if frame_time > target_frame_time {
last_frame = Instant::now() - (frame_time - target_frame_time);
is_redraw = true;
let mut rec = PictureRecorder::new();
let bounds = Rect::from_size(State::with(|x| {
let w = x.input_state.window_size;
(w.width, w.height)
}));
let canvas = rec.begin_recording(bounds, None);
game.draw(canvas);
if let Err(why) = pic_tx.try_send(
rec.finish_recording_as_picture(None)
.expect("Failed to finish recording picture while rendering"),
) {
match why {
// Skip any unsent frames, just in case the renderer
// fails to catch up, and to prevent lockups.
TrySendError::Full(_) => {}
TrySendError::Disconnected(_) => {
panic!("Failed to send canvas to draw thread (disconnected channel)")
}
}
}
State::with_mut(|x| x.time_state_draw.update());
}
State::with_mut(|state| {
if !is_redraw {
let update_time = state.time_state.last_update().elapsed();
if target_update_time > update_time {
sleep(target_update_time - update_time);
}
}
state.time_state.update();
});
}
}
fn handle_event(
game: &mut impl Game,
event: Event,
feedback_tx: &SyncSender<FeedbackEvent>,
) -> bool {
match event {
Event::Sdl2Event(event) => {
if let Some | F: FnOnce(&State) -> R,
{
Self::STATE.with(|x| f(x.borrow().as_ref().expect(Self::PANIC_MESSAGE)))
}
| random_line_split |
runner.rs | , sync::mpsc::Receiver};
use skulpin_renderer::{ash, LogicalSize, RendererBuilder};
use ash::vk::Result as VkResult;
use crate::skia::{Color, Matrix, Picture, PictureRecorder, Point, Rect, Size};
use super::input::{EventHandleResult, InputState};
use super::time::TimeState;
use super::Game;
use super::{default_font_set::DefaultFontSet, FontSet};
use sdl2::event::Event as Sdl2Event;
use skulpin_renderer_sdl2::{sdl2, Sdl2Window};
enum Event {
Sdl2Event(Sdl2Event),
Crash(Error),
}
enum FeedbackEvent {
Exit,
}
#[derive(Debug)]
pub enum Error {
RendererError(VkResult),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match self {
Error::RendererError(e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> |
}
impl From<VkResult> for Error {
fn from(result: VkResult) -> Self {
Error::RendererError(result)
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ID(u64);
impl ID {
pub fn next() -> Self {
Self(State::with_mut(|x| {
let id = x.id_keeper;
x.id_keeper += 1;
id
}))
}
}
pub struct State {
pub input_state: InputState,
pub time_state: TimeState,
pub time_state_draw: TimeState,
pub font_set: Box<dyn FontSet>,
id_keeper: u64,
}
impl State {
const PANIC_MESSAGE: &'static str = "Attempt to get game state while game is uninitialised";
thread_local!(pub static STATE: RefCell<Option<State>> = RefCell::new(None));
#[inline]
pub fn with<F, R>(f: F) -> R
where
F: FnOnce(&State) -> R,
{
Self::STATE.with(|x| f(x.borrow().as_ref().expect(Self::PANIC_MESSAGE)))
}
#[inline]
pub fn with_mut<F, R>(f: F) -> R
where
F: FnOnce(&mut State) -> R,
{
Self::STATE.with(|x| f(x.borrow_mut().as_mut().expect(Self::PANIC_MESSAGE)))
}
pub fn last_update_time() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.last_update_time()
})
}
pub fn elapsed() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.elapsed()
})
}
pub fn last_update_time_draw() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state_draw
.last_update_time()
})
}
pub fn mouse_position() -> Point {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.input_state
.mouse_position
})
}
}
pub struct Runner;
impl Runner {
pub const PIC_QUEUE_LENGTH: usize = 1;
pub const EVENT_QUEUE_SIZE: usize = 8;
pub const FEEDBACK_QUEUE_SIZE: usize = 1;
pub const BACKGROUND: Color = Color::from_argb(255, 10, 10, 10);
pub fn run<F, T>(
game: F,
inner_size: LogicalSize,
window_title: &str,
renderer_builder: RendererBuilder,
) where
F: 'static + Send + FnOnce() -> T,
T: Game,
{
let sdl_context = sdl2::init().expect("Failed to initialize SDL2");
let video_subsystem = sdl_context
.video()
.expect("Failed to create SDL2 video subsystem");
let sdl_window = video_subsystem
.window(window_title, inner_size.width, inner_size.height)
.resizable()
.build()
.expect("Failed to create game window");
let window = Sdl2Window::new(&sdl_window);
sdl_context.mouse().show_cursor(false);
let (pic_tx, pic_rx) = sync_channel(Self::PIC_QUEUE_LENGTH);
let (event_tx, event_rx) = sync_channel(Self::EVENT_QUEUE_SIZE);
let (feedback_tx, feedback_rx) = sync_channel(Self::FEEDBACK_QUEUE_SIZE);
spawn(move || {
gstreamer::init().expect("Failed to initialize GStreamer");
let input_state = InputState::new(inner_size);
let time_state = TimeState::new();
let time_state_draw = TimeState::new();
State::STATE.with(|x| {
*x.borrow_mut() = Some(State {
input_state,
time_state,
time_state_draw,
font_set: Box::new(DefaultFontSet::new()),
id_keeper: 0,
});
});
let mut game = game();
game.set_size(
State::STATE.with(|x| x.borrow().as_ref().unwrap().input_state.window_size),
);
Self::game_thread(game, event_rx, pic_tx, feedback_tx);
});
let mut renderer = renderer_builder
.build(&window)
.expect("Failed to create renderer");
let mut event_pump = sdl_context
.event_pump()
.expect("Failed to create SDL2 event pump");
'events: loop {
match feedback_rx.try_recv() {
Ok(event) => match event {
FeedbackEvent::Exit => {
break 'events;
}
},
Err(e) => match e {
TryRecvError::Empty => {
for event in event_pump.poll_iter() {
if event_tx.send(Event::Sdl2Event(event)).is_err() {
break 'events;
}
}
match pic_rx.try_recv() {
Ok(pic) => {
if let Err(e) = renderer.draw(&window, |canvas, _| {
canvas.clear(Self::BACKGROUND);
canvas.draw_picture(pic, Some(&Matrix::default()), None);
}) {
let _ = event_tx.send(Event::Crash(e.into()));
break 'events;
}
}
Err(e) => match e {
TryRecvError::Empty => sleep(Duration::MILLISECOND),
TryRecvError::Disconnected => break 'events,
},
}
}
TryRecvError::Disconnected => break 'events,
},
}
}
}
fn game_thread(
mut game: impl Game,
event_rx: Receiver<Event>,
pic_tx: SyncSender<Picture>,
feedback_tx: SyncSender<FeedbackEvent>,
) {
let target_update_time = Duration::MILLISECOND; // 1000 fps
let target_frame_time = Duration::MILLISECOND * 8; // 120 fps
let mut last_frame = Instant::now();
loop {
game.update();
let mut is_redraw = false;
// TODO: is this loop the cause of bad VSync?
loop {
match event_rx.try_recv() {
Ok(event) => {
if Self::handle_event(&mut game, event, &feedback_tx) {
return;
}
}
Err(e) => match e {
TryRecvError::Empty => break,
TryRecvError::Disconnected => return,
},
}
}
let frame_time = last_frame.elapsed();
if frame_time > target_frame_time {
last_frame = Instant::now() - (frame_time - target_frame_time);
is_redraw = true;
let mut rec = PictureRecorder::new();
let bounds = Rect::from_size(State::with(|x| {
let w = x.input_state.window_size;
(w.width, w.height)
}));
let canvas = rec.begin_recording(bounds, None);
game.draw(canvas);
if let Err(why) = pic_tx.try_send(
rec.finish_recording_as_picture(None)
.expect("Failed to finish recording picture while rendering"),
) {
match why {
// Skip any unsent frames, just in case the renderer
// fails to catch up, and to prevent lockups.
TrySendError::Full(_) => {}
TrySendError::Disconnected(_) => {
panic!("Failed to send canvas to draw thread (disconnected channel)")
}
}
}
State::with_mut(|x| x.time_state_draw.update());
}
State::with_mut(|state| {
if !is_redraw {
let update_time = state.time_state.last_update().elapsed();
if target_update_time > update_time {
sleep(target_update_time - update_time);
}
}
state.time_state.update();
});
}
}
fn handle_event(
game: &mut impl Game,
event: Event,
feedback_tx: &SyncSender<FeedbackEvent>,
) -> bool {
match event {
Event::Sdl2Event(event) => {
if | {
match self {
Error::RendererError(e) => Some(e),
}
} | identifier_body |
runner.rs | , sync::mpsc::Receiver};
use skulpin_renderer::{ash, LogicalSize, RendererBuilder};
use ash::vk::Result as VkResult;
use crate::skia::{Color, Matrix, Picture, PictureRecorder, Point, Rect, Size};
use super::input::{EventHandleResult, InputState};
use super::time::TimeState;
use super::Game;
use super::{default_font_set::DefaultFontSet, FontSet};
use sdl2::event::Event as Sdl2Event;
use skulpin_renderer_sdl2::{sdl2, Sdl2Window};
enum Event {
Sdl2Event(Sdl2Event),
Crash(Error),
}
enum FeedbackEvent {
Exit,
}
#[derive(Debug)]
pub enum Error {
RendererError(VkResult),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match self {
Error::RendererError(e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::RendererError(e) => Some(e),
}
}
}
impl From<VkResult> for Error {
fn from(result: VkResult) -> Self {
Error::RendererError(result)
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ID(u64);
impl ID {
pub fn next() -> Self {
Self(State::with_mut(|x| {
let id = x.id_keeper;
x.id_keeper += 1;
id
}))
}
}
pub struct State {
pub input_state: InputState,
pub time_state: TimeState,
pub time_state_draw: TimeState,
pub font_set: Box<dyn FontSet>,
id_keeper: u64,
}
impl State {
const PANIC_MESSAGE: &'static str = "Attempt to get game state while game is uninitialised";
thread_local!(pub static STATE: RefCell<Option<State>> = RefCell::new(None));
#[inline]
pub fn with<F, R>(f: F) -> R
where
F: FnOnce(&State) -> R,
{
Self::STATE.with(|x| f(x.borrow().as_ref().expect(Self::PANIC_MESSAGE)))
}
#[inline]
pub fn with_mut<F, R>(f: F) -> R
where
F: FnOnce(&mut State) -> R,
{
Self::STATE.with(|x| f(x.borrow_mut().as_mut().expect(Self::PANIC_MESSAGE)))
}
pub fn last_update_time() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.last_update_time()
})
}
pub fn | () -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.elapsed()
})
}
pub fn last_update_time_draw() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state_draw
.last_update_time()
})
}
pub fn mouse_position() -> Point {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.input_state
.mouse_position
})
}
}
pub struct Runner;
impl Runner {
pub const PIC_QUEUE_LENGTH: usize = 1;
pub const EVENT_QUEUE_SIZE: usize = 8;
pub const FEEDBACK_QUEUE_SIZE: usize = 1;
pub const BACKGROUND: Color = Color::from_argb(255, 10, 10, 10);
pub fn run<F, T>(
game: F,
inner_size: LogicalSize,
window_title: &str,
renderer_builder: RendererBuilder,
) where
F: 'static + Send + FnOnce() -> T,
T: Game,
{
let sdl_context = sdl2::init().expect("Failed to initialize SDL2");
let video_subsystem = sdl_context
.video()
.expect("Failed to create SDL2 video subsystem");
let sdl_window = video_subsystem
.window(window_title, inner_size.width, inner_size.height)
.resizable()
.build()
.expect("Failed to create game window");
let window = Sdl2Window::new(&sdl_window);
sdl_context.mouse().show_cursor(false);
let (pic_tx, pic_rx) = sync_channel(Self::PIC_QUEUE_LENGTH);
let (event_tx, event_rx) = sync_channel(Self::EVENT_QUEUE_SIZE);
let (feedback_tx, feedback_rx) = sync_channel(Self::FEEDBACK_QUEUE_SIZE);
spawn(move || {
gstreamer::init().expect("Failed to initialize GStreamer");
let input_state = InputState::new(inner_size);
let time_state = TimeState::new();
let time_state_draw = TimeState::new();
State::STATE.with(|x| {
*x.borrow_mut() = Some(State {
input_state,
time_state,
time_state_draw,
font_set: Box::new(DefaultFontSet::new()),
id_keeper: 0,
});
});
let mut game = game();
game.set_size(
State::STATE.with(|x| x.borrow().as_ref().unwrap().input_state.window_size),
);
Self::game_thread(game, event_rx, pic_tx, feedback_tx);
});
let mut renderer = renderer_builder
.build(&window)
.expect("Failed to create renderer");
let mut event_pump = sdl_context
.event_pump()
.expect("Failed to create SDL2 event pump");
'events: loop {
match feedback_rx.try_recv() {
Ok(event) => match event {
FeedbackEvent::Exit => {
break 'events;
}
},
Err(e) => match e {
TryRecvError::Empty => {
for event in event_pump.poll_iter() {
if event_tx.send(Event::Sdl2Event(event)).is_err() {
break 'events;
}
}
match pic_rx.try_recv() {
Ok(pic) => {
if let Err(e) = renderer.draw(&window, |canvas, _| {
canvas.clear(Self::BACKGROUND);
canvas.draw_picture(pic, Some(&Matrix::default()), None);
}) {
let _ = event_tx.send(Event::Crash(e.into()));
break 'events;
}
}
Err(e) => match e {
TryRecvError::Empty => sleep(Duration::MILLISECOND),
TryRecvError::Disconnected => break 'events,
},
}
}
TryRecvError::Disconnected => break 'events,
},
}
}
}
fn game_thread(
mut game: impl Game,
event_rx: Receiver<Event>,
pic_tx: SyncSender<Picture>,
feedback_tx: SyncSender<FeedbackEvent>,
) {
let target_update_time = Duration::MILLISECOND; // 1000 fps
let target_frame_time = Duration::MILLISECOND * 8; // 120 fps
let mut last_frame = Instant::now();
loop {
game.update();
let mut is_redraw = false;
// TODO: is this loop the cause of bad VSync?
loop {
match event_rx.try_recv() {
Ok(event) => {
if Self::handle_event(&mut game, event, &feedback_tx) {
return;
}
}
Err(e) => match e {
TryRecvError::Empty => break,
TryRecvError::Disconnected => return,
},
}
}
let frame_time = last_frame.elapsed();
if frame_time > target_frame_time {
last_frame = Instant::now() - (frame_time - target_frame_time);
is_redraw = true;
let mut rec = PictureRecorder::new();
let bounds = Rect::from_size(State::with(|x| {
let w = x.input_state.window_size;
(w.width, w.height)
}));
let canvas = rec.begin_recording(bounds, None);
game.draw(canvas);
if let Err(why) = pic_tx.try_send(
rec.finish_recording_as_picture(None)
.expect("Failed to finish recording picture while rendering"),
) {
match why {
// Skip any unsent frames, just in case the renderer
// fails to catch up, and to prevent lockups.
TrySendError::Full(_) => {}
TrySendError::Disconnected(_) => {
panic!("Failed to send canvas to draw thread (disconnected channel)")
}
}
}
State::with_mut(|x| x.time_state_draw.update());
}
State::with_mut(|state| {
if !is_redraw {
let update_time = state.time_state.last_update().elapsed();
if target_update_time > update_time {
sleep(target_update_time - update_time);
}
}
state.time_state.update();
});
}
}
fn handle_event(
game: &mut impl Game,
event: Event,
feedback_tx: &SyncSender<FeedbackEvent>,
) -> bool {
match event {
Event::Sdl2Event(event) => {
if let | elapsed | identifier_name |
humantoken.rs | os("1.1 f", 1100);
/// assert_attos("1.0e3 attofil", 1000);
/// ```
///
/// # Known bugs
/// - `1efil` will not parse as an exa (`10^18`), because we'll try and
/// parse it as a exponent in the float. Instead use `1 efil`.
pub fn parse(input: &str) -> anyhow::Result<TokenAmount> {
let (mut big_decimal, scale) = parse_big_decimal_and_scale(input)?;
if let Some(scale) = scale {
big_decimal *= scale.multiplier();
}
let fil = big_decimal;
let attos = fil * si::atto.multiplier().inverse();
if !attos.is_integer() {
bail!("sub-atto amounts are not allowed");
}
let (attos, scale) = attos.with_scale(0).into_bigint_and_exponent();
assert_eq!(scale, 0, "we've just set the scale!");
Ok(TokenAmount::from_atto(attos))
}
fn nom2anyhow(e: nom::Err<nom::error::VerboseError<&str>>) -> anyhow::Error {
anyhow!("parse error: {e}")
}
fn parse_big_decimal_and_scale(
input: &str,
) -> anyhow::Result<(BigDecimal, Option<si::Prefix>)> {
// Strip `fil` or `FIL` at most once from the end
let input = match (input.strip_suffix("FIL"), input.strip_suffix("fil")) {
// remove whitespace before the units if there was any
(Some(stripped), _) => stripped.trim_end(),
(_, Some(stripped)) => stripped.trim_end(),
_ => input,
};
let (input, big_decimal) = permit_trailing_ws(bigdecimal)(input).map_err(nom2anyhow)?;
let (input, scale) = opt(permit_trailing_ws(si_scale))(input).map_err(nom2anyhow)?;
if !input.is_empty() {
bail!("Unexpected trailing input: {input}")
}
Ok((big_decimal, scale))
}
fn permit_trailing_ws<'a, F, O, E: ParseError<&'a str>>(
inner: F,
) -> impl FnMut(&'a str) -> IResult<&'a str, O, E>
where
F: FnMut(&'a str) -> IResult<&'a str, O, E>,
{
terminated(inner, multispace0)
}
/// Take an [si::Prefix] from the front of `input`
fn si_scale<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, si::Prefix, E> {
// Try the longest matches first, so we don't e.g match `a` instead of `atto`,
// leaving `tto`.
let mut scales = si::SUPPORTED_PREFIXES
.iter()
.flat_map(|scale| {
std::iter::once(&scale.name)
.chain(scale.units)
.map(move |prefix| (*prefix, scale))
})
.collect::<Vec<_>>();
scales.sort_by_key(|(prefix, _)| std::cmp::Reverse(*prefix));
for (prefix, scale) in scales {
if let Ok((rem, _prefix)) = tag::<_, _, E>(prefix)(input) {
return Ok((rem, *scale));
}
}
Err(nom::Err::Error(E::from_error_kind(
input,
nom::error::ErrorKind::Alt,
)))
}
/// Take a float from the front of `input`
fn bigdecimal<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, BigDecimal, E>
where
E: FromExternalError<&'a str, ParseBigDecimalError>,
{
map_res(recognize_float, str::parse)(input)
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::{BigInt, One as _};
use super::*;
#[test]
fn cover_scales() {
for scale in si::SUPPORTED_PREFIXES {
let _did_not_panic = scale.multiplier();
}
}
#[test]
fn parse_bigdecimal() {
fn do_test(input: &str, expected: &str) {
let expected = BigDecimal::from_str(expected).unwrap();
let (rem, actual) = bigdecimal::<nom::error::VerboseError<_>>(input).unwrap();
assert_eq!(expected, actual);
assert!(rem.is_empty());
}
do_test("1", "1");
do_test("0.1", "0.1");
do_test(".1", ".1");
do_test("1e1", "10");
do_test("1.", "1");
}
fn test_dec_scale(
input: &str,
expected_amount: &str,
expected_scale: impl Into<Option<si::Prefix>>,
) {
let expected_amount = BigDecimal::from_str(expected_amount).unwrap();
let expected_scale = expected_scale.into();
let (actual_amount, actual_scale) = parse_big_decimal_and_scale(input).unwrap();
assert_eq!(expected_amount, actual_amount, "{input}");
assert_eq!(expected_scale, actual_scale, "{input}");
}
#[test]
fn basic_bigdecimal_and_scale() {
// plain
test_dec_scale("1", "1", None);
// include unit
test_dec_scale("1 FIL", "1", None);
test_dec_scale("1FIL", "1", None);
test_dec_scale("1 fil", "1", None);
test_dec_scale("1fil", "1", None);
let possible_units = ["", "fil", "FIL", " fil", " FIL"];
let possible_prefixes = ["atto", "a", " atto", " a"];
for unit in possible_units {
for prefix in possible_prefixes {
let input = format!("1{prefix}{unit}");
test_dec_scale(&input, "1", si::atto)
}
}
}
#[test]
fn parse_exa_and_exponent() {
test_dec_scale("1 E", "1", si::exa);
test_dec_scale("1e0E", "1", si::exa);
// ENHANCE(aatifsyed): this should be parsed as 1 exa, but that
// would probably require an entirely custom float parser with
// lookahead - users will have to include a space for now
// do_test("1E", "1", exa);
}
#[test]
fn more_than_96_bits() {
use std::iter::{once, repeat};
// The previous rust_decimal implementation had at most 96 bits of precision
// we should be able to exceed that
let test_str = once('1')
.chain(repeat('0').take(98))
.chain(['1'])
.collect::<String>();
test_dec_scale(&test_str, &test_str, None);
}
#[test]
fn disallow_too_small() {
parse("1 atto").unwrap();
assert_eq!(
parse("0.1 atto").unwrap_err().to_string(),
"sub-atto amounts are not allowed"
)
}
#[test]
fn some_values() {
let one_atto = TokenAmount::from_atto(BigInt::one());
let one_nano = TokenAmount::from_nano(BigInt::one());
assert_eq!(one_atto, parse("1 atto").unwrap());
assert_eq!(one_atto, parse("1000 zepto").unwrap());
assert_eq!(one_nano, parse("1 nano").unwrap());
}
#[test]
fn all_possible_prefixes() {
for scale in si::SUPPORTED_PREFIXES {
for prefix in scale.units.iter().chain([&scale.name]) {
// Need a space here because of the exa ambiguity
test_dec_scale(&format!("1 {prefix}"), "1", *scale);
}
}
}
}
}
mod print {
use std::fmt;
use crate::shim::econ::TokenAmount;
use bigdecimal::BigDecimal;
use num::{BigInt, Zero as _};
use super::si;
fn scale(n: BigDecimal) -> (BigDecimal, Option<si::Prefix>) {
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent > 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
if n.is_integer() {
return (n, None);
}
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent < 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
let smallest_prefix = si::SUPPORTED_PREFIXES.last().unwrap();
(n / smallest_prefix.multiplier(), Some(*smallest_prefix))
}
pub struct Pr | etty { | identifier_name |
|
humantoken.rs | }
// units or whole
let (print_me, prefix) = match f.alternate() {
true => (fil_for_printing, None),
false => scale(fil_for_printing),
};
// write the string
match print_me.is_zero() {
true => f.write_str("0 FIL"),
false => match prefix {
Some(prefix) => f.write_fmt(format_args!("{print_me} {}FIL", prefix.name)),
None => f.write_fmt(format_args!("{print_me} FIL")),
},
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::One as _;
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn prefixes_represent_themselves() {
for prefix in si::SUPPORTED_PREFIXES {
let input = BigDecimal::from_str(prefix.multiplier).unwrap();
assert_eq!((BigDecimal::one(), Some(*prefix)), scale(input));
}
}
#[test]
fn very_large() {
let mut one_thousand_quettas = String::from(si::quetta.multiplier);
one_thousand_quettas.push_str("000");
test_scale(&one_thousand_quettas, "1000", si::quetta);
}
#[test]
fn very_small() {
let mut one_thousanth_of_a_quecto = String::from(si::quecto.multiplier);
one_thousanth_of_a_quecto.pop();
one_thousanth_of_a_quecto.push_str("0001");
test_scale(&one_thousanth_of_a_quecto, "0.001", si::quecto);
}
fn test_scale(
input: &str,
expected_value: &str,
expected_prefix: impl Into<Option<si::Prefix>>,
) {
let input = BigDecimal::from_str(input).unwrap();
let expected_value = BigDecimal::from_str(expected_value).unwrap();
let expected_prefix = expected_prefix.into();
assert_eq!((expected_value, expected_prefix), scale(input))
}
#[test]
fn simple() {
test_scale("1000000", "1", si::mega);
test_scale("100000", "100", si::kilo);
test_scale("10000", "10", si::kilo);
test_scale("1000", "1", si::kilo);
test_scale("100", "100", None);
test_scale("10", "10", None);
test_scale("1", "1", None);
test_scale("0.1", "100", si::milli);
test_scale("0.01", "10", si::milli);
test_scale("0.001", "1", si::milli);
test_scale("0.0001", "100", si::micro);
}
#[test]
fn trailing_one() {
test_scale("10001000", "10001", si::kilo);
test_scale("10001", "10001", None);
test_scale("1000.1", "1000100", si::milli);
}
fn attos(input: &str) -> TokenAmount {
TokenAmount::from_atto(BigInt::from_str(input).unwrap())
}
fn fils(input: &str) -> TokenAmount {
TokenAmount::from_whole(BigInt::from_str(input).unwrap())
}
#[test]
fn test_display() {
assert_eq!("0 FIL", format!("{}", attos("0").pretty()));
// Absolute works
assert_eq!("1 attoFIL", format!("{}", attos("1").pretty()));
assert_eq!(
"0.000000000000000001 FIL",
format!("{:#}", attos("1").pretty())
);
// We select the right suffix
assert_eq!("1 femtoFIL", format!("{}", attos("1000").pretty()));
assert_eq!("1001 attoFIL", format!("{}", attos("1001").pretty()));
// If you ask for 0 precision, you get it
assert_eq!("~0 FIL", format!("{:.0}", attos("1001").pretty()));
// Rounding without a prefix
assert_eq!("~10 FIL", format!("{:.1}", fils("11").pretty()));
// Rounding with absolute
assert_eq!(
"~0.000000000000002 FIL",
format!("{:#.1}", attos("1940").pretty())
);
assert_eq!(
"~0.0000000000000019 FIL",
format!("{:#.2}", attos("1940").pretty())
);
assert_eq!(
"0.00000000000000194 FIL",
format!("{:#.3}", attos("1940").pretty())
);
// Small numbers with a gap then a trailing one are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.2}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.3}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.4}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.5}", attos("1001").pretty()));
// Small numbers with trailing numbers are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1234").pretty()));
assert_eq!("~1200 attoFIL", format!("{:.2}", attos("1234").pretty()));
assert_eq!("~1230 attoFIL", format!("{:.3}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.4}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.5}", attos("1234").pretty()));
// Small numbers are rounded appropriately
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1900").pretty()));
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1500").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1400").pretty()));
// Big numbers with a gap then a trailing one are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.2}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.3}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.4}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.5}", fils("1001").pretty()));
// Big numbers with trailing numbers are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1234").pretty()));
assert_eq!("~1200 FIL", format!("{:.2}", fils("1234").pretty()));
assert_eq!("~1230 FIL", format!("{:.3}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.4}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.5}", fils("1234").pretty()));
// Big numbers are rounded appropriately
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1900").pretty()));
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1500").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1400").pretty()));
}
}
}
#[cfg(test)]
mod fuzz {
use quickcheck::quickcheck;
use super::*;
quickcheck! {
fn roundtrip(expected: crate::shim::econ::TokenAmount) -> () {
// Default formatting
let actual = parse(&format!("{}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Absolute formatting
let actual = parse(&format!("{:#}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Don't test rounded formatting...
}
}
| quickcheck! {
fn parser_no_panic(s: String) -> () {
let _ = parse(&s);
}
} | random_line_split |
|
humantoken.rs | use crate::shim::econ::TokenAmount;
use anyhow::{anyhow, bail};
use bigdecimal::{BigDecimal, ParseBigDecimalError};
use nom::{
bytes::complete::tag,
character::complete::multispace0,
combinator::{map_res, opt},
error::{FromExternalError, ParseError},
number::complete::recognize_float,
sequence::terminated,
IResult,
};
use super::si;
/// Parse token amounts as floats with SI prefixed-units.
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmount, parse};
/// fn assert_attos(input: &str, attos: u64) {
/// let expected = TokenAmount::from_atto(attos);
/// let actual = parse(input).unwrap();
/// assert_eq!(expected, actual);
/// }
/// assert_attos("1a", 1);
/// assert_attos("1aFIL", 1);
/// assert_attos("1 femtoFIL", 1000);
/// assert_attos("1.1 f", 1100);
/// assert_attos("1.0e3 attofil", 1000);
/// ```
///
/// # Known bugs
/// - `1efil` will not parse as an exa (`10^18`), because we'll try and
/// parse it as a exponent in the float. Instead use `1 efil`.
pub fn parse(input: &str) -> anyhow::Result<TokenAmount> {
let (mut big_decimal, scale) = parse_big_decimal_and_scale(input)?;
if let Some(scale) = scale {
big_decimal *= scale.multiplier();
}
let fil = big_decimal;
let attos = fil * si::atto.multiplier().inverse();
if !attos.is_integer() {
bail!("sub-atto amounts are not allowed");
}
let (attos, scale) = attos.with_scale(0).into_bigint_and_exponent();
assert_eq!(scale, 0, "we've just set the scale!");
Ok(TokenAmount::from_atto(attos))
}
fn nom2anyhow(e: nom::Err<nom::error::VerboseError<&str>>) -> anyhow::Error {
anyhow!("parse error: {e}")
}
fn parse_big_decimal_and_scale(
input: &str,
) -> anyhow::Result<(BigDecimal, Option<si::Prefix>)> {
// Strip `fil` or `FIL` at most once from the end
let input = match (input.strip_suffix("FIL"), input.strip_suffix("fil")) {
// remove whitespace before the units if there was any
(Some(stripped), _) => stripped.trim_end(),
(_, Some(stripped)) => stripped.trim_end(),
_ => input,
};
let (input, big_decimal) = permit_trailing_ws(bigdecimal)(input).map_err(nom2anyhow)?;
let (input, scale) = opt(permit_trailing_ws(si_scale))(input).map_err(nom2anyhow)?;
if !input.is_empty() {
bail!("Unexpected trailing input: {input}")
}
Ok((big_decimal, scale))
}
fn permit_trailing_ws<'a, F, O, E: ParseError<&'a str>>(
inner: F,
) -> impl FnMut(&'a str) -> IResult<&'a str, O, E>
where
F: FnMut(&'a str) -> IResult<&'a str, O, E>,
{
terminated(inner, multispace0)
}
/// Take an [si::Prefix] from the front of `input`
fn si_scale<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, si::Prefix, E> {
| input,
nom::error::ErrorKind::Alt,
)))
}
/// Take a float from the front of `input`
fn bigdecimal<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, BigDecimal, E>
where
E: FromExternalError<&'a str, ParseBigDecimalError>,
{
map_res(recognize_float, str::parse)(input)
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::{BigInt, One as _};
use super::*;
#[test]
fn cover_scales() {
for scale in si::SUPPORTED_PREFIXES {
let _did_not_panic = scale.multiplier();
}
}
#[test]
fn parse_bigdecimal() {
fn do_test(input: &str, expected: &str) {
let expected = BigDecimal::from_str(expected).unwrap();
let (rem, actual) = bigdecimal::<nom::error::VerboseError<_>>(input).unwrap();
assert_eq!(expected, actual);
assert!(rem.is_empty());
}
do_test("1", "1");
do_test("0.1", "0.1");
do_test(".1", ".1");
do_test("1e1", "10");
do_test("1.", "1");
}
fn test_dec_scale(
input: &str,
expected_amount: &str,
expected_scale: impl Into<Option<si::Prefix>>,
) {
let expected_amount = BigDecimal::from_str(expected_amount).unwrap();
let expected_scale = expected_scale.into();
let (actual_amount, actual_scale) = parse_big_decimal_and_scale(input).unwrap();
assert_eq!(expected_amount, actual_amount, "{input}");
assert_eq!(expected_scale, actual_scale, "{input}");
}
#[test]
fn basic_bigdecimal_and_scale() {
// plain
test_dec_scale("1", "1", None);
// include unit
test_dec_scale("1 FIL", "1", None);
test_dec_scale("1FIL", "1", None);
test_dec_scale("1 fil", "1", None);
test_dec_scale("1fil", "1", None);
let possible_units = ["", "fil", "FIL", " fil", " FIL"];
let possible_prefixes = ["atto", "a", " atto", " a"];
for unit in possible_units {
for prefix in possible_prefixes {
let input = format!("1{prefix}{unit}");
test_dec_scale(&input, "1", si::atto)
}
}
}
#[test]
fn parse_exa_and_exponent() {
test_dec_scale("1 E", "1", si::exa);
test_dec_scale("1e0E", "1", si::exa);
// ENHANCE(aatifsyed): this should be parsed as 1 exa, but that
// would probably require an entirely custom float parser with
// lookahead - users will have to include a space for now
// do_test("1E", "1", exa);
}
#[test]
fn more_than_96_bits() {
use std::iter::{once, repeat};
// The previous rust_decimal implementation had at most 96 bits of precision
// we should be able to exceed that
let test_str = once('1')
.chain(repeat('0').take(98))
.chain(['1'])
.collect::<String>();
test_dec_scale(&test_str, &test_str, None);
}
#[test]
fn disallow_too_small() {
parse("1 atto").unwrap();
assert_eq!(
parse("0.1 atto").unwrap_err().to_string(),
"sub-atto amounts are not allowed"
)
}
#[test]
fn some_values() {
let one_atto = TokenAmount::from_atto(BigInt::one());
let one_nano = TokenAmount::from_nano(BigInt::one());
assert_eq!(one_atto, parse("1 atto").unwrap());
assert_eq!(one_atto, parse("1000 zepto").unwrap());
assert_eq!(one_nano, parse("1 nano").unwrap());
}
#[test]
fn all_possible_prefixes() {
for scale in si::SUPPORTED_PREFIXES {
for prefix in scale.units.iter().chain([&scale.name]) {
// Need a space here because of the exa ambiguity
test_dec_scale(&format!("1 {prefix}"), "1", *scale);
}
}
}
}
}
mod print {
| // Try the longest matches first, so we don't e.g match `a` instead of `atto`,
// leaving `tto`.
let mut scales = si::SUPPORTED_PREFIXES
.iter()
.flat_map(|scale| {
std::iter::once(&scale.name)
.chain(scale.units)
.map(move |prefix| (*prefix, scale))
})
.collect::<Vec<_>>();
scales.sort_by_key(|(prefix, _)| std::cmp::Reverse(*prefix));
for (prefix, scale) in scales {
if let Ok((rem, _prefix)) = tag::<_, _, E>(prefix)(input) {
return Ok((rem, *scale));
}
}
Err(nom::Err::Error(E::from_error_kind( | identifier_body |
eccrypto.py | 1) // 4, p)
# 1. By factoring out powers of 2, find Q and S such that p - 1 =
# Q * 2 ** S with Q odd
q = p - 1
s = 0
while q % 2 == 0:
q //= 2
s += 1
# 2. Search for z in Z/pZ which is a quadratic non-residue
z = 1
while legendre(z, p) != -1:
z += 1
m, c, t, r = s, pow(z, q, p), pow(n, q, p), pow(n, (q + 1) // 2, p)
while True:
if t == 0:
return 0
elif t == 1:
return r
# Use repeated squaring to find the least i, 0 < i < M, such
# that t ** (2 ** i) = 1
t_sq = t
i = 0
for i in range(1, m):
t_sq = t_sq * t_sq % p
if t_sq == 1:
break
else:
raise ValueError("Should never get here")
# Let b = c ** (2 ** (m - i - 1))
b = pow(c, 2**(m - i - 1), p)
m = i
c = b * b % p
t = t * b * b % p
r = r * b % p
return r
class JacobianCurve:
def __init__(self, p, n, a, b, g):
self.p = p
self.n = n
self.a = a
self.b = b
self.g = g
self.n_length = len(bin(self.n).replace("0b", ""))
def isinf(self, p):
return p[0] == 0 and p[1] == 0
def to_jacobian(self, p):
return p[0], p[1], 1
def jacobian_double(self, p):
if not p[1]:
return 0, 0, 0
ysq = (p[1]**2) % self.p
s = (4 * p[0] * ysq) % self.p
m = (3 * p[0]**2 + self.a * p[2]**4) % self.p
nx = (m**2 - 2 * s) % self.p
ny = (m * (s - nx) - 8 * ysq**2) % self.p
nz = (2 * p[1] * p[2]) % self.p
return nx, ny, nz
def jacobian_add(self, p, q):
if not p[1]:
return q
if not q[1]:
return p
u1 = (p[0] * q[2]**2) % self.p
u2 = (q[0] * p[2]**2) % self.p
s1 = (p[1] * q[2]**3) % self.p
s2 = (q[1] * p[2]**3) % self.p
if u1 == u2:
if s1 != s2:
return (0, 0, 1)
return self.jacobian_double(p)
h = u2 - u1
r = s2 - s1
h2 = (h * h) % self.p
h3 = (h * h2) % self.p
u1h2 = (u1 * h2) % self.p
nx = (r**2 - h3 - 2 * u1h2) % self.p
ny = (r * (u1h2 - nx) - s1 * h3) % self.p
nz = (h * p[2] * q[2]) % self.p
return (nx, ny, nz)
def from_jacobian(self, p):
z = inverse(p[2], self.p)
return (p[0] * z**2) % self.p, (p[1] * z**3) % self.p
def jacobian_multiply(self, a, n, secret=False):
if a[1] == 0 or n == 0:
return 0, 0, 1
if n == 1:
return a
if n < 0 or n >= self.n:
return self.jacobian_multiply(a, n % self.n, secret)
half = self.jacobian_multiply(a, n // 2, secret)
half_sq = self.jacobian_double(half)
if secret:
# A constant-time implementation
half_sq_a = self.jacobian_add(half_sq, a)
if n % 2 == 0:
result = half_sq
if n % 2 == 1:
result = half_sq_a
return result
else:
if n % 2 == 0:
return half_sq
return self.jacobian_add(half_sq, a)
def fast_multiply(self, a, n, secret=False):
return self.from_jacobian(
self.jacobian_multiply(self.to_jacobian(a), n, secret))
class EllipticCurveBackend:
def __init__(self, p, n, a, b, g):
self.p, self.n, self.a, self.b, self.g = p, n, a, b, g
self.jacobian = JacobianCurve(p, n, a, b, g)
self.public_key_length = (len(bin(p).replace("0b", "")) + 7) // 8
self.order_bitlength = len(bin(n).replace("0b", ""))
def _int_to_bytes(self, raw, len=None):
return int_to_bytes(raw, len or self.public_key_length)
def decompress_point(self, public_key):
# Parse & load data
x = bytes_to_int(public_key[1:])
# Calculate Y
y_square = (pow(x, 3, self.p) + self.a * x + self.b) % self.p
try:
y = square_root_mod_prime(y_square, self.p)
except Exception:
raise ValueError("Invalid public key") from None
if y % 2 != public_key[0] - 0x02:
y = self.p - y
return self._int_to_bytes(x), self._int_to_bytes(y)
def new_private_key(self):
while True:
private_key = os.urandom(self.public_key_length)
if bytes_to_int(private_key) >= self.n:
continue
return private_key
def private_to_public(self, private_key):
raw = bytes_to_int(private_key)
x, y = self.jacobian.fast_multiply(self.g, raw)
return self._int_to_bytes(x), self._int_to_bytes(y)
def ecdh(self, private_key, public_key):
x, y = public_key
x, y = bytes_to_int(x), bytes_to_int(y)
private_key = bytes_to_int(private_key)
x, _ = self.jacobian.fast_multiply((x, y), private_key, secret=True)
return self._int_to_bytes(x)
def _subject_to_int(self, subject):
return bytes_to_int(subject[:(self.order_bitlength + 7) // 8])
class ECC:
# pylint: disable=line-too-long
# name: (nid, p, n, a, b, (Gx, Gy)),
CURVE = (
714,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141, 0,
7,
(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8))
# pylint: enable=line-too-long
def __init__(self, backend, aes):
self._backend = backend
self._aes = aes
def get_curve(self):
nid, p, n, a, b, g = self.CURVE
params = {"p": p, "n": n, "a": a, "b": b, "g": g}
return EllipticCurve(self._backend, params, self._aes, nid)
class EllipticCurve:
def | (self, backend_factory, params, aes, nid):
self._backend = backend_factory(**params)
self.params = params
self._aes = aes
self.nid = nid
| __init__ | identifier_name |
eccrypto.py | 1) // 4, p)
# 1. By factoring out powers of 2, find Q and S such that p - 1 =
# Q * 2 ** S with Q odd
q = p - 1
s = 0
while q % 2 == 0:
q //= 2
s += 1
# 2. Search for z in Z/pZ which is a quadratic non-residue
z = 1
while legendre(z, p) != -1:
z += 1
m, c, t, r = s, pow(z, q, p), pow(n, q, p), pow(n, (q + 1) // 2, p)
while True:
if t == 0:
return 0
elif t == 1:
return r
# Use repeated squaring to find the least i, 0 < i < M, such
# that t ** (2 ** i) = 1
t_sq = t
i = 0
for i in range(1, m):
t_sq = t_sq * t_sq % p
if t_sq == 1:
break
else:
raise ValueError("Should never get here")
# Let b = c ** (2 ** (m - i - 1))
b = pow(c, 2**(m - i - 1), p)
m = i
c = b * b % p
t = t * b * b % p
r = r * b % p
return r
class JacobianCurve:
def __init__(self, p, n, a, b, g):
self.p = p
self.n = n
self.a = a
self.b = b
self.g = g
self.n_length = len(bin(self.n).replace("0b", ""))
def isinf(self, p):
return p[0] == 0 and p[1] == 0
def to_jacobian(self, p):
return p[0], p[1], 1
def jacobian_double(self, p):
if not p[1]:
return 0, 0, 0
ysq = (p[1]**2) % self.p
s = (4 * p[0] * ysq) % self.p
m = (3 * p[0]**2 + self.a * p[2]**4) % self.p
nx = (m**2 - 2 * s) % self.p
ny = (m * (s - nx) - 8 * ysq**2) % self.p
nz = (2 * p[1] * p[2]) % self.p
return nx, ny, nz
def jacobian_add(self, p, q):
if not p[1]:
return q
if not q[1]:
return p
u1 = (p[0] * q[2]**2) % self.p
u2 = (q[0] * p[2]**2) % self.p
s1 = (p[1] * q[2]**3) % self.p
s2 = (q[1] * p[2]**3) % self.p
if u1 == u2:
if s1 != s2:
return (0, 0, 1)
return self.jacobian_double(p)
h = u2 - u1
r = s2 - s1
h2 = (h * h) % self.p
h3 = (h * h2) % self.p
u1h2 = (u1 * h2) % self.p
nx = (r**2 - h3 - 2 * u1h2) % self.p
ny = (r * (u1h2 - nx) - s1 * h3) % self.p
nz = (h * p[2] * q[2]) % self.p
return (nx, ny, nz)
def from_jacobian(self, p):
z = inverse(p[2], self.p)
return (p[0] * z**2) % self.p, (p[1] * z**3) % self.p
def jacobian_multiply(self, a, n, secret=False):
if a[1] == 0 or n == 0:
return 0, 0, 1
if n == 1:
return a
if n < 0 or n >= self.n:
return self.jacobian_multiply(a, n % self.n, secret)
half = self.jacobian_multiply(a, n // 2, secret)
half_sq = self.jacobian_double(half)
if secret:
# A constant-time implementation
half_sq_a = self.jacobian_add(half_sq, a)
if n % 2 == 0:
result = half_sq
if n % 2 == 1:
result = half_sq_a
return result
else:
if n % 2 == 0:
return half_sq
return self.jacobian_add(half_sq, a)
def fast_multiply(self, a, n, secret=False):
return self.from_jacobian(
self.jacobian_multiply(self.to_jacobian(a), n, secret))
class EllipticCurveBackend:
def __init__(self, p, n, a, b, g):
self.p, self.n, self.a, self.b, self.g = p, n, a, b, g
self.jacobian = JacobianCurve(p, n, a, b, g)
self.public_key_length = (len(bin(p).replace("0b", "")) + 7) // 8
self.order_bitlength = len(bin(n).replace("0b", ""))
def _int_to_bytes(self, raw, len=None):
return int_to_bytes(raw, len or self.public_key_length)
def decompress_point(self, public_key):
# Parse & load data
|
def new_private_key(self):
while True:
private_key = os.urandom(self.public_key_length)
if bytes_to_int(private_key) >= self.n:
continue
return private_key
def private_to_public(self, private_key):
raw = bytes_to_int(private_key)
x, y = self.jacobian.fast_multiply(self.g, raw)
return self._int_to_bytes(x), self._int_to_bytes(y)
def ecdh(self, private_key, public_key):
x, y = public_key
x, y = bytes_to_int(x), bytes_to_int(y)
private_key = bytes_to_int(private_key)
x, _ = self.jacobian.fast_multiply((x, y), private_key, secret=True)
return self._int_to_bytes(x)
def _subject_to_int(self, subject):
return bytes_to_int(subject[:(self.order_bitlength + 7) // 8])
class ECC:
# pylint: disable=line-too-long
# name: (nid, p, n, a, b, (Gx, Gy)),
CURVE = (
714,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141, 0,
7,
(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8))
# pylint: enable=line-too-long
def __init__(self, backend, aes):
self._backend = backend
self._aes = aes
def get_curve(self):
nid, p, n, a, b, g = self.CURVE
params = {"p": p, "n": n, "a": a, "b": b, "g": g}
return EllipticCurve(self._backend, params, self._aes, nid)
class EllipticCurve:
def __init__(self, backend_factory, params, aes, nid):
self._backend = backend_factory(**params)
self.params = params
self._aes = aes
self.nid = nid
| x = bytes_to_int(public_key[1:])
# Calculate Y
y_square = (pow(x, 3, self.p) + self.a * x + self.b) % self.p
try:
y = square_root_mod_prime(y_square, self.p)
except Exception:
raise ValueError("Invalid public key") from None
if y % 2 != public_key[0] - 0x02:
y = self.p - y
return self._int_to_bytes(x), self._int_to_bytes(y) | identifier_body |
eccrypto.py | return 0, 0, 0
ysq = (p[1]**2) % self.p
s = (4 * p[0] * ysq) % self.p
m = (3 * p[0]**2 + self.a * p[2]**4) % self.p
nx = (m**2 - 2 * s) % self.p
ny = (m * (s - nx) - 8 * ysq**2) % self.p
nz = (2 * p[1] * p[2]) % self.p
return nx, ny, nz
def jacobian_add(self, p, q):
if not p[1]:
return q
if not q[1]:
return p
u1 = (p[0] * q[2]**2) % self.p
u2 = (q[0] * p[2]**2) % self.p
s1 = (p[1] * q[2]**3) % self.p
s2 = (q[1] * p[2]**3) % self.p
if u1 == u2:
if s1 != s2:
return (0, 0, 1)
return self.jacobian_double(p)
h = u2 - u1
r = s2 - s1
h2 = (h * h) % self.p
h3 = (h * h2) % self.p
u1h2 = (u1 * h2) % self.p
nx = (r**2 - h3 - 2 * u1h2) % self.p
ny = (r * (u1h2 - nx) - s1 * h3) % self.p
nz = (h * p[2] * q[2]) % self.p
return (nx, ny, nz)
def from_jacobian(self, p):
z = inverse(p[2], self.p)
return (p[0] * z**2) % self.p, (p[1] * z**3) % self.p
def jacobian_multiply(self, a, n, secret=False):
if a[1] == 0 or n == 0:
return 0, 0, 1
if n == 1:
return a
if n < 0 or n >= self.n:
return self.jacobian_multiply(a, n % self.n, secret)
half = self.jacobian_multiply(a, n // 2, secret)
half_sq = self.jacobian_double(half)
if secret:
# A constant-time implementation
half_sq_a = self.jacobian_add(half_sq, a)
if n % 2 == 0:
result = half_sq
if n % 2 == 1:
result = half_sq_a
return result
else:
if n % 2 == 0:
return half_sq
return self.jacobian_add(half_sq, a)
def fast_multiply(self, a, n, secret=False):
return self.from_jacobian(
self.jacobian_multiply(self.to_jacobian(a), n, secret))
class EllipticCurveBackend:
def __init__(self, p, n, a, b, g):
self.p, self.n, self.a, self.b, self.g = p, n, a, b, g
self.jacobian = JacobianCurve(p, n, a, b, g)
self.public_key_length = (len(bin(p).replace("0b", "")) + 7) // 8
self.order_bitlength = len(bin(n).replace("0b", ""))
def _int_to_bytes(self, raw, len=None):
return int_to_bytes(raw, len or self.public_key_length)
def decompress_point(self, public_key):
# Parse & load data
x = bytes_to_int(public_key[1:])
# Calculate Y
y_square = (pow(x, 3, self.p) + self.a * x + self.b) % self.p
try:
y = square_root_mod_prime(y_square, self.p)
except Exception:
raise ValueError("Invalid public key") from None
if y % 2 != public_key[0] - 0x02:
y = self.p - y
return self._int_to_bytes(x), self._int_to_bytes(y)
def new_private_key(self):
while True:
private_key = os.urandom(self.public_key_length)
if bytes_to_int(private_key) >= self.n:
continue
return private_key
def private_to_public(self, private_key):
raw = bytes_to_int(private_key)
x, y = self.jacobian.fast_multiply(self.g, raw)
return self._int_to_bytes(x), self._int_to_bytes(y)
def ecdh(self, private_key, public_key):
x, y = public_key
x, y = bytes_to_int(x), bytes_to_int(y)
private_key = bytes_to_int(private_key)
x, _ = self.jacobian.fast_multiply((x, y), private_key, secret=True)
return self._int_to_bytes(x)
def _subject_to_int(self, subject):
return bytes_to_int(subject[:(self.order_bitlength + 7) // 8])
class ECC:
# pylint: disable=line-too-long
# name: (nid, p, n, a, b, (Gx, Gy)),
CURVE = (
714,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141, 0,
7,
(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8))
# pylint: enable=line-too-long
def __init__(self, backend, aes):
self._backend = backend
self._aes = aes
def get_curve(self):
nid, p, n, a, b, g = self.CURVE
params = {"p": p, "n": n, "a": a, "b": b, "g": g}
return EllipticCurve(self._backend, params, self._aes, nid)
class EllipticCurve:
def __init__(self, backend_factory, params, aes, nid):
self._backend = backend_factory(**params)
self.params = params
self._aes = aes
self.nid = nid
def _encode_public_key(self, x, y, is_compressed=True, raw=True):
if raw:
if is_compressed:
return bytes([0x02 + (y[-1] % 2)]) + x
else:
return bytes([0x04]) + x + y
else:
return struct.pack("!HH", self.nid, len(x)) + x + struct.pack(
"!H", len(y)) + y
def _decode_public_key(self, public_key, partial=False):
if not public_key:
raise ValueError("No public key")
if public_key[0] == 0x04:
# Uncompressed
expected_length = 1 + 2 * self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid uncompressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid uncompressed public key length")
x = public_key[1:1 + self._backend.public_key_length]
y = public_key[1 + self._backend.public_key_length:expected_length]
if partial:
return (x, y), expected_length
else:
return x, y
elif public_key[0] in (0x02, 0x03):
# Compressed
expected_length = 1 + self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid compressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid compressed public key length")
x, y = self._backend.decompress_point(public_key[:expected_length])
# Sanity check
if x != public_key[1:expected_length]:
raise ValueError("Incorrect compressed public key")
if partial:
return (x, y), expected_length
else:
return x, y
else:
raise ValueError("Invalid public key prefix")
def decode_public_key(self, public_key):
return self._decode_public_key(public_key) |
def new_private_key(self, is_compressed=False):
return self._backend.new_private_key() + (b"\x01"
if is_compressed else b"") | random_line_split |
|
eccrypto.py | 1) // 4, p)
# 1. By factoring out powers of 2, find Q and S such that p - 1 =
# Q * 2 ** S with Q odd
q = p - 1
s = 0
while q % 2 == 0:
|
# 2. Search for z in Z/pZ which is a quadratic non-residue
z = 1
while legendre(z, p) != -1:
z += 1
m, c, t, r = s, pow(z, q, p), pow(n, q, p), pow(n, (q + 1) // 2, p)
while True:
if t == 0:
return 0
elif t == 1:
return r
# Use repeated squaring to find the least i, 0 < i < M, such
# that t ** (2 ** i) = 1
t_sq = t
i = 0
for i in range(1, m):
t_sq = t_sq * t_sq % p
if t_sq == 1:
break
else:
raise ValueError("Should never get here")
# Let b = c ** (2 ** (m - i - 1))
b = pow(c, 2**(m - i - 1), p)
m = i
c = b * b % p
t = t * b * b % p
r = r * b % p
return r
class JacobianCurve:
def __init__(self, p, n, a, b, g):
self.p = p
self.n = n
self.a = a
self.b = b
self.g = g
self.n_length = len(bin(self.n).replace("0b", ""))
def isinf(self, p):
return p[0] == 0 and p[1] == 0
def to_jacobian(self, p):
return p[0], p[1], 1
def jacobian_double(self, p):
if not p[1]:
return 0, 0, 0
ysq = (p[1]**2) % self.p
s = (4 * p[0] * ysq) % self.p
m = (3 * p[0]**2 + self.a * p[2]**4) % self.p
nx = (m**2 - 2 * s) % self.p
ny = (m * (s - nx) - 8 * ysq**2) % self.p
nz = (2 * p[1] * p[2]) % self.p
return nx, ny, nz
def jacobian_add(self, p, q):
if not p[1]:
return q
if not q[1]:
return p
u1 = (p[0] * q[2]**2) % self.p
u2 = (q[0] * p[2]**2) % self.p
s1 = (p[1] * q[2]**3) % self.p
s2 = (q[1] * p[2]**3) % self.p
if u1 == u2:
if s1 != s2:
return (0, 0, 1)
return self.jacobian_double(p)
h = u2 - u1
r = s2 - s1
h2 = (h * h) % self.p
h3 = (h * h2) % self.p
u1h2 = (u1 * h2) % self.p
nx = (r**2 - h3 - 2 * u1h2) % self.p
ny = (r * (u1h2 - nx) - s1 * h3) % self.p
nz = (h * p[2] * q[2]) % self.p
return (nx, ny, nz)
def from_jacobian(self, p):
z = inverse(p[2], self.p)
return (p[0] * z**2) % self.p, (p[1] * z**3) % self.p
def jacobian_multiply(self, a, n, secret=False):
if a[1] == 0 or n == 0:
return 0, 0, 1
if n == 1:
return a
if n < 0 or n >= self.n:
return self.jacobian_multiply(a, n % self.n, secret)
half = self.jacobian_multiply(a, n // 2, secret)
half_sq = self.jacobian_double(half)
if secret:
# A constant-time implementation
half_sq_a = self.jacobian_add(half_sq, a)
if n % 2 == 0:
result = half_sq
if n % 2 == 1:
result = half_sq_a
return result
else:
if n % 2 == 0:
return half_sq
return self.jacobian_add(half_sq, a)
def fast_multiply(self, a, n, secret=False):
return self.from_jacobian(
self.jacobian_multiply(self.to_jacobian(a), n, secret))
class EllipticCurveBackend:
def __init__(self, p, n, a, b, g):
self.p, self.n, self.a, self.b, self.g = p, n, a, b, g
self.jacobian = JacobianCurve(p, n, a, b, g)
self.public_key_length = (len(bin(p).replace("0b", "")) + 7) // 8
self.order_bitlength = len(bin(n).replace("0b", ""))
def _int_to_bytes(self, raw, len=None):
return int_to_bytes(raw, len or self.public_key_length)
def decompress_point(self, public_key):
# Parse & load data
x = bytes_to_int(public_key[1:])
# Calculate Y
y_square = (pow(x, 3, self.p) + self.a * x + self.b) % self.p
try:
y = square_root_mod_prime(y_square, self.p)
except Exception:
raise ValueError("Invalid public key") from None
if y % 2 != public_key[0] - 0x02:
y = self.p - y
return self._int_to_bytes(x), self._int_to_bytes(y)
def new_private_key(self):
while True:
private_key = os.urandom(self.public_key_length)
if bytes_to_int(private_key) >= self.n:
continue
return private_key
def private_to_public(self, private_key):
raw = bytes_to_int(private_key)
x, y = self.jacobian.fast_multiply(self.g, raw)
return self._int_to_bytes(x), self._int_to_bytes(y)
def ecdh(self, private_key, public_key):
x, y = public_key
x, y = bytes_to_int(x), bytes_to_int(y)
private_key = bytes_to_int(private_key)
x, _ = self.jacobian.fast_multiply((x, y), private_key, secret=True)
return self._int_to_bytes(x)
def _subject_to_int(self, subject):
return bytes_to_int(subject[:(self.order_bitlength + 7) // 8])
class ECC:
# pylint: disable=line-too-long
# name: (nid, p, n, a, b, (Gx, Gy)),
CURVE = (
714,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141, 0,
7,
(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8))
# pylint: enable=line-too-long
def __init__(self, backend, aes):
self._backend = backend
self._aes = aes
def get_curve(self):
nid, p, n, a, b, g = self.CURVE
params = {"p": p, "n": n, "a": a, "b": b, "g": g}
return EllipticCurve(self._backend, params, self._aes, nid)
class EllipticCurve:
def __init__(self, backend_factory, params, aes, nid):
self._backend = backend_factory(**params)
self.params = params
self._aes = aes
self.nid = nid
| q //= 2
s += 1 | conditional_block |
github.go | .clientHasSet.L.Unlock()
}
}
}
// AttachToApp ...
func (g *Github) AttachToApp(app core.App) error {
g.m.Lock()
defer g.m.Unlock()
g.init(app)
appConfig := &githubApp{
app: app,
}
app.Config("github", &appConfig.config)
g.apps[app.Name()] = appConfig
g.setupDeployKey(appConfig)
g.setupHooks(appConfig)
app.Listen(core.SignalBuildProvisioning, g.onBuildStarted)
app.Listen(core.SignalBuildComplete, g.onBuildFinished)
return nil
}
func (g *Github) setupDeployKey(appConfig *githubApp) error {
cfg := appConfig.config
// TODO - would be nicer to generate ssh key automatically
if cfg.PublicKey == "" {
logcritf("(%s) No public key available, create one and add it to the configuration", appConfig.app.Name())
return errors.New("No pub key available")
}
keyName := fmt.Sprintf("NGBuild ssh deploy key - %s", appConfig.app.Name())
_, _, err := g.client.Repositories.CreateKey(cfg.Owner, cfg.Repo, &github.Key{
Title: &keyName,
Key: &cfg.PublicKey,
ReadOnly: &[]bool{true}[0],
})
if err != nil && strings.Contains(err.Error(), "key is already in use") == false {
logcritf("Couldn't create deploy key for %s: %s", appConfig.app.Name(), err)
return err
}
return nil
}
func (g *Github) setupHooks(appConfig *githubApp) {
cfg := appConfig.config
_, _, err := g.client.Repositories.Get(cfg.Owner, cfg.Repo)
if err != nil {
logwarnf("(%s) Repository does not exist, owner=%s, repo=%s", appConfig.app.Name(), cfg.Owner, cfg.Repo)
return
}
hookURL := fmt.Sprintf("%s/cb/github/hook/%s", core.GetHTTPServerURL(), appConfig.app.Name())
_, _, err = g.client.Repositories.CreateHook(cfg.Owner, cfg.Repo, &github.Hook{
Name: &[]string{"web"}[0],
Active: &[]bool{true}[0],
Config: map[string]interface{}{
"url": hookURL,
"content_type": "json",
},
Events: []string{"pull_request",
"delete",
"issue_comment",
"pull_request_review",
"pull_request_review_event",
"push",
"status",
},
})
if err != nil && strings.Contains(err.Error(), "Hook already exists") == false {
logwarnf("Could not create webhook, owner=%s, repo=%s: %s", cfg.Owner, cfg.Repo, err)
return
}
}
// Shutdown ...
func (g *Github) Shutdown() {}
// hold the g.m lock when you call this
func (g *Github) trackBuild(build core.Build) {
for _, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
return
}
}
build.Ref()
g.trackedBuilds = append(g.trackedBuilds, build)
}
// hold the g.m.lock when you call this
func (g *Github) untrackBuild(build core.Build) {
buildIndex := -1
for i, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
buildIndex = i
break
}
}
if buildIndex < 0 {
return
}
g.trackedBuilds[buildIndex].Unref()
g.trackedBuilds = append(g.trackedBuilds[:buildIndex], g.trackedBuilds[buildIndex+1:]...)
}
func (g *Github) trackPullRequest(app *githubApp, event *github.PullRequestEvent) {
if event.PullRequest == nil {
logcritf("pull request is nil")
return
}
pull := event.PullRequest
pullID := strconv.Itoa(*pull.ID)
// first thing we need to do is check to see if this pull request comes from a collaborator
// otherwise we are letting randos run arbutary code on our system. this will be essentially until
// we have some filesystem container system
owner := *pull.Base.Repo.Owner.Login
repo := *pull.Base.Repo.Name
user := *pull.User.Login
isCollaborator, _, err := g.client.Repositories.IsCollaborator(owner, repo, user)
if err != nil {
logcritf("Couldn't check collaborator status on %s: %s", pullID, err)
return
} else if isCollaborator == false {
logwarnf("Ignoring pull request %s, non collaborator: %s", pullID, user)
return
}
g.m.Lock()
defer g.m.Unlock()
// check for ignored branches
for _, branchIgnore := range app.config.IgnoredBranches {
if branchIgnore == *pull.Base.Ref {
logwarnf("Ignoring pull request %s, is an ignored branch", pullID)
return
}
}
g.trackedPullRequests[pullID] = pullRequestStatus{
pull: pull,
}
g.buildPullRequest(app, pull)
}
func (g *Github) buildPullRequest(app *githubApp, pull *github.PullRequest) {
// for reference, head is the proposed branch, base is the branch to merge into
pullID := strconv.Itoa(*pull.ID)
loginfof("Building pull request: %s", pullID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
status = pullRequestStatus{pull, "", false}
g.trackedPullRequests[pullID] = status
}
// we want to check to see if we are already building or already built this commit
// and we want to cancel the previous build
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if build.Config().GetMetadata("github:HeadHash") == *pull.Head.SHA {
logwarnf("Already building/built this commit")
return
}
if app.config.CancelOnNewCommit {
build.Stop()
}
}
headBranch := *pull.Head.Ref
headCloneURL := *pull.Head.Repo.SSHURL
headCommit := *pull.Head.SHA
headOwner := *pull.Head.Repo.Owner.Login
headRepo := *pull.Head.Repo.Name
baseBranch := *pull.Base.Ref
baseCloneURL := *pull.Base.Repo.SSHURL
baseOwner := *pull.Base.Repo.Owner.Login
baseRepo := *pull.Base.Repo.Name
baseCommit := *pull.Base.SHA
buildConfig := core.NewBuildConfig()
buildConfig.Title = *pull.Title
buildConfig.URL = *pull.HTMLURL
buildConfig.HeadRepo = headCloneURL
buildConfig.HeadBranch = headBranch
buildConfig.HeadHash = headCommit
buildConfig.BaseRepo = baseCloneURL
buildConfig.BaseBranch = baseBranch
buildConfig.BaseHash = ""
buildConfig.Group = pullID
buildConfig.SetMetadata("github:BuildType", "pullrequest")
buildConfig.SetMetadata("github:PullRequestID", pullID)
buildConfig.SetMetadata("github:PullNumber", fmt.Sprintf("%d", *pull.Number))
buildConfig.SetMetadata("github:HeadHash", headCommit)
buildConfig.SetMetadata("github:HeadOwner", headOwner)
buildConfig.SetMetadata("github:HeadRepo", headRepo)
buildConfig.SetMetadata("github:BaseHash", baseCommit)
buildConfig.SetMetadata("github:BaseOwner", baseOwner)
buildConfig.SetMetadata("github:BaseRepo", baseRepo)
buildToken, err := app.app.NewBuild(buildConfig.Group, buildConfig)
if err != nil {
logcritf("Couldn't start build for %d", *pull.ID)
return
}
build, err := app.app.GetBuild(buildToken)
if err != nil || build == nil {
logcritf("Couldn't get build for %d", *pull.ID)
return
}
status.currentBuild = buildToken
g.trackedPullRequests[pullID] = status
loginfof("started build: %s", buildToken)
}
func (g *Github) updatePullRequest(app *githubApp, event *github.PullRequestEvent) {
// this is called when there is a new commit on the pull request or something like that
pullID := strconv.Itoa(*event.PullRequest.ID)
g.m.RLock()
_, ok := g.trackedPullRequests[pullID]
g.m.RUnlock()
if ok == false {
logwarnf("event on unknown/ignored pull request: %s", pullID)
g.trackPullRequest(app, event)
return
}
g.buildPullRequest(app, event.PullRequest)
}
func (g *Github) closedPullRequest(app *githubApp, event *github.PullRequestEvent) {
g.m.RLock()
defer g.m.RUnlock()
pullID := strconv.Itoa(*event.PullRequest.ID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
return
}
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if app.config.CancelOnNewCommit {
build.Stop()
}
}
delete(g.trackedPullRequests, pullID)
}
func loginfof(str string, args ...interface{}) (ret string) { | random_line_split |
||
github.go | ]*githubApp),
trackedPullRequests: make(map[string]pullRequestStatus),
}
http.HandleFunc("/cb/auth/github", g.handleGithubAuth)
http.HandleFunc("/cb/github/hook/", g.handleGithubEvent)
return g
}
// Identifier ...
func (g *Github) Identifier() string { return "github" }
// IsProvider ...
func (g *Github) IsProvider(source string) bool {
loginfof("Asked to provide for %s", source)
return strings.HasPrefix(source, "[email protected]:") || source == ""
}
// ProvideFor ...
func (g *Github) ProvideFor(config *core.BuildConfig, directory string) error {
// FIXME, need to git checkout the given config
return g.cloneAndMerge(directory, config)
}
func (g *Github) handleGithubAuth(resp http.ResponseWriter, req *http.Request) {
q := req.URL.Query()
state := q.Get("state")
if state != oauth2State {
resp.Write([]byte("OAuth2 state was incorrect, something bad happened between Github and us"))
return
}
code := q.Get("code")
cfg := g.getOauthConfig()
token, err := cfg.Exchange(context.Background(), code)
if err != nil {
resp.Write([]byte("Error exchanging OAuth code, something bad happened between Github and us: " + err.Error()))
return
}
core.StoreCache("github:token", token.AccessToken)
g.setClient(token)
resp.Write([]byte("Thanks! you can close this tab now."))
}
func (g *Github) getOauthConfig() *oauth2.Config {
return &oauth2.Config{
ClientID: g.globalConfig.ClientID,
ClientSecret: g.globalConfig.ClientSecret,
Endpoint: githubO2.Endpoint,
Scopes: []string{"repo"},
}
}
func (g *Github) setClient(token *oauth2.Token) {
ts := g.getOauthConfig().TokenSource(oauth2.NoContext, token)
tc := oauth2.NewClient(oauth2.NoContext, ts)
g.client = github.NewClient(tc)
g.clientHasSet.Broadcast()
}
func (g *Github) acquireOauthToken() {
token := core.GetCache("github:token")
if token != "" {
oauth2Token := oauth2.Token{AccessToken: token}
g.setClient(&oauth2Token)
return
}
fmt.Println("")
fmt.Println("This app must be authenticated with github, please visit the following URL to authenticate this app")
fmt.Println(g.getOauthConfig().AuthCodeURL(oauth2State, oauth2.AccessTypeOffline))
fmt.Println("")
}
func (g *Github) init(app core.App) {
if g.client == nil {
app.Config("github", &g.globalConfig)
if g.globalConfig.ClientID == "" || g.globalConfig.ClientSecret == "" {
fmt.Println("Invalid github configuration, missing ClientID/ClientSecret")
} else {
g.clientHasSet.L.Lock()
g.acquireOauthToken()
for g.client == nil {
fmt.Println("Waiting for github authentication response...")
g.clientHasSet.Wait()
}
fmt.Println("Got authentication response")
if repos, _, err := g.client.Repositories.List("", nil); err != nil {
logcritf("Couldn't get repos list after authenticating, something has gone wrong, clear cache and retry")
} else {
fmt.Println("Found repositories:")
for _, repo := range repos {
repostr := fmt.Sprintf("%s/%s ", *repo.Owner.Login, *repo.Name)
if *repo.Private == true {
repostr += "🔒"
}
if *repo.Fork == true {
repostr += "🍴"
}
fmt.Println(repostr)
}
}
g.clientHasSet.L.Unlock()
}
}
}
// AttachToApp ...
func (g *Github) AttachToApp(app core.App) error {
g.m.Lock()
defer g.m.Unlock()
g.init(app)
appConfig := &githubApp{
app: app,
}
app.Config("github", &appConfig.config)
g.apps[app.Name()] = appConfig
g.setupDeployKey(appConfig)
g.setupHooks(appConfig)
app.Listen(core.SignalBuildProvisioning, g.onBuildStarted)
app.Listen(core.SignalBuildComplete, g.onBuildFinished)
return nil
}
func (g *Github) setupDeployKey(appConfig *githubApp) error {
cfg := appConfig.config
// TODO - would be nicer to generate ssh key automatically
if cfg.PublicKey == "" {
logcritf("(%s) No public key available, create one and add it to the configuration", appConfig.app.Name())
return errors.New("No pub key available")
}
keyName := fmt.Sprintf("NGBuild ssh deploy key - %s", appConfig.app.Name())
_, _, err := g.client.Repositories.CreateKey(cfg.Owner, cfg.Repo, &github.Key{
Title: &keyName,
Key: &cfg.PublicKey,
ReadOnly: &[]bool{true}[0],
})
if err != nil && strings.Contains(err.Error(), "key is already in use") == false {
lo | urn nil
}
func (g *Github) setupHooks(appConfig *githubApp) {
cfg := appConfig.config
_, _, err := g.client.Repositories.Get(cfg.Owner, cfg.Repo)
if err != nil {
logwarnf("(%s) Repository does not exist, owner=%s, repo=%s", appConfig.app.Name(), cfg.Owner, cfg.Repo)
return
}
hookURL := fmt.Sprintf("%s/cb/github/hook/%s", core.GetHTTPServerURL(), appConfig.app.Name())
_, _, err = g.client.Repositories.CreateHook(cfg.Owner, cfg.Repo, &github.Hook{
Name: &[]string{"web"}[0],
Active: &[]bool{true}[0],
Config: map[string]interface{}{
"url": hookURL,
"content_type": "json",
},
Events: []string{"pull_request",
"delete",
"issue_comment",
"pull_request_review",
"pull_request_review_event",
"push",
"status",
},
})
if err != nil && strings.Contains(err.Error(), "Hook already exists") == false {
logwarnf("Could not create webhook, owner=%s, repo=%s: %s", cfg.Owner, cfg.Repo, err)
return
}
}
// Shutdown ...
func (g *Github) Shutdown() {}
// hold the g.m lock when you call this
func (g *Github) trackBuild(build core.Build) {
for _, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
return
}
}
build.Ref()
g.trackedBuilds = append(g.trackedBuilds, build)
}
// hold the g.m.lock when you call this
func (g *Github) untrackBuild(build core.Build) {
buildIndex := -1
for i, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
buildIndex = i
break
}
}
if buildIndex < 0 {
return
}
g.trackedBuilds[buildIndex].Unref()
g.trackedBuilds = append(g.trackedBuilds[:buildIndex], g.trackedBuilds[buildIndex+1:]...)
}
func (g *Github) trackPullRequest(app *githubApp, event *github.PullRequestEvent) {
if event.PullRequest == nil {
logcritf("pull request is nil")
return
}
pull := event.PullRequest
pullID := strconv.Itoa(*pull.ID)
// first thing we need to do is check to see if this pull request comes from a collaborator
// otherwise we are letting randos run arbutary code on our system. this will be essentially until
// we have some filesystem container system
owner := *pull.Base.Repo.Owner.Login
repo := *pull.Base.Repo.Name
user := *pull.User.Login
isCollaborator, _, err := g.client.Repositories.IsCollaborator(owner, repo, user)
if err != nil {
logcritf("Couldn't check collaborator status on %s: %s", pullID, err)
return
} else if isCollaborator == false {
logwarnf("Ignoring pull request %s, non collaborator: %s", pullID, user)
return
}
g.m.Lock()
defer g.m.Unlock()
// check for ignored branches
for _, branchIgnore := range app.config.IgnoredBranches {
if branchIgnore == *pull.Base.Ref {
logwarnf("Ignoring pull request %s, is an ignored branch", pullID)
return
}
}
g.trackedPullRequests[pullID] = pullRequestStatus{
pull: pull,
}
g.buildPullRequest(app, pull)
}
func (g *Github) buildPullRequest(app *githubApp, pull *github.PullRequest) {
// for reference, head is the proposed branch, base is the branch to merge into
pullID := strconv.Itoa(*pull.ID)
loginfof("Building pull request: %s", pullID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
status = pullRequestStatus | gcritf("Couldn't create deploy key for %s: %s", appConfig.app.Name(), err)
return err
}
ret | conditional_block |
github.go | [string]*githubApp),
trackedPullRequests: make(map[string]pullRequestStatus),
}
http.HandleFunc("/cb/auth/github", g.handleGithubAuth)
http.HandleFunc("/cb/github/hook/", g.handleGithubEvent)
return g
}
// Identifier ...
func (g *Github) Identifier() string { return "github" }
// IsProvider ...
func (g *Github) IsProvider(source string) bool {
loginfof("Asked to provide for %s", source)
return strings.HasPrefix(source, "[email protected]:") || source == ""
}
// ProvideFor ...
func (g *Github) ProvideFor(config *core.BuildConfig, directory string) error {
// FIXME, need to git checkout the given config
return g.cloneAndMerge(directory, config)
}
func (g *Github) handleGithubAuth(resp http.ResponseWriter, req *http.Request) {
q := req.URL.Query()
state := q.Get("state")
if state != oauth2State {
resp.Write([]byte("OAuth2 state was incorrect, something bad happened between Github and us"))
return
}
code := q.Get("code")
cfg := g.getOauthConfig()
token, err := cfg.Exchange(context.Background(), code)
if err != nil {
resp.Write([]byte("Error exchanging OAuth code, something bad happened between Github and us: " + err.Error()))
return
}
core.StoreCache("github:token", token.AccessToken)
g.setClient(token)
resp.Write([]byte("Thanks! you can close this tab now."))
}
func (g *Github) getOauthConfig() *oauth2.Config {
return &oauth2.Config{
ClientID: g.globalConfig.ClientID,
ClientSecret: g.globalConfig.ClientSecret,
Endpoint: githubO2.Endpoint,
Scopes: []string{"repo"},
}
}
func (g *Github) setClient(token *oauth2.Token) {
ts := g.getOauthConfig().TokenSource(oauth2.NoContext, token)
tc := oauth2.NewClient(oauth2.NoContext, ts)
g.client = github.NewClient(tc)
g.clientHasSet.Broadcast()
}
func (g *Github) | () {
token := core.GetCache("github:token")
if token != "" {
oauth2Token := oauth2.Token{AccessToken: token}
g.setClient(&oauth2Token)
return
}
fmt.Println("")
fmt.Println("This app must be authenticated with github, please visit the following URL to authenticate this app")
fmt.Println(g.getOauthConfig().AuthCodeURL(oauth2State, oauth2.AccessTypeOffline))
fmt.Println("")
}
func (g *Github) init(app core.App) {
if g.client == nil {
app.Config("github", &g.globalConfig)
if g.globalConfig.ClientID == "" || g.globalConfig.ClientSecret == "" {
fmt.Println("Invalid github configuration, missing ClientID/ClientSecret")
} else {
g.clientHasSet.L.Lock()
g.acquireOauthToken()
for g.client == nil {
fmt.Println("Waiting for github authentication response...")
g.clientHasSet.Wait()
}
fmt.Println("Got authentication response")
if repos, _, err := g.client.Repositories.List("", nil); err != nil {
logcritf("Couldn't get repos list after authenticating, something has gone wrong, clear cache and retry")
} else {
fmt.Println("Found repositories:")
for _, repo := range repos {
repostr := fmt.Sprintf("%s/%s ", *repo.Owner.Login, *repo.Name)
if *repo.Private == true {
repostr += "🔒"
}
if *repo.Fork == true {
repostr += "🍴"
}
fmt.Println(repostr)
}
}
g.clientHasSet.L.Unlock()
}
}
}
// AttachToApp ...
func (g *Github) AttachToApp(app core.App) error {
g.m.Lock()
defer g.m.Unlock()
g.init(app)
appConfig := &githubApp{
app: app,
}
app.Config("github", &appConfig.config)
g.apps[app.Name()] = appConfig
g.setupDeployKey(appConfig)
g.setupHooks(appConfig)
app.Listen(core.SignalBuildProvisioning, g.onBuildStarted)
app.Listen(core.SignalBuildComplete, g.onBuildFinished)
return nil
}
func (g *Github) setupDeployKey(appConfig *githubApp) error {
cfg := appConfig.config
// TODO - would be nicer to generate ssh key automatically
if cfg.PublicKey == "" {
logcritf("(%s) No public key available, create one and add it to the configuration", appConfig.app.Name())
return errors.New("No pub key available")
}
keyName := fmt.Sprintf("NGBuild ssh deploy key - %s", appConfig.app.Name())
_, _, err := g.client.Repositories.CreateKey(cfg.Owner, cfg.Repo, &github.Key{
Title: &keyName,
Key: &cfg.PublicKey,
ReadOnly: &[]bool{true}[0],
})
if err != nil && strings.Contains(err.Error(), "key is already in use") == false {
logcritf("Couldn't create deploy key for %s: %s", appConfig.app.Name(), err)
return err
}
return nil
}
func (g *Github) setupHooks(appConfig *githubApp) {
cfg := appConfig.config
_, _, err := g.client.Repositories.Get(cfg.Owner, cfg.Repo)
if err != nil {
logwarnf("(%s) Repository does not exist, owner=%s, repo=%s", appConfig.app.Name(), cfg.Owner, cfg.Repo)
return
}
hookURL := fmt.Sprintf("%s/cb/github/hook/%s", core.GetHTTPServerURL(), appConfig.app.Name())
_, _, err = g.client.Repositories.CreateHook(cfg.Owner, cfg.Repo, &github.Hook{
Name: &[]string{"web"}[0],
Active: &[]bool{true}[0],
Config: map[string]interface{}{
"url": hookURL,
"content_type": "json",
},
Events: []string{"pull_request",
"delete",
"issue_comment",
"pull_request_review",
"pull_request_review_event",
"push",
"status",
},
})
if err != nil && strings.Contains(err.Error(), "Hook already exists") == false {
logwarnf("Could not create webhook, owner=%s, repo=%s: %s", cfg.Owner, cfg.Repo, err)
return
}
}
// Shutdown ...
func (g *Github) Shutdown() {}
// hold the g.m lock when you call this
func (g *Github) trackBuild(build core.Build) {
for _, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
return
}
}
build.Ref()
g.trackedBuilds = append(g.trackedBuilds, build)
}
// hold the g.m.lock when you call this
func (g *Github) untrackBuild(build core.Build) {
buildIndex := -1
for i, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
buildIndex = i
break
}
}
if buildIndex < 0 {
return
}
g.trackedBuilds[buildIndex].Unref()
g.trackedBuilds = append(g.trackedBuilds[:buildIndex], g.trackedBuilds[buildIndex+1:]...)
}
func (g *Github) trackPullRequest(app *githubApp, event *github.PullRequestEvent) {
if event.PullRequest == nil {
logcritf("pull request is nil")
return
}
pull := event.PullRequest
pullID := strconv.Itoa(*pull.ID)
// first thing we need to do is check to see if this pull request comes from a collaborator
// otherwise we are letting randos run arbutary code on our system. this will be essentially until
// we have some filesystem container system
owner := *pull.Base.Repo.Owner.Login
repo := *pull.Base.Repo.Name
user := *pull.User.Login
isCollaborator, _, err := g.client.Repositories.IsCollaborator(owner, repo, user)
if err != nil {
logcritf("Couldn't check collaborator status on %s: %s", pullID, err)
return
} else if isCollaborator == false {
logwarnf("Ignoring pull request %s, non collaborator: %s", pullID, user)
return
}
g.m.Lock()
defer g.m.Unlock()
// check for ignored branches
for _, branchIgnore := range app.config.IgnoredBranches {
if branchIgnore == *pull.Base.Ref {
logwarnf("Ignoring pull request %s, is an ignored branch", pullID)
return
}
}
g.trackedPullRequests[pullID] = pullRequestStatus{
pull: pull,
}
g.buildPullRequest(app, pull)
}
func (g *Github) buildPullRequest(app *githubApp, pull *github.PullRequest) {
// for reference, head is the proposed branch, base is the branch to merge into
pullID := strconv.Itoa(*pull.ID)
loginfof("Building pull request: %s", pullID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
status = pullRequestStatus | acquireOauthToken | identifier_name |
github.go | [string]*githubApp),
trackedPullRequests: make(map[string]pullRequestStatus),
}
http.HandleFunc("/cb/auth/github", g.handleGithubAuth)
http.HandleFunc("/cb/github/hook/", g.handleGithubEvent)
return g
}
// Identifier ...
func (g *Github) Identifier() string { return "github" }
// IsProvider ...
func (g *Github) IsProvider(source string) bool {
loginfof("Asked to provide for %s", source)
return strings.HasPrefix(source, "[email protected]:") || source == ""
}
// ProvideFor ...
func (g *Github) ProvideFor(config *core.BuildConfig, directory string) error {
// FIXME, need to git checkout the given config
return g.cloneAndMerge(directory, config)
}
func (g *Github) handleGithubAuth(resp http.ResponseWriter, req *http.Request) {
q := req.URL.Query()
state := q.Get("state")
if state != oauth2State {
resp.Write([]byte("OAuth2 state was incorrect, something bad happened between Github and us"))
return
}
code := q.Get("code")
cfg := g.getOauthConfig()
token, err := cfg.Exchange(context.Background(), code)
if err != nil {
resp.Write([]byte("Error exchanging OAuth code, something bad happened between Github and us: " + err.Error()))
return
}
core.StoreCache("github:token", token.AccessToken)
g.setClient(token)
resp.Write([]byte("Thanks! you can close this tab now."))
}
func (g *Github) getOauthConfig() *oauth2.Config {
return &oauth2.Config{
ClientID: g.globalConfig.ClientID,
ClientSecret: g.globalConfig.ClientSecret,
Endpoint: githubO2.Endpoint,
Scopes: []string{"repo"},
}
}
func (g *Github) setClient(token *oauth2.Token) {
ts := g.getOauthConfig().TokenSource(oauth2.NoContext, token)
tc := oauth2.NewClient(oauth2.NoContext, ts)
g.client = github.NewClient(tc)
g.clientHasSet.Broadcast()
}
func (g *Github) acquireOauthToken() {
token := core.GetCache("github:token")
if token != "" {
oauth2Token := oauth2.Token{AccessToken: token}
g.setClient(&oauth2Token)
return
}
fmt.Println("")
fmt.Println("This app must be authenticated with github, please visit the following URL to authenticate this app")
fmt.Println(g.getOauthConfig().AuthCodeURL(oauth2State, oauth2.AccessTypeOffline))
fmt.Println("")
}
func (g *Github) init(app core.App) {
if g.client == nil {
app.Config("github", &g.globalConfig)
if g.globalConfig.ClientID == "" || g.globalConfig.ClientSecret == "" {
fmt.Println("Invalid github configuration, missing ClientID/ClientSecret")
} else {
g.clientHasSet.L.Lock()
g.acquireOauthToken()
for g.client == nil {
fmt.Println("Waiting for github authentication response...")
g.clientHasSet.Wait()
}
fmt.Println("Got authentication response")
if repos, _, err := g.client.Repositories.List("", nil); err != nil {
logcritf("Couldn't get repos list after authenticating, something has gone wrong, clear cache and retry")
} else {
fmt.Println("Found repositories:")
for _, repo := range repos {
repostr := fmt.Sprintf("%s/%s ", *repo.Owner.Login, *repo.Name)
if *repo.Private == true {
repostr += "🔒"
}
if *repo.Fork == true {
repostr += "🍴"
}
fmt.Println(repostr)
}
}
g.clientHasSet.L.Unlock()
}
}
}
// AttachToApp ...
func (g *Github) AttachToApp(app core.App) error {
g.m.Lock()
defer g.m.Unlock()
g.init(app)
appConfig := &githubApp{
app: app,
}
app.Config("github", &appConfig.config)
g.apps[app.Name()] = appConfig
g.setupDeployKey(appConfig)
g.setupHooks(appConfig)
app.Listen(core.SignalBuildProvisioning, g.onBuildStarted)
app.Listen(core.SignalBuildComplete, g.onBuildFinished)
return nil
}
func (g *Github) setupDeployKey(appConfig *githubApp) error {
cfg := appConfig.config
// TODO - would be nicer to generate ssh key automatically
if cfg.PublicKey == "" {
logcritf("(%s) No public key available, create one and add it to the configuration", appConfig.app.Name())
return errors.New("No pub key available")
}
keyName := fmt.Sprintf("NGBuild ssh deploy key - %s", appConfig.app.Name())
_, _, err := g.client.Repositories.CreateKey(cfg.Owner, cfg.Repo, &github.Key{
Title: &keyName,
Key: &cfg.PublicKey,
ReadOnly: &[]bool{true}[0],
})
if err != nil && strings.Contains(err.Error(), "key is already in use") == false {
logcritf("Couldn't create deploy key for %s: %s", appConfig.app.Name(), err)
return err
}
return nil
}
func (g *Github) setupHooks(appConfig *githubApp) {
cfg := appConfig.config
_, _, err := g.client.Repositories.Get(cfg.Owner, cfg.Repo)
if err != nil {
logwarnf("(%s) Repository does not exist, owner=%s, repo=%s", appConfig.app.Name(), cfg.Owner, cfg.Repo)
return
}
hookURL := fmt.Sprintf("%s/cb/github/hook/%s", core.GetHTTPServerURL(), appConfig.app.Name())
_, _, err = g.client.Repositories.CreateHook(cfg.Owner, cfg.Repo, &github.Hook{
Name: &[]string{"web"}[0],
Active: &[]bool{true}[0],
Config: map[string]interface{}{
"url": hookURL,
"content_type": "json",
},
Events: []string{"pull_request",
"delete",
"issue_comment",
"pull_request_review",
"pull_request_review_event",
"push",
"status",
},
})
if err != nil && strings.Contains(err.Error(), "Hook already exists") == false {
logwarnf("Could not create webhook, owner=%s, repo=%s: %s", cfg.Owner, cfg.Repo, err)
return
}
}
// Shutdown ...
func (g *Github) Shutdown() {}
// hold the g.m lock when you call this
func (g *Github) trackBuild(build core.Build) {
for | old the g.m.lock when you call this
func (g *Github) untrackBuild(build core.Build) {
buildIndex := -1
for i, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
buildIndex = i
break
}
}
if buildIndex < 0 {
return
}
g.trackedBuilds[buildIndex].Unref()
g.trackedBuilds = append(g.trackedBuilds[:buildIndex], g.trackedBuilds[buildIndex+1:]...)
}
func (g *Github) trackPullRequest(app *githubApp, event *github.PullRequestEvent) {
if event.PullRequest == nil {
logcritf("pull request is nil")
return
}
pull := event.PullRequest
pullID := strconv.Itoa(*pull.ID)
// first thing we need to do is check to see if this pull request comes from a collaborator
// otherwise we are letting randos run arbutary code on our system. this will be essentially until
// we have some filesystem container system
owner := *pull.Base.Repo.Owner.Login
repo := *pull.Base.Repo.Name
user := *pull.User.Login
isCollaborator, _, err := g.client.Repositories.IsCollaborator(owner, repo, user)
if err != nil {
logcritf("Couldn't check collaborator status on %s: %s", pullID, err)
return
} else if isCollaborator == false {
logwarnf("Ignoring pull request %s, non collaborator: %s", pullID, user)
return
}
g.m.Lock()
defer g.m.Unlock()
// check for ignored branches
for _, branchIgnore := range app.config.IgnoredBranches {
if branchIgnore == *pull.Base.Ref {
logwarnf("Ignoring pull request %s, is an ignored branch", pullID)
return
}
}
g.trackedPullRequests[pullID] = pullRequestStatus{
pull: pull,
}
g.buildPullRequest(app, pull)
}
func (g *Github) buildPullRequest(app *githubApp, pull *github.PullRequest) {
// for reference, head is the proposed branch, base is the branch to merge into
pullID := strconv.Itoa(*pull.ID)
loginfof("Building pull request: %s", pullID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
status = pullRequestStatus{ | _, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
return
}
}
build.Ref()
g.trackedBuilds = append(g.trackedBuilds, build)
}
// h | identifier_body |
image_utils.py |
records DataFrame of records from the original CSV file
encoding A string representing the OpenCV encoding of the underlying img ndarray
ships A list of Ship dictionary entries
ship_id - Hash of the RLE string
EncodedPixels - RLE string
center -
"""
self.image_id = image_id
self.encoding = None
self.records = None
self.img = None
self.contours = None
logging.info("Image id: {}".format(self.image_id))
def __str__(self):
return "Image ID {} {} encoded, with {} ships".format(self.image_id, self.encoding, self.num_ships)
@property
def num_ships(self):
if len(self.records) == 1:
rec = self.records.head(1)
if isinstance(rec['EncodedPixels'].values[0], str):
return 1
else:
return 0
else:
return len(self.records)
@property
def shape(self):
return self.img.shape
@property
def shape2D(self):
return self.img.shape[0:2]
def get_img_bgr(self):
return cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR)
def load(self, image_zip, df):
"""load an image into ndarray as RGB, and load ship records
:param image_zip:
:param df:
:return:
"""
self.img = imutils.load_rgb_from_zip(image_zip, self.image_id)
# TODO: FOR TESTING ONLY!!!!
# self.img = 200 * np.ones(shape=self.img.shape, dtype=np.uint8)
# self.img[:300,:300,:] = 100 * np.ones(3, dtype=np.uint8)
# logging.info("DEBUG ON: ".format())
# print(self.img)
# print(self.img.shape)
self.encoding = 'RGB'
logging.info("Loaded {}, size {} ".format(self.image_id, self.img.shape))
# TODO: (Actually just a note: the .copy() will suppress the SettingWithCopyWarning!
self.records = df.loc[df.index == self.image_id, :].copy()
assert isinstance(self.records, pd.DataFrame)
self.records['ship_id'] = self.records.apply(lambda row: hash(row['EncodedPixels']), axis=1)
self.records.set_index('ship_id', inplace=True)
self.records.drop(['HasShip', 'Duplicated', 'Unique'], axis=1, inplace=True)
logging.info("{} records selected for {}".format(len(self.records), self.image_id))
def moments(self):
""" Just a docstring for now
// spatial moments
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
// central moments
double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
// central normalized moments
double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
:return:
"""
def load_ships(self):
"""Augment the basic df with mask, contour, data
mask - ndarray of 0 or 1
contour - opencv2 contour object
moments -
:return:
"""
assert isinstance(self.img, np.ndarray), "No image loaded"
assert self.num_ships, "No ships in this image"
# TODO: check warnings
self.records['mask'] = self.records.apply(lambda row: self.convert_rle_to_mask(row['EncodedPixels'], self.shape2D), axis=1)
self.records['contour'] = self.records.apply(lambda row: self.get_contour(row['mask']), axis=1)
self.records['moments'] = self.records.apply(lambda row: cv2.moments(row['contour']), axis=1)
# def get_x(row): return round(row['moments']['m10'] / row['moments']['m00'])
def get_x(row): return row['moments']['m10'] / row['moments']['m00']
# def get_y(row): return round(row['moments']['m01'] / row['moments']['m00'])
def get_y(row): return row['moments']['m01'] / row['moments']['m00']
self.records['x'] = self.records.apply(lambda row: get_x(row), axis=1)
self.records['y'] = self.records.apply(lambda row: get_y(row), axis=1)
# ( Same as m00!)
self.records['area'] = self.records.apply(lambda row: cv2.contourArea(row['contour']), axis=1)
self.records['rotated_rect'] = self.records.apply(lambda row: cv2.minAreaRect(row['contour']), axis=1)
self.records['angle'] = self.records.apply(lambda row: row['rotated_rect'][2], axis=1)
def ship_summary_table(self):
if self.num_ships:
df_summary = self.records.copy()
df_summary.drop(['mask', 'contour', 'moments', 'rotated_rect', 'EncodedPixels'], axis=1, inplace=True)
df_summary.reset_index(drop=True, inplace=True)
df_summary.insert(0, 'ship', range(0, len(df_summary)))
logging.info("Generating summary table".format())
return df_summary.round(1)
else:
return None
def convert_rle_to_mask(self, rle, shape):
"""convert RLE mask into 2d pixel array"""
# Initialize a zero canvas (one-dimensional here)
mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
# Split each run-length string
s = rle.split()
for i in range(len(s) // 2):
|
# Reshape to 2D
img2 = mask.reshape(shape).T
return img2
def get_contour(self, mask):
"""Return a cv2 contour object from a binary 0/1 mask"""
assert mask.ndim == 2
assert mask.min() == 0
assert mask.max() == 1
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
assert len(contours) == 1, "Too many contours in this mask!"
contour = contours[0]
# logging.debug("Returning {} fit contours over mask pixels".format(len(contours)))
return contour
def draw_ellipses_img(self):
logging.info("Fitting and drawing ellipses on a new ndarray canvas.".format())
canvas = self.img
for idx, rec in self.records.iterrows():
# logging.debug("Processing record {} of {}".format(cnt, image_id))
# contour = imutils.get_contour(rec['mask'])
# img = imutils.draw_ellipse_and_axis(img, contour, thickness=2)
# print(rec)
# print(rec['contour'])
canvas = imutils.fit_draw_ellipse(canvas, rec['contour'], thickness=2)
return canvas
def k_means(self, num_clusters=2):
logging.info("Processing {} image of shape {}".format(self.encoding, self.img.shape))
data = self.img / 255
logging.info("Scaled values to 0-1 range".format())
data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
logging.info("Reshape to pixel list {}".format(data.shape))
kmeans = sk.cluster.MiniBatchKMeans(num_clusters)
kmeans.fit(data)
logging.info("Fit {} pixels into {} clusters".format(data.shape[0], num_clusters))
unique, counts = np.unique(kmeans.labels_, return_counts=True)
for c_name, c_count, c_position in zip(unique, counts, kmeans.cluster_centers_):
logging.info("\tCluster {} at {} with {:0.1%} of the pixels".format(c_name, np.around(c_position, 3), c_count/data.shape[0])),
if len(unique) == 2:
dist = np.linalg.norm(kmeans.cluster_centers_[0] - kmeans.cluster_centers_[1])
logging.info("Distance between c1 and c2: {}".format(dist))
return kmeans
# all_new_colors = kmeans.cluster_centers_[kmeans.predict(data)]
def fit_kmeans_pixels(img, kmeans):
"""
:param img: An RGB image
:param kmeans: The fit KMeans sci-kit object over this image
:return: A new image, fit to the clusters of the image
"""
original_pixels = img / 255
# original_pixels = image.img.copy() / 255
new_shape = original_pixels.shape[0] * original_pixels.shape[1], original_pixels.shape[2]
original_pixels = original_pixels.reshape(new_shape)
# logging.info("Reshape to pixel list {}".format(original_pixels.shape))
# logging.info("Changed values to | start = int(s[2 * i]) - 1
length = int(s[2 * i + 1])
mask[start:start + length] = 1 # Assign this run to ones | conditional_block |
image_utils.py | {} ".format(self.image_id, self.img.shape))
# TODO: (Actually just a note: the .copy() will suppress the SettingWithCopyWarning!
self.records = df.loc[df.index == self.image_id, :].copy()
assert isinstance(self.records, pd.DataFrame)
self.records['ship_id'] = self.records.apply(lambda row: hash(row['EncodedPixels']), axis=1)
self.records.set_index('ship_id', inplace=True)
self.records.drop(['HasShip', 'Duplicated', 'Unique'], axis=1, inplace=True)
logging.info("{} records selected for {}".format(len(self.records), self.image_id))
def moments(self):
""" Just a docstring for now
// spatial moments
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
// central moments
double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
// central normalized moments
double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
:return:
"""
def load_ships(self):
"""Augment the basic df with mask, contour, data
mask - ndarray of 0 or 1
contour - opencv2 contour object
moments -
:return:
"""
assert isinstance(self.img, np.ndarray), "No image loaded"
assert self.num_ships, "No ships in this image"
# TODO: check warnings
self.records['mask'] = self.records.apply(lambda row: self.convert_rle_to_mask(row['EncodedPixels'], self.shape2D), axis=1)
self.records['contour'] = self.records.apply(lambda row: self.get_contour(row['mask']), axis=1)
self.records['moments'] = self.records.apply(lambda row: cv2.moments(row['contour']), axis=1)
# def get_x(row): return round(row['moments']['m10'] / row['moments']['m00'])
def get_x(row): return row['moments']['m10'] / row['moments']['m00']
# def get_y(row): return round(row['moments']['m01'] / row['moments']['m00'])
def get_y(row): return row['moments']['m01'] / row['moments']['m00']
self.records['x'] = self.records.apply(lambda row: get_x(row), axis=1)
self.records['y'] = self.records.apply(lambda row: get_y(row), axis=1)
# ( Same as m00!)
self.records['area'] = self.records.apply(lambda row: cv2.contourArea(row['contour']), axis=1)
self.records['rotated_rect'] = self.records.apply(lambda row: cv2.minAreaRect(row['contour']), axis=1)
self.records['angle'] = self.records.apply(lambda row: row['rotated_rect'][2], axis=1)
def ship_summary_table(self):
if self.num_ships:
df_summary = self.records.copy()
df_summary.drop(['mask', 'contour', 'moments', 'rotated_rect', 'EncodedPixels'], axis=1, inplace=True)
df_summary.reset_index(drop=True, inplace=True)
df_summary.insert(0, 'ship', range(0, len(df_summary)))
logging.info("Generating summary table".format())
return df_summary.round(1)
else:
return None
def convert_rle_to_mask(self, rle, shape):
"""convert RLE mask into 2d pixel array"""
# Initialize a zero canvas (one-dimensional here)
mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
# Split each run-length string
s = rle.split()
for i in range(len(s) // 2):
start = int(s[2 * i]) - 1
length = int(s[2 * i + 1])
mask[start:start + length] = 1 # Assign this run to ones
# Reshape to 2D
img2 = mask.reshape(shape).T
return img2
def get_contour(self, mask):
"""Return a cv2 contour object from a binary 0/1 mask"""
assert mask.ndim == 2
assert mask.min() == 0
assert mask.max() == 1
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
assert len(contours) == 1, "Too many contours in this mask!"
contour = contours[0]
# logging.debug("Returning {} fit contours over mask pixels".format(len(contours)))
return contour
def draw_ellipses_img(self):
logging.info("Fitting and drawing ellipses on a new ndarray canvas.".format())
canvas = self.img
for idx, rec in self.records.iterrows():
# logging.debug("Processing record {} of {}".format(cnt, image_id))
# contour = imutils.get_contour(rec['mask'])
# img = imutils.draw_ellipse_and_axis(img, contour, thickness=2)
# print(rec)
# print(rec['contour'])
canvas = imutils.fit_draw_ellipse(canvas, rec['contour'], thickness=2)
return canvas
def k_means(self, num_clusters=2):
logging.info("Processing {} image of shape {}".format(self.encoding, self.img.shape))
data = self.img / 255
logging.info("Scaled values to 0-1 range".format())
data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
logging.info("Reshape to pixel list {}".format(data.shape))
kmeans = sk.cluster.MiniBatchKMeans(num_clusters)
kmeans.fit(data)
logging.info("Fit {} pixels into {} clusters".format(data.shape[0], num_clusters))
unique, counts = np.unique(kmeans.labels_, return_counts=True)
for c_name, c_count, c_position in zip(unique, counts, kmeans.cluster_centers_):
logging.info("\tCluster {} at {} with {:0.1%} of the pixels".format(c_name, np.around(c_position, 3), c_count/data.shape[0])),
if len(unique) == 2:
dist = np.linalg.norm(kmeans.cluster_centers_[0] - kmeans.cluster_centers_[1])
logging.info("Distance between c1 and c2: {}".format(dist))
return kmeans
# all_new_colors = kmeans.cluster_centers_[kmeans.predict(data)]
def fit_kmeans_pixels(img, kmeans):
"""
:param img: An RGB image
:param kmeans: The fit KMeans sci-kit object over this image
:return: A new image, fit to the clusters of the image
"""
original_pixels = img / 255
# original_pixels = image.img.copy() / 255
new_shape = original_pixels.shape[0] * original_pixels.shape[1], original_pixels.shape[2]
original_pixels = original_pixels.reshape(new_shape)
# logging.info("Reshape to pixel list {}".format(original_pixels.shape))
# logging.info("Changed values to 0-1 range".format(img.shape))
logging.info("Scaled image to [0-1] and reshaped to {}".format(new_shape))
predicted_cluster = kmeans.predict(original_pixels)
# TODO: Document this Numpy behaviour - indexing one array with an integer array of indices
# Creates a new array, of length equal to indexing array
# test_a = predicted_cluster[0:10]
# test_asdf = kmeans.cluster_centers_
# test_asdf[test_a]
img_clustered_pixels = kmeans.cluster_centers_[predicted_cluster]
logging.info("Assigned each pixel to a cluster (color vector).".format())
img_clustered_pixels = img_clustered_pixels.reshape(img.shape)
logging.info("Reshape pixels back to original shape".format())
logging.info("Returning KMeans fit image canvas".format())
# img_clustered_pixels
return img_clustered_pixels
def convert_rgb_img_to_b64string_straight(img):
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text
def convert_rgb_img_to_b64string(img):
# Convert image to BGR from RGB
| img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text | identifier_body |
|
image_utils.py | EncodedPixels - RLE string
center -
"""
self.image_id = image_id
self.encoding = None
self.records = None
self.img = None
self.contours = None
logging.info("Image id: {}".format(self.image_id))
def __str__(self):
return "Image ID {} {} encoded, with {} ships".format(self.image_id, self.encoding, self.num_ships)
@property
def num_ships(self):
if len(self.records) == 1:
rec = self.records.head(1)
if isinstance(rec['EncodedPixels'].values[0], str):
return 1
else:
return 0
else:
return len(self.records)
@property
def shape(self):
return self.img.shape
@property
def shape2D(self):
return self.img.shape[0:2]
def get_img_bgr(self):
return cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR)
def load(self, image_zip, df):
"""load an image into ndarray as RGB, and load ship records
:param image_zip:
:param df:
:return:
"""
self.img = imutils.load_rgb_from_zip(image_zip, self.image_id)
# TODO: FOR TESTING ONLY!!!!
# self.img = 200 * np.ones(shape=self.img.shape, dtype=np.uint8)
# self.img[:300,:300,:] = 100 * np.ones(3, dtype=np.uint8)
# logging.info("DEBUG ON: ".format())
# print(self.img)
# print(self.img.shape)
self.encoding = 'RGB'
logging.info("Loaded {}, size {} ".format(self.image_id, self.img.shape))
# TODO: (Actually just a note: the .copy() will suppress the SettingWithCopyWarning!
self.records = df.loc[df.index == self.image_id, :].copy()
assert isinstance(self.records, pd.DataFrame)
self.records['ship_id'] = self.records.apply(lambda row: hash(row['EncodedPixels']), axis=1)
self.records.set_index('ship_id', inplace=True)
self.records.drop(['HasShip', 'Duplicated', 'Unique'], axis=1, inplace=True)
logging.info("{} records selected for {}".format(len(self.records), self.image_id))
def moments(self):
""" Just a docstring for now
// spatial moments
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
// central moments
double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
// central normalized moments
double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
:return:
"""
def load_ships(self):
"""Augment the basic df with mask, contour, data
mask - ndarray of 0 or 1
contour - opencv2 contour object
moments -
:return:
"""
assert isinstance(self.img, np.ndarray), "No image loaded"
assert self.num_ships, "No ships in this image"
# TODO: check warnings
self.records['mask'] = self.records.apply(lambda row: self.convert_rle_to_mask(row['EncodedPixels'], self.shape2D), axis=1)
self.records['contour'] = self.records.apply(lambda row: self.get_contour(row['mask']), axis=1)
self.records['moments'] = self.records.apply(lambda row: cv2.moments(row['contour']), axis=1)
# def get_x(row): return round(row['moments']['m10'] / row['moments']['m00'])
def get_x(row): return row['moments']['m10'] / row['moments']['m00']
# def get_y(row): return round(row['moments']['m01'] / row['moments']['m00'])
def get_y(row): return row['moments']['m01'] / row['moments']['m00']
self.records['x'] = self.records.apply(lambda row: get_x(row), axis=1)
self.records['y'] = self.records.apply(lambda row: get_y(row), axis=1)
# ( Same as m00!)
self.records['area'] = self.records.apply(lambda row: cv2.contourArea(row['contour']), axis=1)
self.records['rotated_rect'] = self.records.apply(lambda row: cv2.minAreaRect(row['contour']), axis=1)
self.records['angle'] = self.records.apply(lambda row: row['rotated_rect'][2], axis=1)
def ship_summary_table(self):
if self.num_ships:
df_summary = self.records.copy()
df_summary.drop(['mask', 'contour', 'moments', 'rotated_rect', 'EncodedPixels'], axis=1, inplace=True)
df_summary.reset_index(drop=True, inplace=True)
df_summary.insert(0, 'ship', range(0, len(df_summary)))
logging.info("Generating summary table".format())
return df_summary.round(1)
else:
return None
def convert_rle_to_mask(self, rle, shape):
"""convert RLE mask into 2d pixel array"""
# Initialize a zero canvas (one-dimensional here)
mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
# Split each run-length string
s = rle.split()
for i in range(len(s) // 2):
start = int(s[2 * i]) - 1
length = int(s[2 * i + 1])
mask[start:start + length] = 1 # Assign this run to ones
# Reshape to 2D
img2 = mask.reshape(shape).T
return img2
def get_contour(self, mask):
"""Return a cv2 contour object from a binary 0/1 mask"""
assert mask.ndim == 2
assert mask.min() == 0
assert mask.max() == 1
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
assert len(contours) == 1, "Too many contours in this mask!"
contour = contours[0]
# logging.debug("Returning {} fit contours over mask pixels".format(len(contours)))
return contour
def draw_ellipses_img(self):
logging.info("Fitting and drawing ellipses on a new ndarray canvas.".format())
canvas = self.img
for idx, rec in self.records.iterrows():
# logging.debug("Processing record {} of {}".format(cnt, image_id))
# contour = imutils.get_contour(rec['mask'])
# img = imutils.draw_ellipse_and_axis(img, contour, thickness=2)
# print(rec)
# print(rec['contour'])
canvas = imutils.fit_draw_ellipse(canvas, rec['contour'], thickness=2)
return canvas
def k_means(self, num_clusters=2):
logging.info("Processing {} image of shape {}".format(self.encoding, self.img.shape))
data = self.img / 255
logging.info("Scaled values to 0-1 range".format())
data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
logging.info("Reshape to pixel list {}".format(data.shape))
kmeans = sk.cluster.MiniBatchKMeans(num_clusters)
kmeans.fit(data)
logging.info("Fit {} pixels into {} clusters".format(data.shape[0], num_clusters))
unique, counts = np.unique(kmeans.labels_, return_counts=True)
for c_name, c_count, c_position in zip(unique, counts, kmeans.cluster_centers_):
logging.info("\tCluster {} at {} with {:0.1%} of the pixels".format(c_name, np.around(c_position, 3), c_count/data.shape[0])),
if len(unique) == 2:
dist = np.linalg.norm(kmeans.cluster_centers_[0] - kmeans.cluster_centers_[1])
logging.info("Distance between c1 and c2: {}".format(dist))
return kmeans
# all_new_colors = kmeans.cluster_centers_[kmeans.predict(data)]
def fit_kmeans_pixels(img, kmeans):
"""
:param img: An RGB image
:param kmeans: The fit KMeans sci-kit object over this image
:return: A new image, fit to the clusters of the image
"""
original_pixels = img / 255
# original_pixels = image.img.copy() / 255
new_shape = original_pixels.shape[0] * original_pixels.shape[1], original_pixels.shape[2]
original_pixels = original_pixels.reshape(new_shape)
# logging.info("Reshape to pixel list {}".format(original_pixels.shape))
# | img The image as an ndarray
records DataFrame of records from the original CSV file
encoding A string representing the OpenCV encoding of the underlying img ndarray
ships A list of Ship dictionary entries
ship_id - Hash of the RLE string | random_line_split |
|
image_utils.py |
records DataFrame of records from the original CSV file
encoding A string representing the OpenCV encoding of the underlying img ndarray
ships A list of Ship dictionary entries
ship_id - Hash of the RLE string
EncodedPixels - RLE string
center -
"""
self.image_id = image_id
self.encoding = None
self.records = None
self.img = None
self.contours = None
logging.info("Image id: {}".format(self.image_id))
def __str__(self):
return "Image ID {} {} encoded, with {} ships".format(self.image_id, self.encoding, self.num_ships)
@property
def num_ships(self):
if len(self.records) == 1:
rec = self.records.head(1)
if isinstance(rec['EncodedPixels'].values[0], str):
return 1
else:
return 0
else:
return len(self.records)
@property
def shape(self):
return self.img.shape
@property
def shape2D(self):
return self.img.shape[0:2]
def get_img_bgr(self):
return cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR)
def load(self, image_zip, df):
"""load an image into ndarray as RGB, and load ship records
:param image_zip:
:param df:
:return:
"""
self.img = imutils.load_rgb_from_zip(image_zip, self.image_id)
# TODO: FOR TESTING ONLY!!!!
# self.img = 200 * np.ones(shape=self.img.shape, dtype=np.uint8)
# self.img[:300,:300,:] = 100 * np.ones(3, dtype=np.uint8)
# logging.info("DEBUG ON: ".format())
# print(self.img)
# print(self.img.shape)
self.encoding = 'RGB'
logging.info("Loaded {}, size {} ".format(self.image_id, self.img.shape))
# TODO: (Actually just a note: the .copy() will suppress the SettingWithCopyWarning!
self.records = df.loc[df.index == self.image_id, :].copy()
assert isinstance(self.records, pd.DataFrame)
self.records['ship_id'] = self.records.apply(lambda row: hash(row['EncodedPixels']), axis=1)
self.records.set_index('ship_id', inplace=True)
self.records.drop(['HasShip', 'Duplicated', 'Unique'], axis=1, inplace=True)
logging.info("{} records selected for {}".format(len(self.records), self.image_id))
def moments(self):
""" Just a docstring for now
// spatial moments
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
// central moments
double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
// central normalized moments
double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
:return:
"""
def load_ships(self):
"""Augment the basic df with mask, contour, data
mask - ndarray of 0 or 1
contour - opencv2 contour object
moments -
:return:
"""
assert isinstance(self.img, np.ndarray), "No image loaded"
assert self.num_ships, "No ships in this image"
# TODO: check warnings
self.records['mask'] = self.records.apply(lambda row: self.convert_rle_to_mask(row['EncodedPixels'], self.shape2D), axis=1)
self.records['contour'] = self.records.apply(lambda row: self.get_contour(row['mask']), axis=1)
self.records['moments'] = self.records.apply(lambda row: cv2.moments(row['contour']), axis=1)
# def get_x(row): return round(row['moments']['m10'] / row['moments']['m00'])
def get_x(row): return row['moments']['m10'] / row['moments']['m00']
# def get_y(row): return round(row['moments']['m01'] / row['moments']['m00'])
def get_y(row): return row['moments']['m01'] / row['moments']['m00']
self.records['x'] = self.records.apply(lambda row: get_x(row), axis=1)
self.records['y'] = self.records.apply(lambda row: get_y(row), axis=1)
# ( Same as m00!)
self.records['area'] = self.records.apply(lambda row: cv2.contourArea(row['contour']), axis=1)
self.records['rotated_rect'] = self.records.apply(lambda row: cv2.minAreaRect(row['contour']), axis=1)
self.records['angle'] = self.records.apply(lambda row: row['rotated_rect'][2], axis=1)
def ship_summary_table(self):
if self.num_ships:
df_summary = self.records.copy()
df_summary.drop(['mask', 'contour', 'moments', 'rotated_rect', 'EncodedPixels'], axis=1, inplace=True)
df_summary.reset_index(drop=True, inplace=True)
df_summary.insert(0, 'ship', range(0, len(df_summary)))
logging.info("Generating summary table".format())
return df_summary.round(1)
else:
return None
def convert_rle_to_mask(self, rle, shape):
"""convert RLE mask into 2d pixel array"""
# Initialize a zero canvas (one-dimensional here)
mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
# Split each run-length string
s = rle.split()
for i in range(len(s) // 2):
start = int(s[2 * i]) - 1
length = int(s[2 * i + 1])
mask[start:start + length] = 1 # Assign this run to ones
# Reshape to 2D
img2 = mask.reshape(shape).T
return img2
def get_contour(self, mask):
"""Return a cv2 contour object from a binary 0/1 mask"""
assert mask.ndim == 2
assert mask.min() == 0
assert mask.max() == 1
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
assert len(contours) == 1, "Too many contours in this mask!"
contour = contours[0]
# logging.debug("Returning {} fit contours over mask pixels".format(len(contours)))
return contour
def | (self):
logging.info("Fitting and drawing ellipses on a new ndarray canvas.".format())
canvas = self.img
for idx, rec in self.records.iterrows():
# logging.debug("Processing record {} of {}".format(cnt, image_id))
# contour = imutils.get_contour(rec['mask'])
# img = imutils.draw_ellipse_and_axis(img, contour, thickness=2)
# print(rec)
# print(rec['contour'])
canvas = imutils.fit_draw_ellipse(canvas, rec['contour'], thickness=2)
return canvas
def k_means(self, num_clusters=2):
logging.info("Processing {} image of shape {}".format(self.encoding, self.img.shape))
data = self.img / 255
logging.info("Scaled values to 0-1 range".format())
data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
logging.info("Reshape to pixel list {}".format(data.shape))
kmeans = sk.cluster.MiniBatchKMeans(num_clusters)
kmeans.fit(data)
logging.info("Fit {} pixels into {} clusters".format(data.shape[0], num_clusters))
unique, counts = np.unique(kmeans.labels_, return_counts=True)
for c_name, c_count, c_position in zip(unique, counts, kmeans.cluster_centers_):
logging.info("\tCluster {} at {} with {:0.1%} of the pixels".format(c_name, np.around(c_position, 3), c_count/data.shape[0])),
if len(unique) == 2:
dist = np.linalg.norm(kmeans.cluster_centers_[0] - kmeans.cluster_centers_[1])
logging.info("Distance between c1 and c2: {}".format(dist))
return kmeans
# all_new_colors = kmeans.cluster_centers_[kmeans.predict(data)]
def fit_kmeans_pixels(img, kmeans):
"""
:param img: An RGB image
:param kmeans: The fit KMeans sci-kit object over this image
:return: A new image, fit to the clusters of the image
"""
original_pixels = img / 255
# original_pixels = image.img.copy() / 255
new_shape = original_pixels.shape[0] * original_pixels.shape[1], original_pixels.shape[2]
original_pixels = original_pixels.reshape(new_shape)
# logging.info("Reshape to pixel list {}".format(original_pixels.shape))
# logging.info("Changed values to | draw_ellipses_img | identifier_name |
reconciler.go | util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretAdmin)
}
// Reconcile reads that state of the cluster for a System object,
// and makes changes based on the state read and what is in the System.Spec.
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *Reconciler) Reconcile() (reconcile.Result, error) {
log := r.Logger.WithField("func", "Reconcile")
log.Infof("Start ...")
util.KubeCheck(r.NooBaa)
if r.NooBaa.UID == "" {
log.Infof("NooBaa not found or already deleted. Skip reconcile.")
return reconcile.Result{}, nil
}
err := r.RunReconcile()
if util.IsPersistentError(err) {
log.Errorf("❌ Persistent Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{}, nil
}
if err != nil {
log.Warnf("⏳ Temporary Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{RequeueAfter: 2 * time.Second}, nil
}
r.UpdateStatus()
log.Infof("✅ Done")
return reconcile.Result{}, nil
}
// UpdateStatus updates the system status in kubernetes from the memory
func (r *Reconciler) UpdateStatus() error {
log := r.Logger.WithField("func", "UpdateStatus")
log.Infof("Updating noobaa status")
r.NooBaa.Status.ObservedGeneration = r.NooBaa.Generation
return r.Client.Status().Update(r.Ctx, r.NooBaa)
}
// RunReconcile runs the reconcile flow and populates System.Status.
func (r *Reconciler) RunReconcile() error {
r.SetPhase(nbv1.SystemPhaseVerifying)
if err := r.CheckSystemCR(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseCreating)
if err := r.ReconcileSecretServer(); err != nil {
return err
}
if err := r.ReconcileObject(r.CoreApp, r.SetDesiredCoreApp); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceMgmt, r.SetDesiredServiceMgmt); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceS3, r.SetDesiredServiceS3); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConnecting)
if err := r.Connect(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConfiguring)
if err := r.ReconcileSecretOp(); err != nil {
return err
}
if err := r.ReconcileSecretAdmin(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseReady)
return r.Complete()
}
// ReconcileSecretServer creates a secret needed for the server pod
func (r *Reconciler) ReconcileSecretServer() error {
util.KubeCheck(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretServer)
if r.SecretServer.StringData["jwt"] == "" {
r.SecretServer.StringData["jwt"] = util.RandomBase64(16)
}
if r.SecretServer.StringData["server_secret"] == "" {
r.SecretServer.StringData["server_secret"] = util.RandomHex(4)
}
r.Own(r.SecretServer)
util.KubeCreateSkipExisting(r.SecretServer)
return nil
}
// SetDesiredCoreApp updates the CoreApp as desired for reconciling
func (r *Reconciler) SetDesiredCoreApp() {
r.CoreApp.Spec.Template.Labels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-mgmt"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-s3"] = r.Request.Name
r.CoreApp.Spec.Selector.MatchLabels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.ServiceName = r.ServiceMgmt.Name
podSpec := &r.CoreApp.Spec.Template.Spec
podSpec.ServiceAccountName = "noobaa-operator" // TODO do we use the same SA?
for i := range podSpec.InitContainers {
c := &podSpec.InitContainers[i]
if c.Name == "init-mongo" {
c.Image = r.NooBaa.Status.ActualImage
}
}
for i := range podSpec.Containers {
c := &podSpec.Containers[i]
if c.Name == "noobaa-server" {
c.Image = r.NooBaa.Status.ActualImage
for j := range c.Env {
if c.Env[j].Name == "AGENT_PROFILE" {
c.Env[j].Value = fmt.Sprintf(`{ "image": "%s" }`, r.NooBaa.Status.ActualImage)
}
}
if r.NooBaa.Spec.CoreResources != nil {
c.Resources = *r.NooBaa.Spec.CoreResources
}
} else if c.Name == "mongodb" {
if r.NooBaa.Spec.MongoImage == nil {
c.Image = options.MongoImage
} else {
c.Image = *r.NooBaa.Spec.MongoImage
}
if r.NooBaa.Spec.MongoResources != nil {
c.Resources = *r.NooBaa.Spec.MongoResources
}
}
}
if r.NooBaa.Spec.ImagePullSecret == nil {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{}
} else {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{*r.NooBaa.Spec.ImagePullSecret}
}
for i := range r.CoreApp.Spec.VolumeClaimTemplates {
pvc := &r.CoreApp.Spec.VolumeClaimTemplates[i]
pvc.Spec.StorageClassName = r.NooBaa.Spec.StorageClassName
// TODO we want to own the PVC's by NooBaa system but get errors on openshift:
// Warning FailedCreate 56s statefulset-controller
// create Pod noobaa-core-0 in StatefulSet noobaa-core failed error:
// Failed to create PVC mongo-datadir-noobaa-core-0:
// persistentvolumeclaims "mongo-datadir-noobaa-core-0" is forbidden:
// cannot set blockOwnerDeletion if an ownerReference refers to a resource
// you can't set finalizers on: , <nil>, ...
// r.Own(pvc)
}
}
// SetDesiredServiceMgmt updates the ServiceMgmt as desired for reconciling
func (r *Reconciler) SetDesiredServiceMgmt() {
r.ServiceMgmt.Spec.Selector["noobaa-mgmt"] = r.Request.Name
}
// SetDesiredServiceS3 updates the ServiceS3 as desired for reconciling
func (r *Reconciler) SetDesiredServiceS3() {
r.ServiceS3.Spec.Selector["noobaa-s3"] = r.Request.Name
}
// CheckSystemCR checks the validity of the system CR
// (i.e system.metadata.name and system.spec.image)
// and updates the status accordingly
func (r *Reconciler) CheckSystemCR() error {
log := r.Logger.WithField("func", "CheckSystemCR")
// we assume a single system per ns here
if r.NooBaa.Name != options.SystemName {
err := fmt.Errorf("Invalid system name %q expected %q", r.NooBaa.Name, options.SystemName)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadName", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
specImage := options.ContainerImage
if r.NooBaa.Spec.Image != nil {
specImage = *r.NooBaa.Spec.Image
}
// Parse the image spec as a docker image url
imageRef, err := dockerref.Parse(specImage)
// If the image cannot be parsed log the incident and mark as persistent error
// since we don't need to retry until the spec is updated.
if err != nil {
log.Errorf("Invalid image %s: %s", specImage, err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning,
"BadImage", `Invalid image requested %q`, specImage)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
// Get the image name and tag
imageName := ""
imageTag := ""
switch image := imageRef.(type) {
case dockerref.NamedTagged:
| util.KubeCheck(r.CoreApp)
util.KubeCheck(r.ServiceMgmt)
util.KubeCheck(r.ServiceS3)
util.KubeCheck(r.SecretServer)
util.KubeCheck(r.SecretOp) | random_line_split |
|
reconciler.go | : %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{}, nil
}
if err != nil {
log.Warnf("⏳ Temporary Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{RequeueAfter: 2 * time.Second}, nil
}
r.UpdateStatus()
log.Infof("✅ Done")
return reconcile.Result{}, nil
}
// UpdateStatus updates the system status in kubernetes from the memory
func (r *Reconciler) UpdateStatus() error {
log := r.Logger.WithField("func", "UpdateStatus")
log.Infof("Updating noobaa status")
r.NooBaa.Status.ObservedGeneration = r.NooBaa.Generation
return r.Client.Status().Update(r.Ctx, r.NooBaa)
}
// RunReconcile runs the reconcile flow and populates System.Status.
func (r *Reconciler) RunReconcile() error {
r.SetPhase(nbv1.SystemPhaseVerifying)
if err := r.CheckSystemCR(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseCreating)
if err := r.ReconcileSecretServer(); err != nil {
return err
}
if err := r.ReconcileObject(r.CoreApp, r.SetDesiredCoreApp); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceMgmt, r.SetDesiredServiceMgmt); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceS3, r.SetDesiredServiceS3); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConnecting)
if err := r.Connect(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConfiguring)
if err := r.ReconcileSecretOp(); err != nil {
return err
}
if err := r.ReconcileSecretAdmin(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseReady)
return r.Complete()
}
// ReconcileSecretServer creates a secret needed for the server pod
func (r *Reconciler) ReconcileSecretServer() error {
util.KubeCheck(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretServer)
if r.SecretServer.StringData["jwt"] == "" {
r.SecretServer.StringData["jwt"] = util.RandomBase64(16)
}
if r.SecretServer.StringData["server_secret"] == "" {
r.SecretServer.StringData["server_secret"] = util.RandomHex(4)
}
r.Own(r.SecretServer)
util.KubeCreateSkipExisting(r.SecretServer)
return nil
}
// SetDesiredCoreApp updates the CoreApp as desired for reconciling
func (r *Reconciler) SetDesiredCoreApp() {
r.CoreApp.Spec.Template.Labels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-mgmt"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-s3"] = r.Request.Name
r.CoreApp.Spec.Selector.MatchLabels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.ServiceName = r.ServiceMgmt.Name
podSpec := &r.CoreApp.Spec.Template.Spec
podSpec.ServiceAccountName = "noobaa-operator" // TODO do we use the same SA?
for i := range podSpec.InitContainers {
c := &podSpec.InitContainers[i]
if c.Name == "init-mongo" {
c.Image = r.NooBaa.Status.ActualImage
}
}
for i := range podSpec.Containers {
c := &podSpec.Containers[i]
if c.Name == "noobaa-server" {
c.Image = r.NooBaa.Status.ActualImage
for j := range c.Env {
| r.NooBaa.Spec.CoreResources != nil {
c.Resources = *r.NooBaa.Spec.CoreResources
}
} else if c.Name == "mongodb" {
if r.NooBaa.Spec.MongoImage == nil {
c.Image = options.MongoImage
} else {
c.Image = *r.NooBaa.Spec.MongoImage
}
if r.NooBaa.Spec.MongoResources != nil {
c.Resources = *r.NooBaa.Spec.MongoResources
}
}
}
if r.NooBaa.Spec.ImagePullSecret == nil {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{}
} else {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{*r.NooBaa.Spec.ImagePullSecret}
}
for i := range r.CoreApp.Spec.VolumeClaimTemplates {
pvc := &r.CoreApp.Spec.VolumeClaimTemplates[i]
pvc.Spec.StorageClassName = r.NooBaa.Spec.StorageClassName
// TODO we want to own the PVC's by NooBaa system but get errors on openshift:
// Warning FailedCreate 56s statefulset-controller
// create Pod noobaa-core-0 in StatefulSet noobaa-core failed error:
// Failed to create PVC mongo-datadir-noobaa-core-0:
// persistentvolumeclaims "mongo-datadir-noobaa-core-0" is forbidden:
// cannot set blockOwnerDeletion if an ownerReference refers to a resource
// you can't set finalizers on: , <nil>, ...
// r.Own(pvc)
}
}
// SetDesiredServiceMgmt updates the ServiceMgmt as desired for reconciling
func (r *Reconciler) SetDesiredServiceMgmt() {
r.ServiceMgmt.Spec.Selector["noobaa-mgmt"] = r.Request.Name
}
// SetDesiredServiceS3 updates the ServiceS3 as desired for reconciling
func (r *Reconciler) SetDesiredServiceS3() {
r.ServiceS3.Spec.Selector["noobaa-s3"] = r.Request.Name
}
// CheckSystemCR checks the validity of the system CR
// (i.e system.metadata.name and system.spec.image)
// and updates the status accordingly
func (r *Reconciler) CheckSystemCR() error {
log := r.Logger.WithField("func", "CheckSystemCR")
// we assume a single system per ns here
if r.NooBaa.Name != options.SystemName {
err := fmt.Errorf("Invalid system name %q expected %q", r.NooBaa.Name, options.SystemName)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadName", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
specImage := options.ContainerImage
if r.NooBaa.Spec.Image != nil {
specImage = *r.NooBaa.Spec.Image
}
// Parse the image spec as a docker image url
imageRef, err := dockerref.Parse(specImage)
// If the image cannot be parsed log the incident and mark as persistent error
// since we don't need to retry until the spec is updated.
if err != nil {
log.Errorf("Invalid image %s: %s", specImage, err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning,
"BadImage", `Invalid image requested %q`, specImage)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
// Get the image name and tag
imageName := ""
imageTag := ""
switch image := imageRef.(type) {
case dockerref.NamedTagged:
log.Infof("Parsed image (NamedTagged) %v", image)
imageName = image.Name()
imageTag = image.Tag()
case dockerref.Tagged:
log.Infof("Parsed image (Tagged) %v", image)
imageTag = image.Tag()
case dockerref.Named:
log.Infof("Parsed image (Named) %v", image)
imageName = image.Name()
default:
log.Infof("Parsed image (unstructured) %v", image)
}
if imageName == options.ContainerImageName {
version, err := semver.NewVersion(imageTag)
if err == nil {
log.Infof("Parsed version %q from image tag %q", version.String(), imageTag)
if !ContainerImageConstraint.Check(version) {
err := fmt.Errorf(`Unsupported image version %q not matching contraints %q`,
imageRef, ContainerImageConstraint)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadImage", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
} else {
log.Infof("Using custom image % | if c.Env[j].Name == "AGENT_PROFILE" {
c.Env[j].Value = fmt.Sprintf(`{ "image": "%s" }`, r.NooBaa.Status.ActualImage)
}
}
if | conditional_block |
reconciler.go | %v", image)
imageTag = image.Tag()
case dockerref.Named:
log.Infof("Parsed image (Named) %v", image)
imageName = image.Name()
default:
log.Infof("Parsed image (unstructured) %v", image)
}
if imageName == options.ContainerImageName {
version, err := semver.NewVersion(imageTag)
if err == nil {
log.Infof("Parsed version %q from image tag %q", version.String(), imageTag)
if !ContainerImageConstraint.Check(version) {
err := fmt.Errorf(`Unsupported image version %q not matching contraints %q`,
imageRef, ContainerImageConstraint)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadImage", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
} else {
log.Infof("Using custom image %q contraints %q", imageRef.String(), ContainerImageConstraint.String())
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image version requested %q, I hope you know what you're doing ...`, imageRef)
}
}
} else {
log.Infof("Using custom image name %q the default is %q", imageRef.String(), options.ContainerImageName)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image requested %q, I hope you know what you're doing ...`, imageRef)
}
}
// Set ActualImage to be updated in the noobaa status
r.NooBaa.Status.ActualImage = specImage
return nil
}
// CheckServiceStatus populates the status of a service by detecting all of its addresses
func (r *Reconciler) CheckServiceStatus(srv *corev1.Service, status *nbv1.ServiceStatus, portName string) {
log := r.Logger.WithField("func", "CheckServiceStatus").WithField("service", srv.Name)
*status = nbv1.ServiceStatus{}
servicePort := nb.FindPortByName(srv, portName)
proto := "http"
if strings.HasSuffix(portName, "https") {
proto = "https"
}
// Node IP:Port
// Pod IP:Port
pods := corev1.PodList{}
podsListOptions := &client.ListOptions{
Namespace: r.Request.Namespace,
LabelSelector: labels.SelectorFromSet(srv.Spec.Selector),
}
err := r.Client.List(r.Ctx, podsListOptions, &pods)
if err == nil {
for _, pod := range pods.Items {
if pod.Status.Phase == corev1.PodRunning {
if pod.Status.HostIP != "" {
status.NodePorts = append(
status.NodePorts,
fmt.Sprintf("%s://%s:%d", proto, pod.Status.HostIP, servicePort.NodePort),
)
}
if pod.Status.PodIP != "" {
status.PodPorts = append(
status.PodPorts,
fmt.Sprintf("%s://%s:%s", proto, pod.Status.PodIP, servicePort.TargetPort.String()),
)
}
}
}
}
// Cluster IP:Port (of the service)
if srv.Spec.ClusterIP != "" {
status.InternalIP = append(
status.InternalIP,
fmt.Sprintf("%s://%s:%d", proto, srv.Spec.ClusterIP, servicePort.Port),
)
status.InternalDNS = append(
status.InternalDNS,
fmt.Sprintf("%s://%s.%s:%d", proto, srv.Name, srv.Namespace, servicePort.Port),
)
}
// LoadBalancer IP:Port (of the service)
if srv.Status.LoadBalancer.Ingress != nil {
for _, lb := range srv.Status.LoadBalancer.Ingress {
if lb.IP != "" {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, lb.IP, servicePort.Port),
)
}
if lb.Hostname != "" {
status.ExternalDNS = append(
status.ExternalDNS,
fmt.Sprintf("%s://%s:%d", proto, lb.Hostname, servicePort.Port),
)
}
}
}
// External IP:Port (of the service)
if srv.Spec.ExternalIPs != nil {
for _, ip := range srv.Spec.ExternalIPs {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, ip, servicePort.Port),
)
}
}
log.Infof("Collected addresses: %+v", status)
}
// Connect initializes the noobaa client for making calls to the server.
func (r *Reconciler) Connect() error {
r.CheckServiceStatus(r.ServiceMgmt, &r.NooBaa.Status.Services.ServiceMgmt, "mgmt-https")
r.CheckServiceStatus(r.ServiceS3, &r.NooBaa.Status.Services.ServiceS3, "s3-https")
if len(r.NooBaa.Status.Services.ServiceMgmt.NodePorts) == 0 {
return fmt.Errorf("core pod port not ready yet")
}
nodePort := r.NooBaa.Status.Services.ServiceMgmt.NodePorts[0]
nodeIP := nodePort[strings.Index(nodePort, "://")+3 : strings.LastIndex(nodePort, ":")]
r.NBClient = nb.NewClient(&nb.APIRouterNodePort{
ServiceMgmt: r.ServiceMgmt,
NodeIP: nodeIP,
})
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// Check that the server is indeed serving the API already
// we use the read_auth call here because it's an API that always answers
// even when auth_token is empty.
_, err := r.NBClient.ReadAuthAPI()
return err
// if len(r.NooBaa.Status.Services.ServiceMgmt.PodPorts) != 0 {
// podPort := r.NooBaa.Status.Services.ServiceMgmt.PodPorts[0]
// podIP := podPort[strings.Index(podPort, "://")+3 : strings.LastIndex(podPort, ":")]
// r.NBClient = nb.NewClient(&nb.APIRouterPodPort{
// ServiceMgmt: r.ServiceMgmt,
// PodIP: podIP,
// })
// r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// return nil
// }
}
// ReconcileSecretOp creates a new system in the noobaa server if not created yet.
func (r *Reconciler) ReconcileSecretOp() error {
// log := r.Logger.WithName("ReconcileSecretOp")
util.KubeCheck(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretOp)
if r.SecretOp.StringData["auth_token"] != "" {
return nil
}
if r.SecretOp.StringData["email"] == "" {
r.SecretOp.StringData["email"] = options.AdminAccountEmail
}
if r.SecretOp.StringData["password"] == "" {
r.SecretOp.StringData["password"] = util.RandomBase64(16)
r.Own(r.SecretOp)
err := r.Client.Create(r.Ctx, r.SecretOp)
if err != nil {
return err
}
}
res, err := r.NBClient.CreateAuthAPI(nb.CreateAuthParams{
System: r.Request.Name,
Role: "admin",
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err == nil {
// TODO this recovery flow does not allow us to get OperatorToken like CreateSystem
r.SecretOp.StringData["auth_token"] = res.Token
} else {
res, err := r.NBClient.CreateSystemAPI(nb.CreateSystemParams{
Name: r.Request.Name,
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err != nil {
return err
}
// TODO use res.OperatorToken after https://github.com/noobaa/noobaa-core/issues/5635
r.SecretOp.StringData["auth_token"] = res.Token
}
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
return r.Client.Update(r.Ctx, r.SecretOp)
}
// ReconcileSecretAdmin creates the admin secret
func (r *Reconciler) ReconcileSecretAdmin() error {
lo | g := r.Logger.WithField("func", "ReconcileSecretAdmin")
util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretAdmin)
ns := r.Request.Namespace
name := r.Request.Name
secretAdminName := name + "-admin"
r.SecretAdmin = &corev1.Secret{}
err := r.GetObject(secretAdminName, r.SecretAdmin)
if err == nil {
return nil
}
if !errors.IsNotFound(err) {
log.Errorf("Failed getting admin secret: %v", err)
return err
}
r.SecretAdmin = &corev1.Secret{ | identifier_body |
|
reconciler.go | ("Collected addresses: %+v", status)
}
// Connect initializes the noobaa client for making calls to the server.
func (r *Reconciler) Connect() error {
r.CheckServiceStatus(r.ServiceMgmt, &r.NooBaa.Status.Services.ServiceMgmt, "mgmt-https")
r.CheckServiceStatus(r.ServiceS3, &r.NooBaa.Status.Services.ServiceS3, "s3-https")
if len(r.NooBaa.Status.Services.ServiceMgmt.NodePorts) == 0 {
return fmt.Errorf("core pod port not ready yet")
}
nodePort := r.NooBaa.Status.Services.ServiceMgmt.NodePorts[0]
nodeIP := nodePort[strings.Index(nodePort, "://")+3 : strings.LastIndex(nodePort, ":")]
r.NBClient = nb.NewClient(&nb.APIRouterNodePort{
ServiceMgmt: r.ServiceMgmt,
NodeIP: nodeIP,
})
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// Check that the server is indeed serving the API already
// we use the read_auth call here because it's an API that always answers
// even when auth_token is empty.
_, err := r.NBClient.ReadAuthAPI()
return err
// if len(r.NooBaa.Status.Services.ServiceMgmt.PodPorts) != 0 {
// podPort := r.NooBaa.Status.Services.ServiceMgmt.PodPorts[0]
// podIP := podPort[strings.Index(podPort, "://")+3 : strings.LastIndex(podPort, ":")]
// r.NBClient = nb.NewClient(&nb.APIRouterPodPort{
// ServiceMgmt: r.ServiceMgmt,
// PodIP: podIP,
// })
// r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// return nil
// }
}
// ReconcileSecretOp creates a new system in the noobaa server if not created yet.
func (r *Reconciler) ReconcileSecretOp() error {
// log := r.Logger.WithName("ReconcileSecretOp")
util.KubeCheck(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretOp)
if r.SecretOp.StringData["auth_token"] != "" {
return nil
}
if r.SecretOp.StringData["email"] == "" {
r.SecretOp.StringData["email"] = options.AdminAccountEmail
}
if r.SecretOp.StringData["password"] == "" {
r.SecretOp.StringData["password"] = util.RandomBase64(16)
r.Own(r.SecretOp)
err := r.Client.Create(r.Ctx, r.SecretOp)
if err != nil {
return err
}
}
res, err := r.NBClient.CreateAuthAPI(nb.CreateAuthParams{
System: r.Request.Name,
Role: "admin",
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err == nil {
// TODO this recovery flow does not allow us to get OperatorToken like CreateSystem
r.SecretOp.StringData["auth_token"] = res.Token
} else {
res, err := r.NBClient.CreateSystemAPI(nb.CreateSystemParams{
Name: r.Request.Name,
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err != nil {
return err
}
// TODO use res.OperatorToken after https://github.com/noobaa/noobaa-core/issues/5635
r.SecretOp.StringData["auth_token"] = res.Token
}
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
return r.Client.Update(r.Ctx, r.SecretOp)
}
// ReconcileSecretAdmin creates the admin secret
func (r *Reconciler) ReconcileSecretAdmin() error {
log := r.Logger.WithField("func", "ReconcileSecretAdmin")
util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretAdmin)
ns := r.Request.Namespace
name := r.Request.Name
secretAdminName := name + "-admin"
r.SecretAdmin = &corev1.Secret{}
err := r.GetObject(secretAdminName, r.SecretAdmin)
if err == nil {
return nil
}
if !errors.IsNotFound(err) {
log.Errorf("Failed getting admin secret: %v", err)
return err
}
r.SecretAdmin = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: secretAdminName,
Labels: map[string]string{"app": "noobaa"},
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"system": name,
"email": options.AdminAccountEmail,
"password": string(r.SecretOp.Data["password"]),
},
}
log.Infof("listing accounts")
res, err := r.NBClient.ListAccountsAPI()
if err != nil {
return err
}
for _, account := range res.Accounts {
if account.Email == options.AdminAccountEmail {
if len(account.AccessKeys) > 0 {
r.SecretAdmin.StringData["AWS_ACCESS_KEY_ID"] = account.AccessKeys[0].AccessKey
r.SecretAdmin.StringData["AWS_SECRET_ACCESS_KEY"] = account.AccessKeys[0].SecretKey
}
}
}
r.Own(r.SecretAdmin)
return r.Client.Create(r.Ctx, r.SecretAdmin)
}
var readmeTemplate = template.Must(template.New("NooBaaSystem.Status.Readme").Parse(`
Welcome to NooBaa!
-----------------
Lets get started:
1. Connect to Management console:
Read your mgmt console login information (email & password) from secret: "{{.SecretAdmin.Name}}".
kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq '.data|map_values(@base64d)'
Open the management console service - take External IP/DNS or Node Port or use port forwarding:
kubectl port-forward -n {{.ServiceMgmt.Namespace}} service/{{.ServiceMgmt.Name}} 11443:8443 &
open https://localhost:11443
2. Test S3 client:
kubectl port-forward -n {{.ServiceS3.Namespace}} service/{{.ServiceS3.Name}} 10443:443 &
NOOBAA_ACCESS_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_ACCESS_KEY_ID|@base64d')
NOOBAA_SECRET_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_SECRET_ACCESS_KEY|@base64d')
alias s3='AWS_ACCESS_KEY_ID=$NOOBAA_ACCESS_KEY AWS_SECRET_ACCESS_KEY=$NOOBAA_SECRET_KEY aws --endpoint https://localhost:10443 --no-verify-ssl s3'
s3 ls
`))
// Complete populates the noobaa status at the end of reconcile.
func (r *Reconciler) Complete() error {
var readmeBuffer bytes.Buffer
err := readmeTemplate.Execute(&readmeBuffer, r)
if err != nil {
return err
}
r.NooBaa.Status.Readme = readmeBuffer.String()
r.NooBaa.Status.Accounts.Admin.SecretRef.Name = r.SecretAdmin.Name
r.NooBaa.Status.Accounts.Admin.SecretRef.Namespace = r.SecretAdmin.Namespace
return nil
}
// Own sets the object owner references to the noobaa system
func (r *Reconciler) Own(obj metav1.Object) {
util.Panic(controllerutil.SetControllerReference(r.NooBaa, obj, r.Scheme))
}
// GetObject gets an object by name from the request namespace.
func (r *Reconciler) GetObject(name string, obj runtime.Object) error {
return r.Client.Get(r.Ctx, client.ObjectKey{Namespace: r.Request.Namespace, Name: name}, obj)
}
// ReconcileObject is a generic call to reconcile a kubernetes object
// desiredFunc can be passed to modify the object before create/update.
// Currently we ignore enforcing a desired state, but it might be needed on upgrades.
func (r *Reconciler) ReconcileObject(obj runtime.Object, desiredFunc func()) error {
kind := obj.GetObjectKind().GroupVersionKind().Kind
objMeta, _ := meta.Accessor(obj)
log := r.Logger.
WithField("func", "ReconcileObject").
WithField("kind", kind).
WithField("name", objMeta.GetName())
r.Own(objMeta)
op, err := controllerutil.CreateOrUpdate(
r.Ctx, r.Client, obj.(runtime.Object),
func(obj runtime.Object) error {
if desiredFunc != nil {
desiredFunc()
}
return nil
},
)
if err != nil {
log.Errorf("ReconcileObject Failed: %v", err)
return err
}
log.Infof("Done. op=%s", op)
return nil
}
// SetPhase updates the status phase and conditions
func (r *Reconciler) SetPha | se(phase | identifier_name |
|
lib.rs | /// Type for wrapping Vec<u8> data in cases you need to do a convenient
/// enum variant display derives with `#[display(inner)]`
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate", transparent)
)]
#[derive(
Wrapper, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default, Display,
From
)]
#[derive(StrictEncode, StrictDecode)]
#[wrap(
Index,
IndexMut,
IndexRange,
IndexFull,
IndexFrom,
IndexTo,
IndexInclusive
)]
#[display(Vec::bech32_data_string)]
// We get `(To)Bech32DataString` and `FromBech32DataString` for free b/c
// the wrapper creates `From<Vec<u8>>` impl for us, which with rust stdlib
// implies `TryFrom<Vec<u8>>`, for which we have auto trait derivation
// `FromBech32Payload`, for which the traits above are automatically derived
pub struct Blob(
#[cfg_attr(feature = "serde", serde(with = "As::<Hex>"))] Vec<u8>,
);
impl AsRef<[u8]> for Blob {
fn as_ref(&self) -> &[u8] { &self.0 }
}
impl Debug for Blob {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "Blob({})", self.0.to_hex())
}
}
impl FromStr for Blob {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Blob::from_bech32_data_str(s)
}
}
/// Convertor trait for extracting data from a given type which will be part of
/// Bech32 payload
pub trait ToBech32Payload {
/// Must return a vector with Bech32 payload data
fn to_bech32_payload(&self) -> Vec<u8>;
}
/// Extracts data representation from a given type which will be part of Bech32
/// payload
pub trait AsBech32Payload {
/// Must return a reference to a slice representing Bech32 payload data
fn as_bech32_payload(&self) -> &[u8];
}
impl<T> AsBech32Payload for T
where
T: AsRef<[u8]>,
{
fn as_bech32_payload(&self) -> &[u8] { self.as_ref() }
}
/// Convertor which constructs a given type from Bech32 payload data
pub trait FromBech32Payload
where
Self: Sized,
{
/// Construct type from Bech32 payload data
fn from_bech32_payload(payload: Vec<u8>) -> Result<Self, Error>;
}
impl<T> FromBech32Payload for T
where
T: TryFrom<Vec<u8>>,
Error: From<T::Error>,
{
fn from_bech32_payload(payload: Vec<u8>) -> Result<T, Error> {
Ok(T::try_from(payload)?)
}
}
// -- Common (non-LNPBP-39) traits
/// Creates Bech32 string with appropriate type data representation.
/// Depending on the specific type, this may be `id`-string, `data`-string,
/// `z`-string or other type of HRP.
pub trait ToBech32String {
/// Creates Bech32 string with appropriate type data representation
fn to_bech32_string(&self) -> String;
}
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
pub trait FromBech32Str {
/// Specifies which HRP is used by Bech32 string representing this data type
const HRP: &'static str;
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
fn from_bech32_str(s: &str) -> Result<Self, Error>
where
Self: Sized;
}
/// Strategies for automatic implementation of the Bech32 traits
pub mod strategies {
use amplify::{Holder, Wrapper};
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
/// Strategy for Bech32 representation as uncompressed data (starting from
/// `data1...` HRP). The data are takken by using [`StrictEncode`]
/// implementation defined for the type.
pub struct UsingStrictEncoding;
/// Strategy for Bech32 representation of the newtypes wrapping other types.
/// The strategy simply inherits Bech32 representation from the inner type.
pub struct Wrapped;
#[cfg(feature = "zip")]
/// Strategy for Bech32 representation as compressed data (starting from
/// `z1...` HRP). The data are takken by using [`StrictEncode`]
/// implementation defined for the type.
pub struct CompressedStrictEncoding;
/// Helper trait for implementing specific strategy for Bech32 construction
pub trait Strategy {
/// Bech32 HRP prefix used by a type
const HRP: &'static str;
/// Specific strategy used for automatic implementation of all
/// Bech32-related traits.
type Strategy;
}
impl<T> ToBech32String for T
where
T: Strategy + Clone,
Holder<T, <T as Strategy>::Strategy>: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
Holder::new(self.clone()).to_bech32_string()
}
}
impl<T> FromBech32Str for T
where
T: Strategy,
Holder<T, <T as Strategy>::Strategy>: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Holder::from_bech32_str(s)?.into_inner())
}
}
impl<T> ToBech32String for Holder<T, Wrapped>
where
T: Wrapper,
T::Inner: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
self.as_inner().as_inner().to_bech32_string()
}
}
impl<T> FromBech32Str for Holder<T, Wrapped>
where
T: Wrapper + Strategy,
T::Inner: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::from_inner(T::Inner::from_bech32_str(s)?)))
}
}
impl<T> ToBech32String for Holder<T, UsingStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
::bech32::encode(T::HRP, data.to_base32(), Variant::Bech32m)
.unwrap_or_else(|_| s!("Error: wrong bech32 prefix"))
}
}
impl<T> FromBech32Str for Holder<T, UsingStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = ::bech32::decode(s)?;
if hrp.as_str() != Self::HRP {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Ok(Self::new(T::strict_deserialize(Vec::<u8>::from_base32(
&data,
)?)?))
}
}
}
pub use strategies::Strategy;
// -- Sealed traits & their implementation
/// Special trait for preventing implementation of [`FromBech32DataStr`] and
/// others from outside of this crate. For details see
/// <https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed>
mod sealed {
use amplify::Wrapper;
use super::*;
pub trait HashType<Tag>: Wrapper<Inner = sha256t::Hash<Tag>>
where
Tag: sha256t::Tag,
{
}
pub trait ToPayload: ToBech32Payload {}
pub trait AsPayload: AsBech32Payload {}
pub trait FromPayload: FromBech32Payload {}
impl<T, Tag> HashType<Tag> for T
where
T: Wrapper<Inner = sha256t::Hash<Tag>>,
Tag: sha256t::Tag,
{
}
impl<T> ToPayload for T where T: ToBech32Payload {}
impl<T> AsPayload for T where T: AsBech3 | }
| random_line_split |
|
lib.rs |
fn to_bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.to_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> ToBech32DataString for T where T: sealed::ToPayload {}
/// Trait for creating `data1...` Bech32 representation of a given type
pub trait Bech32DataString: sealed::AsPayload {
/// Returns `data1...` Bech32 representation of a given type
fn bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.as_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> Bech32DataString for T where T: sealed::AsPayload {}
/// Trait for reconstruction type data from `data1...` Bech32 string
pub trait FromBech32DataStr
where
Self: Sized + sealed::FromPayload,
{
/// Reconstructs type data from `data1...` Bech32 string
fn from_bech32_data_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = bech32::decode(s)?;
if hrp != HRP_DATA {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Self::from_bech32_payload(Vec::<u8>::from_base32(&data)?)
}
}
impl<T> FromBech32DataStr for T where T: sealed::FromPayload {}
#[doc(hidden)]
#[cfg(feature = "zip")]
pub mod zip {
use amplify::Holder;
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
fn payload_to_bech32_zip_string(hrp: &str, payload: &[u8]) -> String {
use std::io::Write;
// We initialize writer with a version byte, indicating deflation
// algorithm used
let writer = vec![RAW_DATA_ENCODING_DEFLATE];
let mut encoder = DeflateEncoder::new(writer, Compression::Best);
encoder
.write_all(payload)
.expect("in-memory strict encoder failure");
let data = encoder.finish().expect("zip algorithm failure");
::bech32::encode(hrp, data.to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
fn bech32_zip_str_to_payload(hrp: &str, s: &str) -> Result<Vec<u8>, Error> {
let (prefix, data, version) = bech32::decode(s)?;
if prefix != hrp {
return Err(Error::WrongPrefix);
}
if version != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let data = Vec::<u8>::from_base32(&data)?;
match *data[..].first().ok_or(Error::NoEncodingPrefix)? {
RAW_DATA_ENCODING_DEFLATE => {
let decoded = inflate::inflate_bytes(&data[1..])
.map_err(Error::InflateError)?;
Ok(decoded)
}
unknown_ver => Err(Error::UnknownRawDataEncoding(unknown_ver)),
}
}
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait ToBech32ZipString: sealed::ToPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn to_bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, &self.to_bech32_payload())
}
}
impl<T> ToBech32ZipString for T where T: sealed::ToPayload {}
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait Bech32ZipString: sealed::AsPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, self.as_bech32_payload())
}
}
impl<T> Bech32ZipString for T where T: sealed::AsPayload {}
/// Trait for reconstruction type data from `z1...` (compressed binary data
/// blob) Bech32 string
pub trait FromBech32ZipStr: sealed::FromPayload {
/// Reconstructs type data from `z1...` (compressed binary data blob)
/// Bech32 string
fn from_bech32_zip_str(s: &str) -> Result<Self, Error> {
Self::from_bech32_payload(bech32_zip_str_to_payload(HRP_ZIP, s)?)
}
}
impl<T> FromBech32ZipStr for T where T: sealed::FromPayload {}
impl<T> ToBech32String for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
payload_to_bech32_zip_string(T::HRP, &data)
}
}
impl<T> FromBech32Str for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::strict_deserialize(
bech32_zip_str_to_payload(Self::HRP, s)?,
)?))
}
}
}
#[cfg(feature = "zip")]
pub use zip::*;
/// Trait representing given bitcoin hash type as a Bech32 `id1...` value
pub trait ToBech32IdString<Tag>
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
/// Returns Bech32-encoded string in form of `id1...` representing the type
fn to_bech32_id_string(&self) -> String;
}
/// Trait that can generate the type from a given Bech32 `id1...` value
pub trait FromBech32IdStr<Tag>
where
Self: sealed::HashType<Tag> + Sized,
Tag: sha256t::Tag,
{
/// Reconstructs the identifier type from the provided Bech32 `id1...`
/// string
fn from_bech32_id_str(s: &str) -> Result<Self, Error>;
}
impl<T, Tag> ToBech32IdString<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn to_bech32_id_string(&self) -> String {
::bech32::encode(HRP_ID, self.to_inner().to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T, Tag> FromBech32IdStr<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn from_bech32_id_str(s: &str) -> Result<T, Error> {
let (hrp, id, variant) = ::bech32::decode(s)?;
if hrp != HRP_ID {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let vec = Vec::<u8>::from_base32(&id)?;
Ok(Self::from_inner(Self::Inner::from_slice(&vec)?))
}
}
/// Helper method for serde serialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn serialize<T, S>(data: &T, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: ToBech32String,
{
serializer.serialize_str(&data.to_bech32_string())
}
/// Helper method for serde deserialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: FromBech32Str,
{
deserializer.deserialize_str(Bech32Visitor::<T>(std::marker::PhantomData))
}
#[cfg(feature = "serde")]
struct | Bech32Visitor | identifier_name |
|
lib.rs | pcMessage::Subscribe(msg)
}
}
/// A channel to a `RpcClient`.
#[derive(Clone)]
pub struct RpcChannel(mpsc::Sender<RpcMessage>);
impl RpcChannel {
fn send(
&self,
msg: RpcMessage,
) -> impl Future<Item = mpsc::Sender<RpcMessage>, Error = mpsc::SendError<RpcMessage>> {
self.0.to_owned().send(msg)
}
}
impl From<mpsc::Sender<RpcMessage>> for RpcChannel {
fn from(sender: mpsc::Sender<RpcMessage>) -> Self {
RpcChannel(sender)
}
}
/// The future returned by the rpc call.
pub struct RpcFuture {
recv: oneshot::Receiver<Result<Value, RpcError>>,
}
impl RpcFuture {
/// Creates a new `RpcFuture`.
pub fn new(recv: oneshot::Receiver<Result<Value, RpcError>>) -> Self {
RpcFuture { recv }
}
}
impl Future for RpcFuture {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
// TODO should timeout (#410)
match self.recv.poll() {
Ok(Async::Ready(Ok(value))) => Ok(Async::Ready(value)),
Ok(Async::Ready(Err(error))) => Err(error),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(error) => Err(RpcError::Other(error.into())),
}
}
}
/// The stream returned by a subscribe.
pub struct SubscriptionStream {
recv: mpsc::Receiver<Result<Value, RpcError>>,
}
impl SubscriptionStream {
/// Crates a new `SubscriptionStream`.
pub fn new(recv: mpsc::Receiver<Result<Value, RpcError>>) -> Self {
SubscriptionStream { recv }
}
}
impl Stream for SubscriptionStream {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
match self.recv.poll() {
Ok(Async::Ready(Some(Ok(value)))) => Ok(Async::Ready(Some(value))),
Ok(Async::Ready(Some(Err(error)))) => Err(error),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(()) => Err(RpcError::Other(format_err!("mpsc channel returned an error."))),
}
}
}
/// A typed subscription stream.
pub struct TypedSubscriptionStream<T> {
_marker: PhantomData<T>,
returns: &'static str,
stream: SubscriptionStream,
}
impl<T> TypedSubscriptionStream<T> {
/// Creates a new `TypedSubscriptionStream`.
pub fn new(stream: SubscriptionStream, returns: &'static str) -> Self {
TypedSubscriptionStream {
_marker: PhantomData,
returns,
stream,
}
}
}
impl<T: DeserializeOwned + 'static> Stream for TypedSubscriptionStream<T> {
type Item = T;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
let result = match self.stream.poll()? {
Async::Ready(Some(value)) => serde_json::from_value::<T>(value)
.map(|result| Async::Ready(Some(result)))
.map_err(|error| RpcError::ParseError(self.returns.into(), error.into()))?,
Async::Ready(None) => Async::Ready(None),
Async::NotReady => Async::NotReady,
};
Ok(result)
}
}
/// Client for raw JSON RPC requests
#[derive(Clone)]
pub struct RawClient(RpcChannel);
impl From<RpcChannel> for RawClient {
fn from(channel: RpcChannel) -> Self {
RawClient(channel)
}
}
impl RawClient {
/// Call RPC method with raw JSON.
pub fn call_method(&self, method: &str, params: Params) -> impl Future<Item = Value, Error = RpcError> {
let (sender, receiver) = oneshot::channel();
let msg = CallMessage {
method: method.into(),
params,
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.and_then(|_| RpcFuture::new(receiver))
}
/// Send RPC notification with raw JSON.
pub fn notify(&self, method: &str, params: Params) -> impl Future<Item = (), Error = RpcError> {
let msg = NotifyMessage {
method: method.into(),
params,
};
self.0
.send(msg.into())
.map(|_| ())
.map_err(|error| RpcError::Other(error.into()))
}
/// Subscribe to topic with raw JSON.
pub fn subscribe(
&self,
subscribe: &str,
subscribe_params: Params,
notification: &str,
unsubscribe: &str,
) -> impl Future<Item = SubscriptionStream, Error = RpcError> {
let (sender, receiver) = mpsc::channel(0);
let msg = SubscribeMessage {
subscription: Subscription {
subscribe: subscribe.into(),
subscribe_params,
notification: notification.into(),
unsubscribe: unsubscribe.into(),
},
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.map(|_| SubscriptionStream::new(receiver))
}
}
/// Client for typed JSON RPC requests
#[derive(Clone)]
pub struct TypedClient(RawClient);
impl From<RpcChannel> for TypedClient {
fn from(channel: RpcChannel) -> Self {
TypedClient(channel.into())
}
}
impl TypedClient {
/// Create a new `TypedClient`.
pub fn new(raw_cli: RawClient) -> Self {
TypedClient(raw_cli)
}
/// Call RPC with serialization of request and deserialization of response.
pub fn call_method<T: Serialize, R: DeserializeOwned + 'static>(
&self,
method: &str,
returns: &'static str,
args: T,
) -> impl Future<Item = R, Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
Value::Object(map) => Params::Map(map),
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, JSON object or null"
))))
}
};
future::Either::B(self.0.call_method(method, params).and_then(move |value: Value| {
log::debug!("response: {:?}", value);
let result =
serde_json::from_value::<R>(value).map_err(|error| RpcError::ParseError(returns.into(), error.into()));
future::done(result)
}))
}
/// Call RPC with serialization of request only.
pub fn notify<T: Serialize>(&self, method: &str, args: T) -> impl Future<Item = (), Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
future::Either::B(self.0.notify(method, params))
}
/// Subscribe with serialization of request and deserialization of response.
pub fn subscribe<T: Serialize, R: DeserializeOwned + 'static>(
&self,
subscribe: &str,
subscribe_params: T,
topic: &str,
unsubscribe: &str,
returns: &'static str,
) -> impl Future<Item = TypedSubscriptionStream<R>, Error = RpcError> {
let args = serde_json::to_value(subscribe_params)
.expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
let typed_stream = self
.0
.subscribe(subscribe, params, topic, unsubscribe)
.map(move |stream| TypedSubscriptionStream::new(stream, returns));
future::Either::B(typed_stream)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::transports::local; | use crate::{RpcChannel, RpcError, TypedClient};
use jsonrpc_core::{self as core, IoHandler};
use jsonrpc_pubsub::{PubSubHandler, Subscriber, SubscriptionId};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; | random_line_split |
|
lib.rs | Message {
fn from(msg: CallMessage) -> Self {
RpcMessage::Call(msg)
}
}
impl From<NotifyMessage> for RpcMessage {
fn from(msg: NotifyMessage) -> Self {
RpcMessage::Notify(msg)
}
}
impl From<SubscribeMessage> for RpcMessage {
fn from(msg: SubscribeMessage) -> Self {
RpcMessage::Subscribe(msg)
}
}
/// A channel to a `RpcClient`.
#[derive(Clone)]
pub struct RpcChannel(mpsc::Sender<RpcMessage>);
impl RpcChannel {
fn send(
&self,
msg: RpcMessage,
) -> impl Future<Item = mpsc::Sender<RpcMessage>, Error = mpsc::SendError<RpcMessage>> {
self.0.to_owned().send(msg)
}
}
impl From<mpsc::Sender<RpcMessage>> for RpcChannel {
fn from(sender: mpsc::Sender<RpcMessage>) -> Self {
RpcChannel(sender)
}
}
/// The future returned by the rpc call.
pub struct RpcFuture {
recv: oneshot::Receiver<Result<Value, RpcError>>,
}
impl RpcFuture {
/// Creates a new `RpcFuture`.
pub fn new(recv: oneshot::Receiver<Result<Value, RpcError>>) -> Self {
RpcFuture { recv }
}
}
impl Future for RpcFuture {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
// TODO should timeout (#410)
match self.recv.poll() {
Ok(Async::Ready(Ok(value))) => Ok(Async::Ready(value)),
Ok(Async::Ready(Err(error))) => Err(error),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(error) => Err(RpcError::Other(error.into())),
}
}
}
/// The stream returned by a subscribe.
pub struct SubscriptionStream {
recv: mpsc::Receiver<Result<Value, RpcError>>,
}
impl SubscriptionStream {
/// Crates a new `SubscriptionStream`.
pub fn new(recv: mpsc::Receiver<Result<Value, RpcError>>) -> Self {
SubscriptionStream { recv }
}
}
impl Stream for SubscriptionStream {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
match self.recv.poll() {
Ok(Async::Ready(Some(Ok(value)))) => Ok(Async::Ready(Some(value))),
Ok(Async::Ready(Some(Err(error)))) => Err(error),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(()) => Err(RpcError::Other(format_err!("mpsc channel returned an error."))),
}
}
}
/// A typed subscription stream.
pub struct TypedSubscriptionStream<T> {
_marker: PhantomData<T>,
returns: &'static str,
stream: SubscriptionStream,
}
impl<T> TypedSubscriptionStream<T> {
/// Creates a new `TypedSubscriptionStream`.
pub fn new(stream: SubscriptionStream, returns: &'static str) -> Self {
TypedSubscriptionStream {
_marker: PhantomData,
returns,
stream,
}
}
}
impl<T: DeserializeOwned + 'static> Stream for TypedSubscriptionStream<T> {
type Item = T;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
let result = match self.stream.poll()? {
Async::Ready(Some(value)) => serde_json::from_value::<T>(value)
.map(|result| Async::Ready(Some(result)))
.map_err(|error| RpcError::ParseError(self.returns.into(), error.into()))?,
Async::Ready(None) => Async::Ready(None),
Async::NotReady => Async::NotReady,
};
Ok(result)
}
}
/// Client for raw JSON RPC requests
#[derive(Clone)]
pub struct RawClient(RpcChannel);
impl From<RpcChannel> for RawClient {
fn from(channel: RpcChannel) -> Self {
RawClient(channel)
}
}
impl RawClient {
/// Call RPC method with raw JSON.
pub fn call_method(&self, method: &str, params: Params) -> impl Future<Item = Value, Error = RpcError> {
let (sender, receiver) = oneshot::channel();
let msg = CallMessage {
method: method.into(),
params,
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.and_then(|_| RpcFuture::new(receiver))
}
/// Send RPC notification with raw JSON.
pub fn notify(&self, method: &str, params: Params) -> impl Future<Item = (), Error = RpcError> {
let msg = NotifyMessage {
method: method.into(),
params,
};
self.0
.send(msg.into())
.map(|_| ())
.map_err(|error| RpcError::Other(error.into()))
}
/// Subscribe to topic with raw JSON.
pub fn subscribe(
&self,
subscribe: &str,
subscribe_params: Params,
notification: &str,
unsubscribe: &str,
) -> impl Future<Item = SubscriptionStream, Error = RpcError> {
let (sender, receiver) = mpsc::channel(0);
let msg = SubscribeMessage {
subscription: Subscription {
subscribe: subscribe.into(),
subscribe_params,
notification: notification.into(),
unsubscribe: unsubscribe.into(),
},
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.map(|_| SubscriptionStream::new(receiver))
}
}
/// Client for typed JSON RPC requests
#[derive(Clone)]
pub struct TypedClient(RawClient);
impl From<RpcChannel> for TypedClient {
fn from(channel: RpcChannel) -> Self {
TypedClient(channel.into())
}
}
impl TypedClient {
/// Create a new `TypedClient`.
pub fn new(raw_cli: RawClient) -> Self {
TypedClient(raw_cli)
}
/// Call RPC with serialization of request and deserialization of response.
pub fn call_method<T: Serialize, R: DeserializeOwned + 'static>(
&self,
method: &str,
returns: &'static str,
args: T,
) -> impl Future<Item = R, Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
Value::Object(map) => Params::Map(map),
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, JSON object or null"
))))
}
};
future::Either::B(self.0.call_method(method, params).and_then(move |value: Value| {
log::debug!("response: {:?}", value);
let result =
serde_json::from_value::<R>(value).map_err(|error| RpcError::ParseError(returns.into(), error.into()));
future::done(result)
}))
}
/// Call RPC with serialization of request only.
pub fn notify<T: Serialize>(&self, method: &str, args: T) -> impl Future<Item = (), Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
future::Either::B(self.0.notify(method, params))
}
/// Subscribe with serialization of request and deserialization of response.
pub fn subscribe<T: Serialize, R: DeserializeOwned + 'static>(
&self,
subscribe: &str,
subscribe_params: T,
topic: &str,
unsubscribe: &str,
returns: &'static str,
) -> impl Future<Item = TypedSubscriptionStream<R>, Error = RpcError> | {
let args = serde_json::to_value(subscribe_params)
.expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
let typed_stream = self
.0
.subscribe(subscribe, params, topic, unsubscribe)
.map(move |stream| TypedSubscriptionStream::new(stream, returns));
future::Either::B(typed_stream)
} | identifier_body |
|
lib.rs | Subscribe(SubscribeMessage),
}
impl From<CallMessage> for RpcMessage {
fn from(msg: CallMessage) -> Self {
RpcMessage::Call(msg)
}
}
impl From<NotifyMessage> for RpcMessage {
fn from(msg: NotifyMessage) -> Self {
RpcMessage::Notify(msg)
}
}
impl From<SubscribeMessage> for RpcMessage {
fn from(msg: SubscribeMessage) -> Self {
RpcMessage::Subscribe(msg)
}
}
/// A channel to a `RpcClient`.
#[derive(Clone)]
pub struct RpcChannel(mpsc::Sender<RpcMessage>);
impl RpcChannel {
fn send(
&self,
msg: RpcMessage,
) -> impl Future<Item = mpsc::Sender<RpcMessage>, Error = mpsc::SendError<RpcMessage>> {
self.0.to_owned().send(msg)
}
}
impl From<mpsc::Sender<RpcMessage>> for RpcChannel {
fn from(sender: mpsc::Sender<RpcMessage>) -> Self {
RpcChannel(sender)
}
}
/// The future returned by the rpc call.
pub struct RpcFuture {
recv: oneshot::Receiver<Result<Value, RpcError>>,
}
impl RpcFuture {
/// Creates a new `RpcFuture`.
pub fn new(recv: oneshot::Receiver<Result<Value, RpcError>>) -> Self {
RpcFuture { recv }
}
}
impl Future for RpcFuture {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
// TODO should timeout (#410)
match self.recv.poll() {
Ok(Async::Ready(Ok(value))) => Ok(Async::Ready(value)),
Ok(Async::Ready(Err(error))) => Err(error),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(error) => Err(RpcError::Other(error.into())),
}
}
}
/// The stream returned by a subscribe.
pub struct SubscriptionStream {
recv: mpsc::Receiver<Result<Value, RpcError>>,
}
impl SubscriptionStream {
/// Crates a new `SubscriptionStream`.
pub fn new(recv: mpsc::Receiver<Result<Value, RpcError>>) -> Self {
SubscriptionStream { recv }
}
}
impl Stream for SubscriptionStream {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
match self.recv.poll() {
Ok(Async::Ready(Some(Ok(value)))) => Ok(Async::Ready(Some(value))),
Ok(Async::Ready(Some(Err(error)))) => Err(error),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(()) => Err(RpcError::Other(format_err!("mpsc channel returned an error."))),
}
}
}
/// A typed subscription stream.
pub struct TypedSubscriptionStream<T> {
_marker: PhantomData<T>,
returns: &'static str,
stream: SubscriptionStream,
}
impl<T> TypedSubscriptionStream<T> {
/// Creates a new `TypedSubscriptionStream`.
pub fn new(stream: SubscriptionStream, returns: &'static str) -> Self {
TypedSubscriptionStream {
_marker: PhantomData,
returns,
stream,
}
}
}
impl<T: DeserializeOwned + 'static> Stream for TypedSubscriptionStream<T> {
type Item = T;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
let result = match self.stream.poll()? {
Async::Ready(Some(value)) => serde_json::from_value::<T>(value)
.map(|result| Async::Ready(Some(result)))
.map_err(|error| RpcError::ParseError(self.returns.into(), error.into()))?,
Async::Ready(None) => Async::Ready(None),
Async::NotReady => Async::NotReady,
};
Ok(result)
}
}
/// Client for raw JSON RPC requests
#[derive(Clone)]
pub struct RawClient(RpcChannel);
impl From<RpcChannel> for RawClient {
fn from(channel: RpcChannel) -> Self {
RawClient(channel)
}
}
impl RawClient {
/// Call RPC method with raw JSON.
pub fn call_method(&self, method: &str, params: Params) -> impl Future<Item = Value, Error = RpcError> {
let (sender, receiver) = oneshot::channel();
let msg = CallMessage {
method: method.into(),
params,
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.and_then(|_| RpcFuture::new(receiver))
}
/// Send RPC notification with raw JSON.
pub fn notify(&self, method: &str, params: Params) -> impl Future<Item = (), Error = RpcError> {
let msg = NotifyMessage {
method: method.into(),
params,
};
self.0
.send(msg.into())
.map(|_| ())
.map_err(|error| RpcError::Other(error.into()))
}
/// Subscribe to topic with raw JSON.
pub fn subscribe(
&self,
subscribe: &str,
subscribe_params: Params,
notification: &str,
unsubscribe: &str,
) -> impl Future<Item = SubscriptionStream, Error = RpcError> {
let (sender, receiver) = mpsc::channel(0);
let msg = SubscribeMessage {
subscription: Subscription {
subscribe: subscribe.into(),
subscribe_params,
notification: notification.into(),
unsubscribe: unsubscribe.into(),
},
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.map(|_| SubscriptionStream::new(receiver))
}
}
/// Client for typed JSON RPC requests
#[derive(Clone)]
pub struct TypedClient(RawClient);
impl From<RpcChannel> for TypedClient {
fn from(channel: RpcChannel) -> Self {
TypedClient(channel.into())
}
}
impl TypedClient {
/// Create a new `TypedClient`.
pub fn new(raw_cli: RawClient) -> Self {
TypedClient(raw_cli)
}
/// Call RPC with serialization of request and deserialization of response.
pub fn call_method<T: Serialize, R: DeserializeOwned + 'static>(
&self,
method: &str,
returns: &'static str,
args: T,
) -> impl Future<Item = R, Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
Value::Object(map) => Params::Map(map),
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, JSON object or null"
))))
}
};
future::Either::B(self.0.call_method(method, params).and_then(move |value: Value| {
log::debug!("response: {:?}", value);
let result =
serde_json::from_value::<R>(value).map_err(|error| RpcError::ParseError(returns.into(), error.into()));
future::done(result)
}))
}
/// Call RPC with serialization of request only.
pub fn notify<T: Serialize>(&self, method: &str, args: T) -> impl Future<Item = (), Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
future::Either::B(self.0.notify(method, params))
}
/// Subscribe with serialization of request and deserialization of response.
pub fn | <T: Serialize, R: DeserializeOwned + 'static>(
&self,
subscribe: &str,
subscribe_params: T,
topic: &str,
unsubscribe: &str,
returns: &'static str,
) -> impl Future<Item = TypedSubscriptionStream<R>, Error = RpcError> {
let args = serde_json::to_value(subscribe_params)
.expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
let typed_stream = self
.0
.subscribe(subscribe, params, topic, unsubscribe)
.map(move |stream| TypedSubscriptionStream::new(stream, returns | subscribe | identifier_name |
value.go | uncate = errors.New("Do truncate")
type logEntry func(e Entry) error
type safeRead struct {
k []byte
v []byte
um []byte
recordOffset uint32
}
func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
var hbuf [headerBufSize]byte
var err error
hash := crc32.New(y.CastagnoliCrcTable)
tee := io.TeeReader(reader, hash)
if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
return nil, err
}
// Encounter preallocated region, just act as EOF.
if !isEncodedHeader(hbuf[:]) {
return nil, io.EOF
}
var h header
h.Decode(hbuf[:])
if h.klen > maxKeySize {
return nil, errTruncate
}
kl := int(h.klen)
if cap(r.k) < kl {
r.k = make([]byte, 2*kl)
}
vl := int(h.vlen)
if cap(r.v) < vl {
r.v = make([]byte, 2*vl)
}
e := &Entry{}
e.offset = r.recordOffset
e.Key = r.k[:kl]
e.Value = r.v[:vl]
if h.umlen > 0 {
if cap(r.um) < int(h.umlen) {
r.um = make([]byte, 2*h.umlen)
}
e.UserMeta = r.um[:h.umlen]
if _, err = io.ReadFull(tee, e.UserMeta); err != nil |
}
if _, err = io.ReadFull(tee, e.Key); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
if _, err = io.ReadFull(tee, e.Value); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
var crcBuf [4]byte
if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
crc := binary.BigEndian.Uint32(crcBuf[:])
if crc != hash.Sum32() {
return nil, errTruncate
}
e.meta = h.meta
return e, nil
}
// iterate iterates over log file. It doesn't not allocate new memory for every kv pair.
// Therefore, the kv pair is only valid for the duration of fn call.
func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
_, err := lf.fd.Seek(int64(offset), io.SeekStart)
if err != nil {
return 0, y.Wrap(err)
}
reader := bufio.NewReader(lf.fd)
read := &safeRead{
k: make([]byte, 10),
v: make([]byte, 10),
recordOffset: offset,
}
var lastCommit uint64
validEndOffset := read.recordOffset
for {
e, err := read.Entry(reader)
if err == io.EOF {
break
} else if err == io.ErrUnexpectedEOF || err == errTruncate {
break
} else if err != nil {
return validEndOffset, err
} else if e == nil {
continue
}
read.recordOffset += uint32(headerBufSize + len(e.Key) + len(e.Value) + len(e.UserMeta) + 4) // len(crcBuf)
if e.meta&bitTxn > 0 {
txnTs := y.ParseTs(e.Key)
if lastCommit == 0 {
lastCommit = txnTs
}
if lastCommit != txnTs {
break
}
} else if e.meta&bitFinTxn > 0 {
txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
if err != nil || lastCommit != txnTs {
break
}
// Got the end of txn. Now we can store them.
lastCommit = 0
validEndOffset = read.recordOffset
} else {
if lastCommit != 0 {
// This is most likely an entry which was moved as part of GC.
// We shouldn't get this entry in the middle of a transaction.
break
}
validEndOffset = read.recordOffset
}
if vlog.opt.ReadOnly {
return validEndOffset, ErrReplayNeeded
}
if err := fn(*e); err != nil {
if err == errStop {
break
}
return validEndOffset, y.Wrap(err)
}
}
return validEndOffset, nil
}
func (vlog *valueLog) deleteLogFile(lf *logFile) error {
path := vlog.fpath(lf.fid)
if err := lf.fd.Close(); err != nil {
return err
}
return os.Remove(path)
}
// lfDiscardStats keeps track of the amount of data that could be discarded for
// a given logfile.
type lfDiscardStats struct {
sync.Mutex
m map[uint32]int64
}
type valueLog struct {
buf bytes.Buffer
pendingLen int
dirPath string
curWriter *fileutil.BufferedWriter
files []*logFile
kv *DB
maxPtr uint64
numEntriesWritten uint32
opt Options
metrics *y.MetricsSet
}
func vlogFilePath(dirPath string, fid uint32) string {
return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
}
func (vlog *valueLog) fpath(fid uint32) string {
return vlogFilePath(vlog.dirPath, fid)
}
func (vlog *valueLog) currentLogFile() *logFile {
if len(vlog.files) > 0 {
return vlog.files[len(vlog.files)-1]
}
return nil
}
func (vlog *valueLog) openOrCreateFiles(readOnly bool) error {
files, err := ioutil.ReadDir(vlog.dirPath)
if err != nil {
return errors.Wrapf(err, "Error while opening value log")
}
found := make(map[uint64]struct{})
var maxFid uint32 // Beware len(files) == 0 case, this starts at 0.
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".vlog") {
continue
}
fsz := len(file.Name())
fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
if err != nil {
return errors.Wrapf(err, "Error while parsing value log id for file: %q", file.Name())
}
if _, ok := found[fid]; ok {
return errors.Errorf("Found the same value log file twice: %d", fid)
}
found[fid] = struct{}{}
lf := &logFile{
fid: uint32(fid),
path: vlog.fpath(uint32(fid)),
loadingMode: vlog.opt.ValueLogLoadingMode,
}
vlog.files = append(vlog.files, lf)
if uint32(fid) > maxFid {
maxFid = uint32(fid)
}
}
vlog.maxPtr = uint64(maxFid) << 32
sort.Slice(vlog.files, func(i, j int) bool {
return vlog.files[i].fid < vlog.files[j].fid
})
// Open all previous log files as read only. Open the last log file
// as read write (unless the DB is read only).
for _, lf := range vlog.files {
if lf.fid == maxFid {
var flags uint32
if readOnly {
flags |= y.ReadOnly
}
if lf.fd, err = y.OpenExistingFile(lf.path, flags); err != nil {
return errors.Wrapf(err, "Unable to open value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
if err := lf.openReadOnly(); err != nil {
return err
}
}
}
// If no files are found, then create a new file.
if len(vlog.files) == 0 {
// We already set vlog.maxFid above
err = vlog.createVlogFile(0)
if err != nil {
return err
}
}
return nil
}
func (vlog *valueLog) createVlogFile(fid uint32) error {
atomic.StoreUint64(&vlog.maxPtr, uint64(fid)<<32)
path := vlog.fpath(fid)
lf := &logFile{fid: fid, path: path, loadingMode: vlog.opt.ValueLogLoadingMode}
vlog.numEntriesWritten = | {
if err == io.EOF {
err = errTruncate
}
return nil, err
} | conditional_block |
value.go | uncate = errors.New("Do truncate")
type logEntry func(e Entry) error
type safeRead struct {
k []byte
v []byte
um []byte
recordOffset uint32
}
func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
var hbuf [headerBufSize]byte
var err error
hash := crc32.New(y.CastagnoliCrcTable)
tee := io.TeeReader(reader, hash)
if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
return nil, err
}
// Encounter preallocated region, just act as EOF.
if !isEncodedHeader(hbuf[:]) {
return nil, io.EOF
}
var h header
h.Decode(hbuf[:])
if h.klen > maxKeySize {
return nil, errTruncate
}
kl := int(h.klen)
if cap(r.k) < kl {
r.k = make([]byte, 2*kl)
}
vl := int(h.vlen)
if cap(r.v) < vl {
r.v = make([]byte, 2*vl)
}
e := &Entry{}
e.offset = r.recordOffset
e.Key = r.k[:kl]
e.Value = r.v[:vl]
if h.umlen > 0 {
if cap(r.um) < int(h.umlen) {
r.um = make([]byte, 2*h.umlen)
}
e.UserMeta = r.um[:h.umlen]
if _, err = io.ReadFull(tee, e.UserMeta); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
}
if _, err = io.ReadFull(tee, e.Key); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
if _, err = io.ReadFull(tee, e.Value); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
var crcBuf [4]byte
if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
crc := binary.BigEndian.Uint32(crcBuf[:])
if crc != hash.Sum32() {
return nil, errTruncate
}
e.meta = h.meta
return e, nil
}
// iterate iterates over log file. It doesn't not allocate new memory for every kv pair.
// Therefore, the kv pair is only valid for the duration of fn call.
func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
_, err := lf.fd.Seek(int64(offset), io.SeekStart)
if err != nil {
return 0, y.Wrap(err)
}
reader := bufio.NewReader(lf.fd)
read := &safeRead{
k: make([]byte, 10),
v: make([]byte, 10),
recordOffset: offset,
}
var lastCommit uint64
validEndOffset := read.recordOffset
for {
e, err := read.Entry(reader)
if err == io.EOF {
break
} else if err == io.ErrUnexpectedEOF || err == errTruncate {
break
} else if err != nil {
return validEndOffset, err
} else if e == nil {
continue
}
read.recordOffset += uint32(headerBufSize + len(e.Key) + len(e.Value) + len(e.UserMeta) + 4) // len(crcBuf)
if e.meta&bitTxn > 0 {
txnTs := y.ParseTs(e.Key)
if lastCommit == 0 {
lastCommit = txnTs
}
if lastCommit != txnTs {
break
}
} else if e.meta&bitFinTxn > 0 {
txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
if err != nil || lastCommit != txnTs {
break
}
// Got the end of txn. Now we can store them.
lastCommit = 0
validEndOffset = read.recordOffset
} else {
if lastCommit != 0 {
// This is most likely an entry which was moved as part of GC.
// We shouldn't get this entry in the middle of a transaction.
break
}
validEndOffset = read.recordOffset
}
if vlog.opt.ReadOnly {
return validEndOffset, ErrReplayNeeded
}
if err := fn(*e); err != nil {
if err == errStop {
break
}
return validEndOffset, y.Wrap(err)
}
}
return validEndOffset, nil
}
func (vlog *valueLog) deleteLogFile(lf *logFile) error {
path := vlog.fpath(lf.fid)
if err := lf.fd.Close(); err != nil {
return err
}
return os.Remove(path)
}
// lfDiscardStats keeps track of the amount of data that could be discarded for
// a given logfile.
type lfDiscardStats struct {
sync.Mutex
m map[uint32]int64
}
type valueLog struct {
buf bytes.Buffer
pendingLen int
dirPath string
curWriter *fileutil.BufferedWriter
files []*logFile
kv *DB
maxPtr uint64
numEntriesWritten uint32
opt Options
metrics *y.MetricsSet
}
func vlogFilePath(dirPath string, fid uint32) string |
func (vlog *valueLog) fpath(fid uint32) string {
return vlogFilePath(vlog.dirPath, fid)
}
func (vlog *valueLog) currentLogFile() *logFile {
if len(vlog.files) > 0 {
return vlog.files[len(vlog.files)-1]
}
return nil
}
func (vlog *valueLog) openOrCreateFiles(readOnly bool) error {
files, err := ioutil.ReadDir(vlog.dirPath)
if err != nil {
return errors.Wrapf(err, "Error while opening value log")
}
found := make(map[uint64]struct{})
var maxFid uint32 // Beware len(files) == 0 case, this starts at 0.
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".vlog") {
continue
}
fsz := len(file.Name())
fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
if err != nil {
return errors.Wrapf(err, "Error while parsing value log id for file: %q", file.Name())
}
if _, ok := found[fid]; ok {
return errors.Errorf("Found the same value log file twice: %d", fid)
}
found[fid] = struct{}{}
lf := &logFile{
fid: uint32(fid),
path: vlog.fpath(uint32(fid)),
loadingMode: vlog.opt.ValueLogLoadingMode,
}
vlog.files = append(vlog.files, lf)
if uint32(fid) > maxFid {
maxFid = uint32(fid)
}
}
vlog.maxPtr = uint64(maxFid) << 32
sort.Slice(vlog.files, func(i, j int) bool {
return vlog.files[i].fid < vlog.files[j].fid
})
// Open all previous log files as read only. Open the last log file
// as read write (unless the DB is read only).
for _, lf := range vlog.files {
if lf.fid == maxFid {
var flags uint32
if readOnly {
flags |= y.ReadOnly
}
if lf.fd, err = y.OpenExistingFile(lf.path, flags); err != nil {
return errors.Wrapf(err, "Unable to open value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
if err := lf.openReadOnly(); err != nil {
return err
}
}
}
// If no files are found, then create a new file.
if len(vlog.files) == 0 {
// We already set vlog.maxFid above
err = vlog.createVlogFile(0)
if err != nil {
return err
}
}
return nil
}
func (vlog *valueLog) createVlogFile(fid uint32) error {
atomic.StoreUint64(&vlog.maxPtr, uint64(fid)<<32)
path := vlog.fpath(fid)
lf := &logFile{fid: fid, path: path, loadingMode: vlog.opt.ValueLogLoadingMode}
vlog.numEntriesWritten = | {
return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
} | identifier_body |
value.go | Truncate = errors.New("Do truncate")
type logEntry func(e Entry) error
type safeRead struct {
k []byte
v []byte
um []byte
recordOffset uint32
}
func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
var hbuf [headerBufSize]byte
var err error
hash := crc32.New(y.CastagnoliCrcTable)
tee := io.TeeReader(reader, hash)
if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
return nil, err
}
// Encounter preallocated region, just act as EOF.
if !isEncodedHeader(hbuf[:]) {
return nil, io.EOF
}
var h header
h.Decode(hbuf[:])
if h.klen > maxKeySize {
return nil, errTruncate
}
kl := int(h.klen)
if cap(r.k) < kl {
r.k = make([]byte, 2*kl)
}
vl := int(h.vlen)
if cap(r.v) < vl {
r.v = make([]byte, 2*vl)
}
e := &Entry{}
e.offset = r.recordOffset
e.Key = r.k[:kl]
e.Value = r.v[:vl]
if h.umlen > 0 {
if cap(r.um) < int(h.umlen) {
r.um = make([]byte, 2*h.umlen)
}
e.UserMeta = r.um[:h.umlen]
if _, err = io.ReadFull(tee, e.UserMeta); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
}
if _, err = io.ReadFull(tee, e.Key); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
if _, err = io.ReadFull(tee, e.Value); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
var crcBuf [4]byte
if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
crc := binary.BigEndian.Uint32(crcBuf[:])
if crc != hash.Sum32() {
return nil, errTruncate
}
e.meta = h.meta
return e, nil
}
// iterate iterates over log file. It doesn't not allocate new memory for every kv pair.
// Therefore, the kv pair is only valid for the duration of fn call.
func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
_, err := lf.fd.Seek(int64(offset), io.SeekStart)
if err != nil {
return 0, y.Wrap(err)
}
reader := bufio.NewReader(lf.fd)
read := &safeRead{
k: make([]byte, 10),
v: make([]byte, 10),
recordOffset: offset,
}
var lastCommit uint64
validEndOffset := read.recordOffset
for {
e, err := read.Entry(reader)
if err == io.EOF {
break
} else if err == io.ErrUnexpectedEOF || err == errTruncate {
break
} else if err != nil {
return validEndOffset, err
} else if e == nil {
continue
}
read.recordOffset += uint32(headerBufSize + len(e.Key) + len(e.Value) + len(e.UserMeta) + 4) // len(crcBuf)
if e.meta&bitTxn > 0 {
txnTs := y.ParseTs(e.Key)
if lastCommit == 0 {
lastCommit = txnTs
}
if lastCommit != txnTs {
break
}
} else if e.meta&bitFinTxn > 0 {
txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
if err != nil || lastCommit != txnTs {
break
}
// Got the end of txn. Now we can store them.
lastCommit = 0
validEndOffset = read.recordOffset
} else {
if lastCommit != 0 {
// This is most likely an entry which was moved as part of GC.
// We shouldn't get this entry in the middle of a transaction.
break
}
validEndOffset = read.recordOffset
}
if vlog.opt.ReadOnly {
return validEndOffset, ErrReplayNeeded
}
if err := fn(*e); err != nil {
if err == errStop {
break
}
return validEndOffset, y.Wrap(err)
}
}
return validEndOffset, nil
}
func (vlog *valueLog) deleteLogFile(lf *logFile) error {
path := vlog.fpath(lf.fid)
if err := lf.fd.Close(); err != nil {
return err
}
return os.Remove(path)
}
// lfDiscardStats keeps track of the amount of data that could be discarded for
// a given logfile.
type lfDiscardStats struct {
sync.Mutex
m map[uint32]int64
}
type valueLog struct {
buf bytes.Buffer
pendingLen int
dirPath string
curWriter *fileutil.BufferedWriter
files []*logFile
kv *DB
maxPtr uint64
numEntriesWritten uint32
opt Options
metrics *y.MetricsSet
}
func vlogFilePath(dirPath string, fid uint32) string {
return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
}
func (vlog *valueLog) fpath(fid uint32) string {
return vlogFilePath(vlog.dirPath, fid)
}
func (vlog *valueLog) currentLogFile() *logFile {
if len(vlog.files) > 0 {
return vlog.files[len(vlog.files)-1]
}
return nil
}
func (vlog *valueLog) openOrCreateFiles(readOnly bool) error {
files, err := ioutil.ReadDir(vlog.dirPath)
if err != nil {
return errors.Wrapf(err, "Error while opening value log")
}
found := make(map[uint64]struct{})
var maxFid uint32 // Beware len(files) == 0 case, this starts at 0.
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".vlog") {
continue
}
fsz := len(file.Name())
fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
if err != nil {
return errors.Wrapf(err, "Error while parsing value log id for file: %q", file.Name())
}
if _, ok := found[fid]; ok {
return errors.Errorf("Found the same value log file twice: %d", fid)
}
found[fid] = struct{}{}
lf := &logFile{
fid: uint32(fid),
path: vlog.fpath(uint32(fid)),
loadingMode: vlog.opt.ValueLogLoadingMode,
}
vlog.files = append(vlog.files, lf)
if uint32(fid) > maxFid {
maxFid = uint32(fid)
}
}
vlog.maxPtr = uint64(maxFid) << 32
sort.Slice(vlog.files, func(i, j int) bool {
return vlog.files[i].fid < vlog.files[j].fid | if lf.fid == maxFid {
var flags uint32
if readOnly {
flags |= y.ReadOnly
}
if lf.fd, err = y.OpenExistingFile(lf.path, flags); err != nil {
return errors.Wrapf(err, "Unable to open value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
if err := lf.openReadOnly(); err != nil {
return err
}
}
}
// If no files are found, then create a new file.
if len(vlog.files) == 0 {
// We already set vlog.maxFid above
err = vlog.createVlogFile(0)
if err != nil {
return err
}
}
return nil
}
func (vlog *valueLog) createVlogFile(fid uint32) error {
atomic.StoreUint64(&vlog.maxPtr, uint64(fid)<<32)
path := vlog.fpath(fid)
lf := &logFile{fid: fid, path: path, loadingMode: vlog.opt.ValueLogLoadingMode}
vlog.numEntriesWritten = 0 | })
// Open all previous log files as read only. Open the last log file
// as read write (unless the DB is read only).
for _, lf := range vlog.files { | random_line_split |
value.go | Truncate = errors.New("Do truncate")
type logEntry func(e Entry) error
type safeRead struct {
k []byte
v []byte
um []byte
recordOffset uint32
}
func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
var hbuf [headerBufSize]byte
var err error
hash := crc32.New(y.CastagnoliCrcTable)
tee := io.TeeReader(reader, hash)
if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
return nil, err
}
// Encounter preallocated region, just act as EOF.
if !isEncodedHeader(hbuf[:]) {
return nil, io.EOF
}
var h header
h.Decode(hbuf[:])
if h.klen > maxKeySize {
return nil, errTruncate
}
kl := int(h.klen)
if cap(r.k) < kl {
r.k = make([]byte, 2*kl)
}
vl := int(h.vlen)
if cap(r.v) < vl {
r.v = make([]byte, 2*vl)
}
e := &Entry{}
e.offset = r.recordOffset
e.Key = r.k[:kl]
e.Value = r.v[:vl]
if h.umlen > 0 {
if cap(r.um) < int(h.umlen) {
r.um = make([]byte, 2*h.umlen)
}
e.UserMeta = r.um[:h.umlen]
if _, err = io.ReadFull(tee, e.UserMeta); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
}
if _, err = io.ReadFull(tee, e.Key); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
if _, err = io.ReadFull(tee, e.Value); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
var crcBuf [4]byte
if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
crc := binary.BigEndian.Uint32(crcBuf[:])
if crc != hash.Sum32() {
return nil, errTruncate
}
e.meta = h.meta
return e, nil
}
// iterate iterates over log file. It doesn't not allocate new memory for every kv pair.
// Therefore, the kv pair is only valid for the duration of fn call.
func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
_, err := lf.fd.Seek(int64(offset), io.SeekStart)
if err != nil {
return 0, y.Wrap(err)
}
reader := bufio.NewReader(lf.fd)
read := &safeRead{
k: make([]byte, 10),
v: make([]byte, 10),
recordOffset: offset,
}
var lastCommit uint64
validEndOffset := read.recordOffset
for {
e, err := read.Entry(reader)
if err == io.EOF {
break
} else if err == io.ErrUnexpectedEOF || err == errTruncate {
break
} else if err != nil {
return validEndOffset, err
} else if e == nil {
continue
}
read.recordOffset += uint32(headerBufSize + len(e.Key) + len(e.Value) + len(e.UserMeta) + 4) // len(crcBuf)
if e.meta&bitTxn > 0 {
txnTs := y.ParseTs(e.Key)
if lastCommit == 0 {
lastCommit = txnTs
}
if lastCommit != txnTs {
break
}
} else if e.meta&bitFinTxn > 0 {
txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
if err != nil || lastCommit != txnTs {
break
}
// Got the end of txn. Now we can store them.
lastCommit = 0
validEndOffset = read.recordOffset
} else {
if lastCommit != 0 {
// This is most likely an entry which was moved as part of GC.
// We shouldn't get this entry in the middle of a transaction.
break
}
validEndOffset = read.recordOffset
}
if vlog.opt.ReadOnly {
return validEndOffset, ErrReplayNeeded
}
if err := fn(*e); err != nil {
if err == errStop {
break
}
return validEndOffset, y.Wrap(err)
}
}
return validEndOffset, nil
}
func (vlog *valueLog) deleteLogFile(lf *logFile) error {
path := vlog.fpath(lf.fid)
if err := lf.fd.Close(); err != nil {
return err
}
return os.Remove(path)
}
// lfDiscardStats keeps track of the amount of data that could be discarded for
// a given logfile.
type lfDiscardStats struct {
sync.Mutex
m map[uint32]int64
}
type valueLog struct {
buf bytes.Buffer
pendingLen int
dirPath string
curWriter *fileutil.BufferedWriter
files []*logFile
kv *DB
maxPtr uint64
numEntriesWritten uint32
opt Options
metrics *y.MetricsSet
}
func vlogFilePath(dirPath string, fid uint32) string {
return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
}
func (vlog *valueLog) | (fid uint32) string {
return vlogFilePath(vlog.dirPath, fid)
}
func (vlog *valueLog) currentLogFile() *logFile {
if len(vlog.files) > 0 {
return vlog.files[len(vlog.files)-1]
}
return nil
}
func (vlog *valueLog) openOrCreateFiles(readOnly bool) error {
files, err := ioutil.ReadDir(vlog.dirPath)
if err != nil {
return errors.Wrapf(err, "Error while opening value log")
}
found := make(map[uint64]struct{})
var maxFid uint32 // Beware len(files) == 0 case, this starts at 0.
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".vlog") {
continue
}
fsz := len(file.Name())
fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
if err != nil {
return errors.Wrapf(err, "Error while parsing value log id for file: %q", file.Name())
}
if _, ok := found[fid]; ok {
return errors.Errorf("Found the same value log file twice: %d", fid)
}
found[fid] = struct{}{}
lf := &logFile{
fid: uint32(fid),
path: vlog.fpath(uint32(fid)),
loadingMode: vlog.opt.ValueLogLoadingMode,
}
vlog.files = append(vlog.files, lf)
if uint32(fid) > maxFid {
maxFid = uint32(fid)
}
}
vlog.maxPtr = uint64(maxFid) << 32
sort.Slice(vlog.files, func(i, j int) bool {
return vlog.files[i].fid < vlog.files[j].fid
})
// Open all previous log files as read only. Open the last log file
// as read write (unless the DB is read only).
for _, lf := range vlog.files {
if lf.fid == maxFid {
var flags uint32
if readOnly {
flags |= y.ReadOnly
}
if lf.fd, err = y.OpenExistingFile(lf.path, flags); err != nil {
return errors.Wrapf(err, "Unable to open value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
if err := lf.openReadOnly(); err != nil {
return err
}
}
}
// If no files are found, then create a new file.
if len(vlog.files) == 0 {
// We already set vlog.maxFid above
err = vlog.createVlogFile(0)
if err != nil {
return err
}
}
return nil
}
func (vlog *valueLog) createVlogFile(fid uint32) error {
atomic.StoreUint64(&vlog.maxPtr, uint64(fid)<<32)
path := vlog.fpath(fid)
lf := &logFile{fid: fid, path: path, loadingMode: vlog.opt.ValueLogLoadingMode}
vlog.numEntriesWritten = | fpath | identifier_name |
utils.go | return err
}
}
app := &AppImage{Filepath: targetAppImagePath, Executable: options.Executable}
if options.Executable == "" {
app.Executable = options.Executable
}
app.Source = Source{
Identifier: sourceIdentifier,
Meta: SourceMetadata{
Slug: sourceSlug,
CrawledOn: time.Now().String(),
},
}
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
indexBytes, err := json.Marshal(*app)
if err != nil {
return err
}
indexFile = fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Writing JSON index to %s", indexFile)
err = ioutil.WriteFile(indexFile, indexBytes, 0644)
if err != nil {
return err
}
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if !helpers.CheckIfDirectoryExists(binDir) {
err = os.MkdirAll(binDir, 0o755)
if err != nil {
return err
}
}
if helpers.CheckIfSymlinkExists(binFile) {
logger.Debugf("%s file exists. Attempting to find path", binFile)
binAbsPath, err := filepath.EvalSymlinks(binFile)
logger.Debugf("%s file is evaluated to %s", binFile, binAbsPath)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
logger.Debugf("%s is a previously installed symlink because of zap. Attempting to remove it", binFile)
err := os.Remove(binFile)
if err != nil {
logger.Warn("Failed to remove the symlink. %s", err)
}
} else if err == nil {
// this is some serious app which shares the same name
// as that of the target appimage
// we dont want users to be confused tbh
// so we need to ask them which of them, they would like to keep
logger.Debug("Detected another app which is not installed by zap. Refusing to remove")
// TODO: add a user prompt
logger.Fatalf("%s already exists. ", binFile)
} else {
// the file is probably a symlink, but just doesnt resolve properly
// we can safely remove it
// make sure we remove the file first to prevent conflicts
logger.Debugf("Failed to evaluate target of symlink")
logger.Debugf("Attempting to remove the symlink regardless")
err := os.Remove(binFile)
if err != nil {
logger.Debugf("Failed to remove symlink: %s", err)
}
}
}
if !strings.Contains(os.Getenv("PATH"), binDir) {
logger.Warnf("The app %s are installed in '%s' which is not on PATH.", options.Executable, binDir)
logger.Warnf("Consider adding this directory to PATH. " +
"See https://linuxize.com/post/how-to-add-directory-to-path-in-linux/")
}
logger.Debugf("Creating symlink to %s", binFile)
err = os.Symlink(targetAppImagePath, binFile)
if err != nil {
return err
}
// <- finished
logger.Debug("Completed all tasks")
fmt.Printf("%s installed successfully ✨\n", app.Executable)
return nil
}
// Upgrade method helps to update multiple apps without asking users for manual input
func Upgrade(config config.Store, silent bool) ([]string, error) {
apps, err := List(config, false)
var updatedApps []string
if err != nil {
return updatedApps, err
}
for i := range apps {
appsFormatted := fmt.Sprintf("[%s]", apps[i])
fmt.Printf("%s%s Checking for updates\n", tui.Blue("[update]"), tui.Yellow(appsFormatted))
options := types.Options{
Name: apps[i],
Executable: apps[i],
Silent: silent,
}
_, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s%s AppImage is up to date.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
} else {
fmt.Printf("%s%s failed to update, %s\n", tui.Blue("[update]"),
tui.Red(appsFormatted), tui.Yellow(err))
}
} else {
fmt.Printf("%s%s Updated.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
updatedApps = append(updatedApps, apps[i])
}
}
fmt.Println("🚀 Done.")
return updatedApps, nil
}
// Update method is a safe wrapper script which exposes update to the Command Line interface
// also handles those appimages which are up to date
func Update(options types.Options, config config.Store) error {
app, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s already up to date.\n", tui.Blue("[update]"))
return nil
} else {
return err
}
}
fmt.Printf("⚡️ AppImage saved as %s \n", tui.Green(app.Filepath))
fmt.Println("🚀 Done.")
return nil
}
// RemoveAndInstall helps to remove the AppImage first and then reinstall the appimage.
// this is particularly used in updating the AppImages from GitHub and Zap Index when
// the update information is missing
func RemoveAndInstall(options types.InstallOptions, config config.Store, app *AppImage) (*AppImage, error) {
// for github releases, we have to force the removal of the old
// appimage before continuing, because there is no verification
// of the method which can be used to check if the appimage is up to date
// or not.
err := Remove(types.RemoveOptions{Executable: app.Executable}, config)
if err != nil {
return nil, err
}
err = Install(options, config)
if err != nil {
return nil, err
}
// after installing, we need to resolve the name of the new app
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, app.Executable)
app.Filepath, err = filepath.EvalSymlinks(binFile)
if err != nil {
logger.Fatalf("Failed to resolve symlink to %s. E: %s", binDir, err)
return nil, err
}
return app, err
}
func update(options types.Options, config config.Store) (*AppImage, error) {
logger.Debugf("Bootstrapping updater for %s", options.Name)
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
return app, nil
}
logger.Debugf("Unmarshalling JSON from %s", indexFile)
indexBytes, err := ioutil.ReadFile(indexFile)
if err != nil {
return app, err
}
err = json.Unmarshal(indexBytes, app)
if err != nil {
return app, err
}
if !options.UseAppImageUpdate || !checkIfUpdateInformationExists(app.Filepath) {
logger.Debug("This app has no update information embedded")
// the appimage does nofalset contain update information
// we need to fetch the metadata from the index
if app.Source.Identifier == SourceGitHub {
logger.Debug("Fallback to GitHub API call from installation method")
installOptions := types.InstallOptions{
Name: app.Executable,
From: app.Source.Meta.Slug,
Executable: strings.Trim(app.Executable, " "),
FromGithub: true,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else if app.Source.Identifier == SourceZapIndex {
logger.Debug("Fallback to zap index from appimage.github.io")
installOptions := types.InstallOptions{
Name: app.Executable,
From: "",
Executable: strings.Trim(app.Executable, " "),
FromGithub: false,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else {
if options.Silent {
logger.Warn("%s has no update information. " +
"Please ask the AppImage author to include updateinformation for the best experience. " +
"Skipping.")
return nil, nil
} else {
return nil, errors.New("appimage has no update information")
}
}
}
logger.Debugf("Creating new updater instance from %s", app.Filepath)
updater, err := au.NewUpdaterFor(app.Filepath)
if err != nil {
return app, err
}
| logger.Debugf("Checking for updates")
hasUpdates, err := updater.Lookup()
if err != nil { | random_line_split |
|
utils.go |
func Install(options types.InstallOptions, config config.Store) error {
var asset types.ZapDlAsset
var err error
sourceIdentifier := ""
sourceSlug := ""
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
// check if the app is already installed
// if it is, do not continue
if helpers.CheckIfFileExists(indexFile) && !options.UpdateInplace {
fmt.Printf("%s is already installed \n", tui.Yellow(options.Executable))
return nil
} else if helpers.CheckIfFileExists(indexFile) {
// has the user requested to update the app in-place?
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.RemovePreviousVersions {
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.FromGithub {
asset, err = index.GitHubSurveyUserReleases(options, config)
sourceSlug = options.From
sourceIdentifier = SourceGitHub
if err != nil {
return err
}
} else if options.From == "" {
sourceIdentifier = SourceZapIndex
sourceSlug = options.Name
asset, err = index.ZapSurveyUserReleases(options, config)
if err != nil {
return err
}
} else {
sourceIdentifier = SourceDirectURL
sourceSlug = options.From
// if the from argument is without the file:// protocol, match that
if helpers.CheckIfFileExists(sourceSlug) {
sourceSlug, err = filepath.Abs(sourceSlug)
if err != nil {
return err
}
sourceSlug = fmt.Sprintf("file://%s", sourceSlug)
}
asset = types.ZapDlAsset{
Name: options.Executable,
Download: sourceSlug,
Size: "(unknown)",
}
}
if !options.Silent {
// let the user know what is going to happen next
fmt.Printf("Downloading %s of size %s. \n", tui.Green(asset.Name), tui.Yellow(asset.Size))
confirmDownload := false
confirmDownloadPrompt := &survey.Confirm{
Message: "Proceed?",
}
err = survey.AskOne(confirmDownloadPrompt, &confirmDownload)
if err != nil {
return err
} else if !confirmDownload {
return errors.New("aborting on user request")
}
}
logger.Debugf("Connecting to %s", asset.Download)
targetAppImagePath := path.Join(config.LocalStore, asset.GetBaseName())
targetAppImagePath, err = filepath.Abs(targetAppImagePath)
if err != nil {
return err
}
logger.Debugf("Target file path %s", targetAppImagePath)
if strings.HasPrefix(asset.Download, "file://") {
logger.Debug("file:// protocol detected, copying the file")
sourceFile := strings.Replace(asset.Download, "file://", "", 1)
_, err = helpers.CopyFile(sourceFile, targetAppImagePath)
if err != nil {
return err
}
err := os.Chmod(targetAppImagePath, 0755)
if err != nil {
return err
}
} else {
err = tui.DownloadFileWithProgressBar(asset.Download, targetAppImagePath, options.Executable)
if err != nil {
return err
}
}
app := &AppImage{Filepath: targetAppImagePath, Executable: options.Executable}
if options.Executable == "" {
app.Executable = options.Executable
}
app.Source = Source{
Identifier: sourceIdentifier,
Meta: SourceMetadata{
Slug: sourceSlug,
CrawledOn: time.Now().String(),
},
}
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
indexBytes, err := json.Marshal(*app)
if err != nil {
return err
}
indexFile = fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Writing JSON index to %s", indexFile)
err = ioutil.WriteFile(indexFile, indexBytes, 0644)
if err != nil {
return err
}
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if !helpers.CheckIfDirectoryExists(binDir) {
err = os.MkdirAll(binDir, 0o755)
if err != nil {
return err
}
}
if helpers.CheckIfSymlinkExists(binFile) {
logger.Debugf("%s file exists. Attempting to find path", binFile)
binAbsPath, err := filepath.EvalSymlinks(binFile)
logger.Debugf("%s file is evaluated to %s", binFile, binAbsPath)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
logger.Debugf("%s is a previously installed symlink because of zap. Attempting to remove it", binFile)
err := os.Remove(binFile)
if err != nil {
logger.Warn("Failed to remove the symlink. %s", err)
}
} else if err == nil {
// this is some serious app which shares the same name
// as that of the target appimage
// we dont want users to be confused tbh
// so we need to ask them which of them, they would like to keep
logger.Debug("Detected another app which is not installed by zap. Refusing to remove")
// TODO: add a user prompt
logger.Fatalf("%s already exists. ", binFile)
} else {
// the file is probably a symlink, but just doesnt resolve properly
// we can safely remove it
// make sure we remove the file first to prevent conflicts
logger.Debugf("Failed to evaluate target of symlink")
logger.Debugf("Attempting to remove the symlink regardless")
err := os.Remove(binFile)
if err != nil {
logger.Debugf("Failed to remove symlink: %s", err)
}
}
}
if !strings.Contains(os.Getenv("PATH"), binDir) {
logger.Warnf("The app %s are installed in '%s' which is not on PATH.", options.Executable, binDir)
logger.Warnf("Consider adding this directory to PATH. " +
"See https://linuxize.com/post/how-to-add-directory-to-path-in-linux/")
}
logger.Debugf("Creating symlink to %s", binFile)
err = os.Symlink(targetAppImagePath, binFile)
if err != nil {
return err
}
// <- finished
logger.Debug("Completed all tasks")
fmt.Printf("%s installed successfully ✨\n", app.Executable)
return nil
}
// Upgrade method helps to update multiple apps without asking users for manual input
func Upgrade(config config.Store, silent bool) ([]string, error) {
apps, err := List(config, false)
var updatedApps []string
if err != nil {
return updatedApps, err
}
for i := range apps {
appsFormatted := fmt.Sprintf("[%s]", apps[i])
fmt.Printf("%s%s Checking for updates\n", tui.Blue("[update]"), tui.Yellow(appsFormatted))
options := types.Options{
Name: apps[i],
Executable: apps[i],
Silent: silent,
}
_, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s%s AppImage is up to date.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
} else {
fmt.Printf("%s%s failed to update, %s\n", tui.Blue("[update]"),
tui.Red(appsFormatted), tui.Yellow(err))
}
} else {
fmt.Printf("%s%s Updated.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
updatedApps = append(updatedApps, apps[i])
}
}
fmt.Println("🚀 Done.")
return updatedApps, nil
}
// Update method is a safe wrapper script which exposes update to the Command Line interface
// also handles those appimages which are up to date
func Update(options types.Options, config config.Store) error {
app, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s already up to date.\n", tui.Blue("[update]"))
return nil
} else | {
var apps []string
err := filepath.Walk(zapConfig.IndexStore, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return err
}
appName := ""
if index {
appName = path
} else {
appName = filepath.Base(path)
appName = strings.TrimSuffix(appName, ".json")
}
apps = append(apps, appName)
return err
})
return apps, err
} | identifier_body |
|
utils.go | config)
if err != nil {
return err
}
} else {
sourceIdentifier = SourceDirectURL
sourceSlug = options.From
// if the from argument is without the file:// protocol, match that
if helpers.CheckIfFileExists(sourceSlug) {
sourceSlug, err = filepath.Abs(sourceSlug)
if err != nil {
return err
}
sourceSlug = fmt.Sprintf("file://%s", sourceSlug)
}
asset = types.ZapDlAsset{
Name: options.Executable,
Download: sourceSlug,
Size: "(unknown)",
}
}
if !options.Silent {
// let the user know what is going to happen next
fmt.Printf("Downloading %s of size %s. \n", tui.Green(asset.Name), tui.Yellow(asset.Size))
confirmDownload := false
confirmDownloadPrompt := &survey.Confirm{
Message: "Proceed?",
}
err = survey.AskOne(confirmDownloadPrompt, &confirmDownload)
if err != nil {
return err
} else if !confirmDownload {
return errors.New("aborting on user request")
}
}
logger.Debugf("Connecting to %s", asset.Download)
targetAppImagePath := path.Join(config.LocalStore, asset.GetBaseName())
targetAppImagePath, err = filepath.Abs(targetAppImagePath)
if err != nil {
return err
}
logger.Debugf("Target file path %s", targetAppImagePath)
if strings.HasPrefix(asset.Download, "file://") {
logger.Debug("file:// protocol detected, copying the file")
sourceFile := strings.Replace(asset.Download, "file://", "", 1)
_, err = helpers.CopyFile(sourceFile, targetAppImagePath)
if err != nil {
return err
}
err := os.Chmod(targetAppImagePath, 0755)
if err != nil {
return err
}
} else {
err = tui.DownloadFileWithProgressBar(asset.Download, targetAppImagePath, options.Executable)
if err != nil {
return err
}
}
app := &AppImage{Filepath: targetAppImagePath, Executable: options.Executable}
if options.Executable == "" {
app.Executable = options.Executable
}
app.Source = Source{
Identifier: sourceIdentifier,
Meta: SourceMetadata{
Slug: sourceSlug,
CrawledOn: time.Now().String(),
},
}
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
indexBytes, err := json.Marshal(*app)
if err != nil {
return err
}
indexFile = fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Writing JSON index to %s", indexFile)
err = ioutil.WriteFile(indexFile, indexBytes, 0644)
if err != nil {
return err
}
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if !helpers.CheckIfDirectoryExists(binDir) {
err = os.MkdirAll(binDir, 0o755)
if err != nil {
return err
}
}
if helpers.CheckIfSymlinkExists(binFile) {
logger.Debugf("%s file exists. Attempting to find path", binFile)
binAbsPath, err := filepath.EvalSymlinks(binFile)
logger.Debugf("%s file is evaluated to %s", binFile, binAbsPath)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
logger.Debugf("%s is a previously installed symlink because of zap. Attempting to remove it", binFile)
err := os.Remove(binFile)
if err != nil {
logger.Warn("Failed to remove the symlink. %s", err)
}
} else if err == nil {
// this is some serious app which shares the same name
// as that of the target appimage
// we dont want users to be confused tbh
// so we need to ask them which of them, they would like to keep
logger.Debug("Detected another app which is not installed by zap. Refusing to remove")
// TODO: add a user prompt
logger.Fatalf("%s already exists. ", binFile)
} else {
// the file is probably a symlink, but just doesnt resolve properly
// we can safely remove it
// make sure we remove the file first to prevent conflicts
logger.Debugf("Failed to evaluate target of symlink")
logger.Debugf("Attempting to remove the symlink regardless")
err := os.Remove(binFile)
if err != nil {
logger.Debugf("Failed to remove symlink: %s", err)
}
}
}
if !strings.Contains(os.Getenv("PATH"), binDir) {
logger.Warnf("The app %s are installed in '%s' which is not on PATH.", options.Executable, binDir)
logger.Warnf("Consider adding this directory to PATH. " +
"See https://linuxize.com/post/how-to-add-directory-to-path-in-linux/")
}
logger.Debugf("Creating symlink to %s", binFile)
err = os.Symlink(targetAppImagePath, binFile)
if err != nil {
return err
}
// <- finished
logger.Debug("Completed all tasks")
fmt.Printf("%s installed successfully ✨\n", app.Executable)
return nil
}
// Upgrade method helps to update multiple apps without asking users for manual input
func Upgrade(config config.Store, silent bool) ([]string, error) {
apps, err := List(config, false)
var updatedApps []string
if err != nil {
return updatedApps, err
}
for i := range apps {
appsFormatted := fmt.Sprintf("[%s]", apps[i])
fmt.Printf("%s%s Checking for updates\n", tui.Blue("[update]"), tui.Yellow(appsFormatted))
options := types.Options{
Name: apps[i],
Executable: apps[i],
Silent: silent,
}
_, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s%s AppImage is up to date.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
} else {
fmt.Printf("%s%s failed to update, %s\n", tui.Blue("[update]"),
tui.Red(appsFormatted), tui.Yellow(err))
}
} else {
fmt.Printf("%s%s Updated.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
updatedApps = append(updatedApps, apps[i])
}
}
fmt.Println("🚀 Done.")
return updatedApps, nil
}
// Update method is a safe wrapper script which exposes update to the Command Line interface
// also handles those appimages which are up to date
func Update(options types.Options, config config.Store) error {
app, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s already up to date.\n", tui.Blue("[update]"))
return nil
} else {
return err
}
}
fmt.Printf("⚡️ AppImage saved as %s \n", tui.Green(app.Filepath))
fmt.Println("🚀 Done.")
return nil
}
// RemoveAndInstall helps to remove the AppImage first and then reinstall the appimage.
// this is particularly used in updating the AppImages from GitHub and Zap Index when
// the update information is missing
func RemoveAndIns | es.InstallOptions, config config.Store, app *AppImage) (*AppImage, error) {
// for github releases, we have to force the removal of the old
// appimage before continuing, because there is no verification
// of the method which can be used to check if the appimage is up to date
// or not.
err := Remove(types.RemoveOptions{Executable: app.Executable}, config)
if err != nil {
return nil, err
}
err = Install(options, config)
if err != nil {
return nil, err
}
// after installing, we need to resolve the name of the new app
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, app.Executable)
app.Filepath, err = filepath.EvalSymlinks(binFile)
if err != nil {
logger.Fatalf("Failed to resolve symlink to %s. E: %s", binDir, err)
return nil, err
}
return app, err
}
func update(options types.Options, config config.Store) (*AppImage, error) {
logger.Debugf("Bootstrapping updater for %s", options.Name)
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable | tall(options typ | identifier_name |
utils.go | , config)
if err != nil {
return err
}
} else {
sourceIdentifier = SourceDirectURL
sourceSlug = options.From
// if the from argument is without the file:// protocol, match that
if helpers.CheckIfFileExists(sourceSlug) {
sourceSlug, err = filepath.Abs(sourceSlug)
if err != nil {
return err
}
sourceSlug = fmt.Sprintf("file://%s", sourceSlug)
}
asset = types.ZapDlAsset{
Name: options.Executable,
Download: sourceSlug,
Size: "(unknown)",
}
}
if !options.Silent {
// let the user know what is going to happen next
fmt.Printf("Downloading %s of size %s. \n", tui.Green(asset.Name), tui.Yellow(asset.Size))
confirmDownload := false
confirmDownloadPrompt := &survey.Confirm{
Message: "Proceed?",
}
err = survey.AskOne(confirmDownloadPrompt, &confirmDownload)
if err != nil {
return err
} else if !confirmDownload {
return errors.New("aborting on user request")
}
}
logger.Debugf("Connecting to %s", asset.Download)
targetAppImagePath := path.Join(config.LocalStore, asset.GetBaseName())
targetAppImagePath, err = filepath.Abs(targetAppImagePath)
if err != nil {
return err
}
logger.Debugf("Target file path %s", targetAppImagePath)
if strings.HasPrefix(asset.Download, "file://") {
logger.Debug("file:// protocol detected, copying the file")
sourceFile := strings.Replace(asset.Download, "file://", "", 1)
_, err = helpers.CopyFile(sourceFile, targetAppImagePath)
if err != nil {
return err
}
err := os.Chmod(targetAppImagePath, 0755)
if err != nil {
return err
}
} else {
err = tui.DownloadFileWithProgressBar(asset.Download, targetAppImagePath, options.Executable)
if err != nil {
return err
}
}
app := &AppImage{Filepath: targetAppImagePath, Executable: options.Executable}
if options.Executable == "" {
app.Executable = options.Executable
}
app.Source = Source{
Identifier: sourceIdentifier,
Meta: SourceMetadata{
Slug: sourceSlug,
CrawledOn: time.Now().String(),
},
}
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
indexBytes, err := json.Marshal(*app)
if err != nil {
return err
}
indexFile = fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Writing JSON index to %s", indexFile)
err = ioutil.WriteFile(indexFile, indexBytes, 0644)
if err != nil {
return err
}
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if !helpers.CheckIfDirectoryExists(binDir) {
err = os.MkdirAll(binDir, 0o755)
if err != nil {
return err
}
}
if helpers.CheckIfSymlinkExists(binFile) {
logger.Debugf("%s file exists. Attempting to find path", binFile)
binAbsPath, err := filepath.EvalSymlinks(binFile)
logger.Debugf("%s file is evaluated to %s", binFile, binAbsPath)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
logger.Debugf("%s is a previously installed symlink because of zap. Attempting to remove it", binFile)
err := os.Remove(binFile)
if err != nil {
logger.Warn("Failed to remove the symlink. %s", err)
}
} else if err == nil | else {
// the file is probably a symlink, but just doesnt resolve properly
// we can safely remove it
// make sure we remove the file first to prevent conflicts
logger.Debugf("Failed to evaluate target of symlink")
logger.Debugf("Attempting to remove the symlink regardless")
err := os.Remove(binFile)
if err != nil {
logger.Debugf("Failed to remove symlink: %s", err)
}
}
}
if !strings.Contains(os.Getenv("PATH"), binDir) {
logger.Warnf("The app %s are installed in '%s' which is not on PATH.", options.Executable, binDir)
logger.Warnf("Consider adding this directory to PATH. " +
"See https://linuxize.com/post/how-to-add-directory-to-path-in-linux/")
}
logger.Debugf("Creating symlink to %s", binFile)
err = os.Symlink(targetAppImagePath, binFile)
if err != nil {
return err
}
// <- finished
logger.Debug("Completed all tasks")
fmt.Printf("%s installed successfully ✨\n", app.Executable)
return nil
}
// Upgrade method helps to update multiple apps without asking users for manual input
func Upgrade(config config.Store, silent bool) ([]string, error) {
apps, err := List(config, false)
var updatedApps []string
if err != nil {
return updatedApps, err
}
for i := range apps {
appsFormatted := fmt.Sprintf("[%s]", apps[i])
fmt.Printf("%s%s Checking for updates\n", tui.Blue("[update]"), tui.Yellow(appsFormatted))
options := types.Options{
Name: apps[i],
Executable: apps[i],
Silent: silent,
}
_, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s%s AppImage is up to date.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
} else {
fmt.Printf("%s%s failed to update, %s\n", tui.Blue("[update]"),
tui.Red(appsFormatted), tui.Yellow(err))
}
} else {
fmt.Printf("%s%s Updated.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
updatedApps = append(updatedApps, apps[i])
}
}
fmt.Println("🚀 Done.")
return updatedApps, nil
}
// Update method is a safe wrapper script which exposes update to the Command Line interface
// also handles those appimages which are up to date
func Update(options types.Options, config config.Store) error {
app, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s already up to date.\n", tui.Blue("[update]"))
return nil
} else {
return err
}
}
fmt.Printf("⚡️ AppImage saved as %s \n", tui.Green(app.Filepath))
fmt.Println("🚀 Done.")
return nil
}
// RemoveAndInstall helps to remove the AppImage first and then reinstall the appimage.
// this is particularly used in updating the AppImages from GitHub and Zap Index when
// the update information is missing
func RemoveAndInstall(options types.InstallOptions, config config.Store, app *AppImage) (*AppImage, error) {
// for github releases, we have to force the removal of the old
// appimage before continuing, because there is no verification
// of the method which can be used to check if the appimage is up to date
// or not.
err := Remove(types.RemoveOptions{Executable: app.Executable}, config)
if err != nil {
return nil, err
}
err = Install(options, config)
if err != nil {
return nil, err
}
// after installing, we need to resolve the name of the new app
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, app.Executable)
app.Filepath, err = filepath.EvalSymlinks(binFile)
if err != nil {
logger.Fatalf("Failed to resolve symlink to %s. E: %s", binDir, err)
return nil, err
}
return app, err
}
func update(options types.Options, config config.Store) (*AppImage, error) {
logger.Debugf("Bootstrapping updater for %s", options.Name)
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
| {
// this is some serious app which shares the same name
// as that of the target appimage
// we dont want users to be confused tbh
// so we need to ask them which of them, they would like to keep
logger.Debug("Detected another app which is not installed by zap. Refusing to remove")
// TODO: add a user prompt
logger.Fatalf("%s already exists. ", binFile)
} | conditional_block |
student-layout.component.ts | navBarTheme: string; /* theme1, themelight1(default)*/
activeItemTheme: string; /* theme1, theme2, theme3, theme4(default), ..., theme11, theme12 */
isCollapsedMobile: string;
isCollapsedSideBar: string;
chatToggle: string;
chatToggleInverse: string;
chatInnerToggle: string;
chatInnerToggleInverse: string;
menuTitleTheme: string; /* theme1, theme2, theme3, theme4, theme5(default), theme6 */
itemBorder: boolean;
itemBorderStyle: string; /* none(default), solid, dotted, dashed */
subItemBorder: boolean;
subItemIcon: string; /* style1, style2, style3, style4, style5, style6(default) */
dropDownIcon: string; /* style1(default), style2, style3 */
configOpenRightBar: string;
isSidebarChecked: boolean;
isHeaderChecked: boolean;
@ViewChild('searchFriends', /* TODO: add static flag */ {static: false}) search_friends: ElementRef;
public config: any;
logininfo:any;
profiledata = {
address:'',
dateofbirth:'',
email:'',
gender:'',
mobile:'',
name:'',
zip_code:''
}
notificationarray = [];
profile_image_api:any;
role_type:any;
profileimage_name:any;
profileimage_url:any;
constructor(public menuItems: studentMenuItems,private ds:DataserviceService,private toastr:ToastrService,
private myacser:MyaccountService) {
console.log(this.menuItems.getAll());
this.logininfo = JSON.parse(sessionStorage.getItem('login_details'));
this.role_type = this.logininfo['role_type'];
this.profile_image_api = this.myacser.getprofileimageAPI();
this.navType = 'st5';
this.themeLayout = 'vertical';
this.vNavigationView = 'view1';
this.verticalPlacement = 'left';
this.verticalLayout = 'wide';
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
this.pcodedHeaderPosition = 'fixed';
this.pcodedSidebarPosition = 'fixed';
this.headerTheme = 'theme1';
this.logoTheme = 'theme1';
this.toggleOn = true;
this.headerFixedMargin = '80px';
this.navBarTheme = 'themelight1';
this.activeItemTheme = 'theme4';
this.isCollapsedMobile = 'no-block';
this.isCollapsedSideBar = 'no-block';
this.chatToggle = 'out';
this.chatToggleInverse = 'in';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'on';
this.menuTitleTheme = 'theme5';
this.itemBorder = true;
this.itemBorderStyle = 'none';
this.subItemBorder = true;
this.subItemIcon = 'style6';
this.dropDownIcon = 'style1';
this.isSidebarChecked = true;
this.isHeaderChecked = true;
const scrollHeight = window.screen.height - 150;
this.innerHeight = scrollHeight + 'px';
this.windowWidth = window.innerWidth;
this.setMenuAttributes(this.windowWidth);
// dark
/*this.setLayoutType('dark');
this.headerTheme = 'theme5';
this.logoTheme = 'theme5';*/
// light-dark
/*this.setLayoutType('dark');
this.setNavBarTheme('themelight1');
this.navType = 'st2';*/
// dark-light
// this.setNavBarTheme('theme1');
// this.navType = 'st3';
this.myacser.getdata('myaccount/'+this.logininfo['user_id']).then(async res => {
if(res['status'] == 'success'){
console.log(res);
let data = res['data'][0];
var notification = res['notification_data'];
if(notification != undefined && notification != '' && notification.length > 0){
notification.forEach(n => {
console.log(n.created_on);
const dateTimeAgo = moment(n.created_on).fromNow();
console.log(dateTimeAgo); //> 6 minutes ago
this.notificationarray.push({id:n.id,image:this.profile_image_api+''+n.profile_image,name:n.sponsorfname+' '+n.sponsorlname,msg:'Hi '+n.name+' '+n.last_name+' your sponsor has paid ₹ '+n.paid+' sponsored',paid_on:dateTimeAgo})
})
}
this.profiledata.name = data.name+' '+data.last_name;
this.profiledata.gender = data.gender;
this.profiledata.dateofbirth = data.dateofbirth;
this.profiledata.address = data.address;
this.profiledata.email = data.email;
this.profiledata.mobile = data.mobile;
this.profiledata.zip_code = data.zip_code;
var profile_img = await this.urlToObject(data.profile_image);
if(profile_img != null){
this.profileimage_name = data.profile_image;
this.profileimage_url = this.profile_image_api+''+data.profile_image;
}
}
},error => {
console.log(error);
if(error['error']){
this.toastr.error(error['error'].message, 'Error', {
progressBar:true
});
return;
}
})
}
ngOnInit() {
this.setBackgroundPattern('pattern2');
}
// gotonotification(data){
// }
onResize(event) {
this.innerHeight = event.target.innerHeight + 'px';
/* menu responsive */
this.windowWidth = event.target.innerWidth;
let reSizeFlag = true;
if (this.deviceType === 'tablet' && this.windowWidth >= 768 && this.windowWidth <= 1024) {
reSizeFlag = false;
} else if (this.deviceType === 'mobile' && this.windowWidth < 768) {
reSizeFlag = false;
}
/* for check device */
if (reSizeFlag) {
this.setMenuAttributes(this.windowWidth);
}
}
setMenuAttributes(windowWidth) {
if (windowWidth >= 768 && windowWidth <= 1024) {
this.deviceType = 'tablet';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'push';
} else if (windowWidth < 768) {
this.deviceType = 'mobile';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'overlay';
} else {
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
}
}
toggleOpened() {
if (this.windowWidth < 768) {
this.toggleOn = this.verticalNavType === 'offcanvas' ? true : this.toggleOn;
}
this.verticalNavType = this.verticalNavType === 'expanded' ? 'offcanvas' : 'expanded';
}
onClickedOutside(e: Event) {
if (this.windowWidth < 768 && this.toggleOn && this.verticalNavType !== 'offcanvas') {
this.toggleOn = true;
this.verticalNavType = 'offcanvas';
}
}
onMobileMenu() {
this.isCollapsedMobile = this.isCollapsedMobile === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleChat() {
this.chatToggle = this.chatToggle === 'out' ? 'in' : 'out';
this.chatToggleInverse = this.chatToggleInverse === 'out' ? 'in' : 'out';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'off';
}
toggleChatInner() {
this.chatInnerToggle = this.chatInnerToggle === 'off' ? 'on' : 'off';
this.chatInnerToggleInverse = this.chatInnerToggleInverse === 'off' ? 'on' : 'off';
}
searchFriendList(e: Event) {
const search = (this.search_friends.nativeElement.value).toLowerCase();
let search_input: string;
let search_parent: any;
const friendList = document.querySelectorAll('.userlist-box .media-body .chat-header');
Array.prototype.forEach.call(friendList, function(elements, index) {
search_input = (elements.innerHTML).toLowerCase();
search_parent = (elements.parentNode).parentNode;
if (search_input.indexOf(search) !== -1) {
search_parent.classList.add('show');
search_parent.classList.remove('hide');
} else {
search_parent.classList.add('hide');
search_parent.classList.remove('show');
}
});
}
toggleOpenedSidebar() {
this.isCollapsedSideBar = this.isCollapsedSideBar === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleRightbar() {
this.configOpenRightBar = this.configOpenRightBar === 'open' ? '' : 'open';
}
setSidebarPosition() {
this.isSidebarChecked = !this.isSidebarChecked;
this.pcodedSidebarPosition = this.isSidebarChecked === true ? 'fixed' : 'absolute';
}
setHeaderPosition() {
this.isHeaderChecked = !this.isHeaderChecked;
this.pcodedHeaderPosition = this.isHeaderChecked === true ? 'fixed' : 'relative';
this.headerFixedMargin = this.isHeaderChecked === true ? '80px' : '';
}
setBackgroundPattern(pattern) {
document.querySelector('body').setAttribute('themebg-pattern', pattern);
}
se | tLayoutType(t | identifier_name |
|
student-layout.component.ts | -out')),
transition('out => in', animate('400ms ease-in-out'))
]),
trigger('slideOnOff', [
state('on', style({
transform: 'translate3d(0, 0, 0)'
})),
state('off', style({
transform: 'translate3d(100%, 0, 0)'
})),
transition('on => off', animate('400ms ease-in-out')),
transition('off => on', animate('400ms ease-in-out'))
]),
trigger('fadeInOutTranslate', [
transition(':enter', [
style({opacity: 0}),
animate('400ms ease-in-out', style({opacity: 1}))
]),
transition(':leave', [
style({transform: 'translate(0)'}),
animate('400ms ease-in-out', style({opacity: 0}))
])
])
]
})
export class StudentLayoutComponent implements OnInit {
navType: string; /* st1, st2(default), st3, st4 */
themeLayout: string; /* vertical(default) */
layoutType: string; /* dark, light */
verticalPlacement: string; /* left(default), right */
verticalLayout: string; /* wide(default), box */
deviceType: string; /* desktop(default), tablet, mobile */
verticalNavType: string; /* expanded(default), offcanvas */
verticalEffect: string; /* shrink(default), push, overlay */
vNavigationView: string; /* view1(default) */
pcodedHeaderPosition: string; /* fixed(default), relative*/
pcodedSidebarPosition: string; /* fixed(default), absolute*/
headerTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
logoTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
innerHeight: string;
windowWidth: number;
toggleOn: boolean;
headerFixedMargin: string;
navBarTheme: string; /* theme1, themelight1(default)*/
activeItemTheme: string; /* theme1, theme2, theme3, theme4(default), ..., theme11, theme12 */
isCollapsedMobile: string;
isCollapsedSideBar: string;
chatToggle: string;
chatToggleInverse: string;
chatInnerToggle: string;
chatInnerToggleInverse: string;
menuTitleTheme: string; /* theme1, theme2, theme3, theme4, theme5(default), theme6 */
itemBorder: boolean;
itemBorderStyle: string; /* none(default), solid, dotted, dashed */
subItemBorder: boolean;
subItemIcon: string; /* style1, style2, style3, style4, style5, style6(default) */
dropDownIcon: string; /* style1(default), style2, style3 */
configOpenRightBar: string;
isSidebarChecked: boolean;
isHeaderChecked: boolean;
@ViewChild('searchFriends', /* TODO: add static flag */ {static: false}) search_friends: ElementRef;
public config: any;
logininfo:any;
profiledata = {
address:'',
dateofbirth:'',
email:'',
gender:'',
mobile:'',
name:'',
zip_code:''
}
notificationarray = [];
profile_image_api:any;
role_type:any;
profileimage_name:any;
profileimage_url:any;
constructor(public menuItems: studentMenuItems,private ds:DataserviceService,private toastr:ToastrService,
private myacser:MyaccountService) {
console.log(this.menuItems.getAll());
this.logininfo = JSON.parse(sessionStorage.getItem('login_details'));
this.role_type = this.logininfo['role_type'];
this.profile_image_api = this.myacser.getprofileimageAPI();
this.navType = 'st5';
this.themeLayout = 'vertical';
this.vNavigationView = 'view1';
this.verticalPlacement = 'left';
this.verticalLayout = 'wide';
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
this.pcodedHeaderPosition = 'fixed';
this.pcodedSidebarPosition = 'fixed';
this.headerTheme = 'theme1';
this.logoTheme = 'theme1';
this.toggleOn = true;
this.headerFixedMargin = '80px';
this.navBarTheme = 'themelight1';
this.activeItemTheme = 'theme4';
this.isCollapsedMobile = 'no-block';
this.isCollapsedSideBar = 'no-block';
this.chatToggle = 'out';
this.chatToggleInverse = 'in';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'on';
this.menuTitleTheme = 'theme5';
this.itemBorder = true;
this.itemBorderStyle = 'none';
this.subItemBorder = true;
this.subItemIcon = 'style6';
this.dropDownIcon = 'style1';
this.isSidebarChecked = true;
this.isHeaderChecked = true;
const scrollHeight = window.screen.height - 150;
this.innerHeight = scrollHeight + 'px';
this.windowWidth = window.innerWidth;
this.setMenuAttributes(this.windowWidth);
// dark
/*this.setLayoutType('dark');
this.headerTheme = 'theme5';
this.logoTheme = 'theme5';*/
// light-dark
/*this.setLayoutType('dark');
this.setNavBarTheme('themelight1');
this.navType = 'st2';*/
// dark-light
// this.setNavBarTheme('theme1');
// this.navType = 'st3';
this.myacser.getdata('myaccount/'+this.logininfo['user_id']).then(async res => {
if(res['status'] == 'success'){
console.log(res);
let data = res['data'][0];
var notification = res['notification_data'];
if(notification != undefined && notification != '' && notification.length > 0){
notification.forEach(n => {
console.log(n.created_on);
const dateTimeAgo = moment(n.created_on).fromNow();
console.log(dateTimeAgo); //> 6 minutes ago
this.notificationarray.push({id:n.id,image:this.profile_image_api+''+n.profile_image,name:n.sponsorfname+' '+n.sponsorlname,msg:'Hi '+n.name+' '+n.last_name+' your sponsor has paid ₹ '+n.paid+' sponsored',paid_on:dateTimeAgo})
})
}
this.profiledata.name = data.name+' '+data.last_name;
this.profiledata.gender = data.gender;
this.profiledata.dateofbirth = data.dateofbirth;
this.profiledata.address = data.address;
this.profiledata.email = data.email;
this.profiledata.mobile = data.mobile;
this.profiledata.zip_code = data.zip_code;
var profile_img = await this.urlToObject(data.profile_image);
if(profile_img != null){
this.profileimage_name = data.profile_image;
this.profileimage_url = this.profile_image_api+''+data.profile_image;
}
}
},error => {
console.log(error);
if(error['error']){
this.toastr.error(error['error'].message, 'Error', {
progressBar:true
});
return;
}
})
}
ngOnInit() {
this.setBackgroundPattern('pattern2');
}
// gotonotification(data){
// }
onResize(event) {
this.innerHeight = event.target.innerHeight + 'px';
/* menu responsive */
this.windowWidth = event.target.innerWidth;
let reSizeFlag = true;
if (this.deviceType === 'tablet' && this.windowWidth >= 768 && this.windowWidth <= 1024) {
reSizeFlag = false;
} else if (this.deviceType === 'mobile' && this.windowWidth < 768) {
reSizeFlag = false;
}
/* for check device */
if (reSizeFlag) {
this.setMenuAttributes(this.windowWidth);
}
}
setMenuAttributes(windowWidth) {
if (windowWidth >= 768 && windowWidth <= 1024) {
this.deviceType = 'tablet';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'push';
} else if (windowWidth < 768) {
this.deviceType = 'mobile';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'overlay';
} else {
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
} | }
this.verticalNavType = this.verticalNavType === 'expanded' ? 'offcanvas' : 'expanded';
}
onClickedOutside(e: Event) {
if (this.windowWidth < 768 && this.toggleOn && this.verticalNavType !== 'offcanvas') {
this.toggleOn = true;
this.verticalNavType = 'offcanvas';
}
}
onMobileMenu() {
this.isCollapsedMobile = this.isCollapsedMobile === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleChat() {
this.chatToggle = this.chatToggle === 'out' ? 'in' : 'out';
this | }
toggleOpened() {
if (this.windowWidth < 768) {
this.toggleOn = this.verticalNavType === 'offcanvas' ? true : this.toggleOn; | random_line_split |
student-layout.component.ts | verticalEffect: string; /* shrink(default), push, overlay */
vNavigationView: string; /* view1(default) */
pcodedHeaderPosition: string; /* fixed(default), relative*/
pcodedSidebarPosition: string; /* fixed(default), absolute*/
headerTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
logoTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
innerHeight: string;
windowWidth: number;
toggleOn: boolean;
headerFixedMargin: string;
navBarTheme: string; /* theme1, themelight1(default)*/
activeItemTheme: string; /* theme1, theme2, theme3, theme4(default), ..., theme11, theme12 */
isCollapsedMobile: string;
isCollapsedSideBar: string;
chatToggle: string;
chatToggleInverse: string;
chatInnerToggle: string;
chatInnerToggleInverse: string;
menuTitleTheme: string; /* theme1, theme2, theme3, theme4, theme5(default), theme6 */
itemBorder: boolean;
itemBorderStyle: string; /* none(default), solid, dotted, dashed */
subItemBorder: boolean;
subItemIcon: string; /* style1, style2, style3, style4, style5, style6(default) */
dropDownIcon: string; /* style1(default), style2, style3 */
configOpenRightBar: string;
isSidebarChecked: boolean;
isHeaderChecked: boolean;
@ViewChild('searchFriends', /* TODO: add static flag */ {static: false}) search_friends: ElementRef;
public config: any;
logininfo:any;
profiledata = {
address:'',
dateofbirth:'',
email:'',
gender:'',
mobile:'',
name:'',
zip_code:''
}
notificationarray = [];
profile_image_api:any;
role_type:any;
profileimage_name:any;
profileimage_url:any;
constructor(public menuItems: studentMenuItems,private ds:DataserviceService,private toastr:ToastrService,
private myacser:MyaccountService) {
console.log(this.menuItems.getAll());
this.logininfo = JSON.parse(sessionStorage.getItem('login_details'));
this.role_type = this.logininfo['role_type'];
this.profile_image_api = this.myacser.getprofileimageAPI();
this.navType = 'st5';
this.themeLayout = 'vertical';
this.vNavigationView = 'view1';
this.verticalPlacement = 'left';
this.verticalLayout = 'wide';
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
this.pcodedHeaderPosition = 'fixed';
this.pcodedSidebarPosition = 'fixed';
this.headerTheme = 'theme1';
this.logoTheme = 'theme1';
this.toggleOn = true;
this.headerFixedMargin = '80px';
this.navBarTheme = 'themelight1';
this.activeItemTheme = 'theme4';
this.isCollapsedMobile = 'no-block';
this.isCollapsedSideBar = 'no-block';
this.chatToggle = 'out';
this.chatToggleInverse = 'in';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'on';
this.menuTitleTheme = 'theme5';
this.itemBorder = true;
this.itemBorderStyle = 'none';
this.subItemBorder = true;
this.subItemIcon = 'style6';
this.dropDownIcon = 'style1';
this.isSidebarChecked = true;
this.isHeaderChecked = true;
const scrollHeight = window.screen.height - 150;
this.innerHeight = scrollHeight + 'px';
this.windowWidth = window.innerWidth;
this.setMenuAttributes(this.windowWidth);
// dark
/*this.setLayoutType('dark');
this.headerTheme = 'theme5';
this.logoTheme = 'theme5';*/
// light-dark
/*this.setLayoutType('dark');
this.setNavBarTheme('themelight1');
this.navType = 'st2';*/
// dark-light
// this.setNavBarTheme('theme1');
// this.navType = 'st3';
this.myacser.getdata('myaccount/'+this.logininfo['user_id']).then(async res => {
if(res['status'] == 'success'){
console.log(res);
let data = res['data'][0];
var notification = res['notification_data'];
if(notification != undefined && notification != '' && notification.length > 0){
notification.forEach(n => {
console.log(n.created_on);
const dateTimeAgo = moment(n.created_on).fromNow();
console.log(dateTimeAgo); //> 6 minutes ago
this.notificationarray.push({id:n.id,image:this.profile_image_api+''+n.profile_image,name:n.sponsorfname+' '+n.sponsorlname,msg:'Hi '+n.name+' '+n.last_name+' your sponsor has paid ₹ '+n.paid+' sponsored',paid_on:dateTimeAgo})
})
}
this.profiledata.name = data.name+' '+data.last_name;
this.profiledata.gender = data.gender;
this.profiledata.dateofbirth = data.dateofbirth;
this.profiledata.address = data.address;
this.profiledata.email = data.email;
this.profiledata.mobile = data.mobile;
this.profiledata.zip_code = data.zip_code;
var profile_img = await this.urlToObject(data.profile_image);
if(profile_img != null){
this.profileimage_name = data.profile_image;
this.profileimage_url = this.profile_image_api+''+data.profile_image;
}
}
},error => {
console.log(error);
if(error['error']){
this.toastr.error(error['error'].message, 'Error', {
progressBar:true
});
return;
}
})
}
ngOnInit() {
this.setBackgroundPattern('pattern2');
}
// gotonotification(data){
// }
onResize(event) {
this.innerHeight = event.target.innerHeight + 'px';
/* menu responsive */
this.windowWidth = event.target.innerWidth;
let reSizeFlag = true;
if (this.deviceType === 'tablet' && this.windowWidth >= 768 && this.windowWidth <= 1024) {
reSizeFlag = false;
} else if (this.deviceType === 'mobile' && this.windowWidth < 768) {
reSizeFlag = false;
}
/* for check device */
if (reSizeFlag) {
this.setMenuAttributes(this.windowWidth);
}
}
setMenuAttributes(windowWidth) {
if (windowWidth >= 768 && windowWidth <= 1024) {
this.deviceType = 'tablet';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'push';
} else if (windowWidth < 768) {
this.deviceType = 'mobile';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'overlay';
} else {
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
}
}
toggleOpened() {
if (this.windowWidth < 768) {
this.toggleOn = this.verticalNavType === 'offcanvas' ? true : this.toggleOn;
}
this.verticalNavType = this.verticalNavType === 'expanded' ? 'offcanvas' : 'expanded';
}
onClickedOutside(e: Event) {
if (this.windowWidth < 768 && this.toggleOn && this.verticalNavType !== 'offcanvas') {
this.toggleOn = true;
this.verticalNavType = 'offcanvas';
}
}
onMobileMenu() {
this.isCollapsedMobile = this.isCollapsedMobile === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleChat() {
this.chatToggle = this.chatToggle === 'out' ? 'in' : 'out';
this.chatToggleInverse = this.chatToggleInverse === 'out' ? 'in' : 'out';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'off';
}
toggleChatInner() {
this.chatInnerToggle = this.chatInnerToggle === 'off' ? 'on' : 'off';
this.chatInnerToggleInverse = this.chatInnerToggleInverse === 'off' ? 'on' : 'off';
}
searchFriendList(e: Event) {
const search = (this.search_friends.nativeElement.value).toLowerCase();
let search_input: string;
let search_parent: any;
const friendList = document.querySelectorAll('.userlist-box .media-body .chat-header');
Array.prototype.forEach.call(friendList, function(elements, index) {
search_input = (elements.innerHTML).toLowerCase();
search_parent = (elements.parentNode).parentNode;
if (search_input.indexOf(search) !== -1) {
search_parent.classList.add('show');
search_parent.classList.remove('hide');
} else {
search_parent.classList.add('hide');
search_parent.classList.remove('show');
}
});
}
toggleOpenedSidebar() {
this.isCollapsedSideBar = this.isCollapsedSideBar === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleRightbar() {
| this.configOpenRightBar = this.configOpenRightBar === 'open' ? '' : 'open';
}
| identifier_body |
|
student-layout.component.ts | Border: boolean;
subItemIcon: string; /* style1, style2, style3, style4, style5, style6(default) */
dropDownIcon: string; /* style1(default), style2, style3 */
configOpenRightBar: string;
isSidebarChecked: boolean;
isHeaderChecked: boolean;
@ViewChild('searchFriends', /* TODO: add static flag */ {static: false}) search_friends: ElementRef;
public config: any;
logininfo:any;
profiledata = {
address:'',
dateofbirth:'',
email:'',
gender:'',
mobile:'',
name:'',
zip_code:''
}
notificationarray = [];
profile_image_api:any;
role_type:any;
profileimage_name:any;
profileimage_url:any;
constructor(public menuItems: studentMenuItems,private ds:DataserviceService,private toastr:ToastrService,
private myacser:MyaccountService) {
console.log(this.menuItems.getAll());
this.logininfo = JSON.parse(sessionStorage.getItem('login_details'));
this.role_type = this.logininfo['role_type'];
this.profile_image_api = this.myacser.getprofileimageAPI();
this.navType = 'st5';
this.themeLayout = 'vertical';
this.vNavigationView = 'view1';
this.verticalPlacement = 'left';
this.verticalLayout = 'wide';
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
this.pcodedHeaderPosition = 'fixed';
this.pcodedSidebarPosition = 'fixed';
this.headerTheme = 'theme1';
this.logoTheme = 'theme1';
this.toggleOn = true;
this.headerFixedMargin = '80px';
this.navBarTheme = 'themelight1';
this.activeItemTheme = 'theme4';
this.isCollapsedMobile = 'no-block';
this.isCollapsedSideBar = 'no-block';
this.chatToggle = 'out';
this.chatToggleInverse = 'in';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'on';
this.menuTitleTheme = 'theme5';
this.itemBorder = true;
this.itemBorderStyle = 'none';
this.subItemBorder = true;
this.subItemIcon = 'style6';
this.dropDownIcon = 'style1';
this.isSidebarChecked = true;
this.isHeaderChecked = true;
const scrollHeight = window.screen.height - 150;
this.innerHeight = scrollHeight + 'px';
this.windowWidth = window.innerWidth;
this.setMenuAttributes(this.windowWidth);
// dark
/*this.setLayoutType('dark');
this.headerTheme = 'theme5';
this.logoTheme = 'theme5';*/
// light-dark
/*this.setLayoutType('dark');
this.setNavBarTheme('themelight1');
this.navType = 'st2';*/
// dark-light
// this.setNavBarTheme('theme1');
// this.navType = 'st3';
this.myacser.getdata('myaccount/'+this.logininfo['user_id']).then(async res => {
if(res['status'] == 'success'){
console.log(res);
let data = res['data'][0];
var notification = res['notification_data'];
if(notification != undefined && notification != '' && notification.length > 0){
notification.forEach(n => {
console.log(n.created_on);
const dateTimeAgo = moment(n.created_on).fromNow();
console.log(dateTimeAgo); //> 6 minutes ago
this.notificationarray.push({id:n.id,image:this.profile_image_api+''+n.profile_image,name:n.sponsorfname+' '+n.sponsorlname,msg:'Hi '+n.name+' '+n.last_name+' your sponsor has paid ₹ '+n.paid+' sponsored',paid_on:dateTimeAgo})
})
}
this.profiledata.name = data.name+' '+data.last_name;
this.profiledata.gender = data.gender;
this.profiledata.dateofbirth = data.dateofbirth;
this.profiledata.address = data.address;
this.profiledata.email = data.email;
this.profiledata.mobile = data.mobile;
this.profiledata.zip_code = data.zip_code;
var profile_img = await this.urlToObject(data.profile_image);
if(profile_img != null){
this.profileimage_name = data.profile_image;
this.profileimage_url = this.profile_image_api+''+data.profile_image;
}
}
},error => {
console.log(error);
if(error['error']){
this.toastr.error(error['error'].message, 'Error', {
progressBar:true
});
return;
}
})
}
ngOnInit() {
this.setBackgroundPattern('pattern2');
}
// gotonotification(data){
// }
onResize(event) {
this.innerHeight = event.target.innerHeight + 'px';
/* menu responsive */
this.windowWidth = event.target.innerWidth;
let reSizeFlag = true;
if (this.deviceType === 'tablet' && this.windowWidth >= 768 && this.windowWidth <= 1024) {
reSizeFlag = false;
} else if (this.deviceType === 'mobile' && this.windowWidth < 768) {
reSizeFlag = false;
}
/* for check device */
if (reSizeFlag) {
this.setMenuAttributes(this.windowWidth);
}
}
setMenuAttributes(windowWidth) {
if (windowWidth >= 768 && windowWidth <= 1024) {
this.deviceType = 'tablet';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'push';
} else if (windowWidth < 768) {
this.deviceType = 'mobile';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'overlay';
} else {
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
}
}
toggleOpened() {
if (this.windowWidth < 768) {
this.toggleOn = this.verticalNavType === 'offcanvas' ? true : this.toggleOn;
}
this.verticalNavType = this.verticalNavType === 'expanded' ? 'offcanvas' : 'expanded';
}
onClickedOutside(e: Event) {
if (this.windowWidth < 768 && this.toggleOn && this.verticalNavType !== 'offcanvas') {
this.toggleOn = true;
this.verticalNavType = 'offcanvas';
}
}
onMobileMenu() {
this.isCollapsedMobile = this.isCollapsedMobile === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleChat() {
this.chatToggle = this.chatToggle === 'out' ? 'in' : 'out';
this.chatToggleInverse = this.chatToggleInverse === 'out' ? 'in' : 'out';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'off';
}
toggleChatInner() {
this.chatInnerToggle = this.chatInnerToggle === 'off' ? 'on' : 'off';
this.chatInnerToggleInverse = this.chatInnerToggleInverse === 'off' ? 'on' : 'off';
}
searchFriendList(e: Event) {
const search = (this.search_friends.nativeElement.value).toLowerCase();
let search_input: string;
let search_parent: any;
const friendList = document.querySelectorAll('.userlist-box .media-body .chat-header');
Array.prototype.forEach.call(friendList, function(elements, index) {
search_input = (elements.innerHTML).toLowerCase();
search_parent = (elements.parentNode).parentNode;
if (search_input.indexOf(search) !== -1) {
search_parent.classList.add('show');
search_parent.classList.remove('hide');
} else {
search_parent.classList.add('hide');
search_parent.classList.remove('show');
}
});
}
toggleOpenedSidebar() {
this.isCollapsedSideBar = this.isCollapsedSideBar === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleRightbar() {
this.configOpenRightBar = this.configOpenRightBar === 'open' ? '' : 'open';
}
setSidebarPosition() {
this.isSidebarChecked = !this.isSidebarChecked;
this.pcodedSidebarPosition = this.isSidebarChecked === true ? 'fixed' : 'absolute';
}
setHeaderPosition() {
this.isHeaderChecked = !this.isHeaderChecked;
this.pcodedHeaderPosition = this.isHeaderChecked === true ? 'fixed' : 'relative';
this.headerFixedMargin = this.isHeaderChecked === true ? '80px' : '';
}
setBackgroundPattern(pattern) {
document.querySelector('body').setAttribute('themebg-pattern', pattern);
}
setLayoutType(type: string) {
this.layoutType = type;
if (type === 'dark') {
this.headerTheme = 'theme6';
this.navBarTheme = 'theme1';
this.logoTheme = 'theme6';
document.querySelector('body').classList.add('dark');
} else {
this.headerTheme = 'theme1';
this.navBarTheme = 'themelight1';
this.logoTheme = 'theme1';
document.querySelector('body').classList.remove('dark');
}
}
setNavBarTheme(theme: string) {
if (theme === 'themelight1') {
| this.navBarTheme = 'themelight1';
} e | conditional_block |
|
queue.go | ess(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
//
// default ordering is
// - [transmits=0, ..., transmits=inf]
// - [transmits=0:len=999, ..., transmits=0:len=2, ...]
// - [transmits=0:len=999,id=999, ..., transmits=0:len=999:id=1, ...]
func (b *limitedBroadcast) Less(than btree.Item) bool {
o := than.(*limitedBroadcast)
if b.transmits < o.transmits {
return true
} else if b.transmits > o.transmits {
return false
}
if b.msgLen > o.msgLen {
return true
} else if b.msgLen < o.msgLen {
return false
}
return b.id > o.id
}
// for testing; emits in transmit order if reverse=false
func (q *TransmitLimitedQueue) orderedView(reverse bool) []*limitedBroadcast {
q.mu.Lock()
defer q.mu.Unlock()
out := make([]*limitedBroadcast, 0, q.lenLocked())
q.walkReadOnlyLocked(reverse, func(cur *limitedBroadcast) bool {
out = append(out, cur)
return true
})
return out
}
// walkReadOnlyLocked calls f for each item in the queue traversing it in
// natural order (by Less) when reverse=false and the opposite when true. You
// must hold the mutex.
//
// This method panics if you attempt to mutate the item during traversal. The
// underlying btree should also not be mutated during traversal.
func (q *TransmitLimitedQueue) walkReadOnlyLocked(reverse bool, f func(*limitedBroadcast) bool) {
if q.lenLocked() == 0 {
return
}
iter := func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
prevTransmits := cur.transmits
prevMsgLen := cur.msgLen
prevID := cur.id
keepGoing := f(cur)
if prevTransmits != cur.transmits || prevMsgLen != cur.msgLen || prevID != cur.id {
panic("edited queue while walking read only")
}
return keepGoing
}
if reverse {
q.tq.Descend(iter) // end with transmit 0
} else {
q.tq.Ascend(iter) // start with transmit 0
}
}
// Broadcast is something that can be broadcasted via gossip to
// the memberlist cluster.
type Broadcast interface {
// Invalidates checks if enqueuing the current broadcast
// invalidates a previous broadcast
Invalidates(b Broadcast) bool
// Returns a byte form of the message
Message() []byte
// Finished is invoked when the message will no longer
// be broadcast, either due to invalidation or to the
// transmit limit being reached
Finished()
}
// NamedBroadcast is an optional extension of the Broadcast interface that
// gives each message a unique string name, and that is used to optimize
//
// You shoud ensure that Invalidates() checks the same uniqueness as the
// example below:
//
// func (b *foo) Invalidates(other Broadcast) bool {
// nb, ok := other.(NamedBroadcast)
// if !ok {
// return false
// }
// return b.Name() == nb.Name()
// }
//
// Invalidates() isn't currently used for NamedBroadcasts, but that may change
// in the future.
type NamedBroadcast interface {
Broadcast
// The unique identity of this broadcast message.
Name() string
}
// UniqueBroadcast is an optional interface that indicates that each message is
// intrinsically unique and there is no need to scan the broadcast queue for
// duplicates.
//
// You should ensure that Invalidates() always returns false if implementing
// this interface. Invalidates() isn't currently used for UniqueBroadcasts, but
// that may change in the future.
type UniqueBroadcast interface {
Broadcast
// UniqueBroadcast is just a marker method for this interface.
UniqueBroadcast()
}
// QueueBroadcast is used to enqueue a broadcast
func (q *TransmitLimitedQueue) QueueBroadcast(b Broadcast) {
q.queueBroadcast(b, 0)
}
// lazyInit initializes internal data structures the first time they are
// needed. You must already hold the mutex.
func (q *TransmitLimitedQueue) lazyInit() {
if q.tq == nil {
q.tq = btree.New(32)
}
if q.tm == nil {
q.tm = make(map[string]*limitedBroadcast)
}
}
// queueBroadcast is like QueueBroadcast but you can use a nonzero value for
// the initial transmit tier assigned to the message. This is meant to be used
// for unit testing.
func (q *TransmitLimitedQueue) queueBroadcast(b Broadcast, initialTransmits int) {
q.mu.Lock()
defer q.mu.Unlock()
q.lazyInit()
if q.idGen == math.MaxInt64 {
// it's super duper unlikely to wrap around within the retransmit limit
q.idGen = 1
} else {
q.idGen++
}
id := q.idGen
lb := &limitedBroadcast{
transmits: initialTransmits,
msgLen: int64(len(b.Message())),
id: id,
b: b,
}
unique := false
if nb, ok := b.(NamedBroadcast); ok {
lb.name = nb.Name()
} else if _, ok := b.(UniqueBroadcast); ok {
unique = true
}
// Check if this message invalidates another.
if lb.name != "" | else if !unique {
// Slow path, hopefully nothing hot hits this.
var remove []*limitedBroadcast
q.tq.Ascend(func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Special Broadcasts can only invalidate each other.
switch cur.b.(type) {
case NamedBroadcast:
// noop
case UniqueBroadcast:
// noop
default:
if b.Invalidates(cur.b) {
cur.b.Finished()
remove = append(remove, cur)
}
}
return true
})
for _, cur := range remove {
q.deleteItem(cur)
}
}
// Append to the relevant queue.
q.addItem(lb)
}
// deleteItem removes the given item from the overall datastructure. You
// must already hold the mutex.
func (q *TransmitLimitedQueue) deleteItem(cur *limitedBroadcast) {
_ = q.tq.Delete(cur)
if cur.name != "" {
delete(q.tm, cur.name)
}
if q.tq.Len() == 0 {
// At idle there's no reason to let the id generator keep going
// indefinitely.
q.idGen = 0
}
}
// addItem adds the given item into the overall datastructure. You must already
// hold the mutex.
func (q *TransmitLimitedQueue) addItem(cur *limitedBroadcast) {
_ = q.tq.ReplaceOrInsert(cur)
if cur.name != "" {
q.tm[cur.name] = cur
}
}
// getTransmitRange returns a pair of min/max values for transmit values
// represented by the current queue contents. Both values represent actual
// transmit values on the interval [0, len). You must already hold the mutex.
func (q *TransmitLimitedQueue) getTransmitRange() (minTransmit, maxTransmit int) {
if q.lenLocked() == 0 {
return 0, 0
}
minItem, maxItem := q.tq.Min(), q.tq.Max()
if minItem == nil || maxItem == nil {
return 0, 0
}
min := minItem.(*limitedBroadcast).transmits
max := maxItem.(*limitedBroadcast).transmits
return min, max
}
// GetBroadcasts is used to get a number of broadcasts, up to a byte limit
// and applying a per-message overhead as provided.
func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte {
q.mu.Lock()
defer q.mu.Unlock()
// Fast path the default case
if q.lenLocked() == 0 {
return nil
}
transmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes())
var (
bytesUsed int
toSend [][]byte
reinsert []*limitedBroadcast
)
// Visit fresher items first, but only look at stuff that will fit.
// We'll go tier by tier, grabbing the largest items first.
minTr, maxTr := q.getTransmitRange()
for transmits := minTr; transmits <= maxTr; /*do not advance automatically*/ {
free := int64(limit - bytesUsed - overhead)
if free <= 0 {
break // bail out early
}
// Search for the least element on a given tier (by transmit count) as
// defined in the limitedBroadcast.Less function that will fit into our
// remaining space.
greaterOrEqual := &limitedBroadcast{
transmits: transmits,
msgLen: free,
id: math.MaxInt64,
| {
if old, ok := q.tm[lb.name]; ok {
old.b.Finished()
q.deleteItem(old)
}
} | conditional_block |
queue.go | ess(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
//
// default ordering is
// - [transmits=0, ..., transmits=inf]
// - [transmits=0:len=999, ..., transmits=0:len=2, ...]
// - [transmits=0:len=999,id=999, ..., transmits=0:len=999:id=1, ...]
func (b *limitedBroadcast) Less(than btree.Item) bool {
o := than.(*limitedBroadcast)
if b.transmits < o.transmits {
return true
} else if b.transmits > o.transmits {
return false
}
if b.msgLen > o.msgLen {
return true
} else if b.msgLen < o.msgLen {
return false
}
return b.id > o.id
}
// for testing; emits in transmit order if reverse=false
func (q *TransmitLimitedQueue) orderedView(reverse bool) []*limitedBroadcast {
q.mu.Lock()
defer q.mu.Unlock()
out := make([]*limitedBroadcast, 0, q.lenLocked())
q.walkReadOnlyLocked(reverse, func(cur *limitedBroadcast) bool {
out = append(out, cur)
return true
})
return out
}
// walkReadOnlyLocked calls f for each item in the queue traversing it in
// natural order (by Less) when reverse=false and the opposite when true. You
// must hold the mutex.
//
// This method panics if you attempt to mutate the item during traversal. The
// underlying btree should also not be mutated during traversal.
func (q *TransmitLimitedQueue) walkReadOnlyLocked(reverse bool, f func(*limitedBroadcast) bool) {
if q.lenLocked() == 0 {
return
}
iter := func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
prevTransmits := cur.transmits
prevMsgLen := cur.msgLen
prevID := cur.id
keepGoing := f(cur)
if prevTransmits != cur.transmits || prevMsgLen != cur.msgLen || prevID != cur.id {
panic("edited queue while walking read only")
}
return keepGoing
}
if reverse {
q.tq.Descend(iter) // end with transmit 0
} else {
q.tq.Ascend(iter) // start with transmit 0
}
}
// Broadcast is something that can be broadcasted via gossip to
// the memberlist cluster.
type Broadcast interface {
// Invalidates checks if enqueuing the current broadcast
// invalidates a previous broadcast
Invalidates(b Broadcast) bool
// Returns a byte form of the message
Message() []byte
// Finished is invoked when the message will no longer
// be broadcast, either due to invalidation or to the
// transmit limit being reached
Finished()
}
// NamedBroadcast is an optional extension of the Broadcast interface that
// gives each message a unique string name, and that is used to optimize
//
// You shoud ensure that Invalidates() checks the same uniqueness as the
// example below:
//
// func (b *foo) Invalidates(other Broadcast) bool {
// nb, ok := other.(NamedBroadcast)
// if !ok {
// return false
// }
// return b.Name() == nb.Name()
// }
//
// Invalidates() isn't currently used for NamedBroadcasts, but that may change
// in the future.
type NamedBroadcast interface {
Broadcast
// The unique identity of this broadcast message.
Name() string
}
// UniqueBroadcast is an optional interface that indicates that each message is
// intrinsically unique and there is no need to scan the broadcast queue for
// duplicates.
//
// You should ensure that Invalidates() always returns false if implementing
// this interface. Invalidates() isn't currently used for UniqueBroadcasts, but
// that may change in the future.
type UniqueBroadcast interface {
Broadcast
// UniqueBroadcast is just a marker method for this interface.
UniqueBroadcast()
}
// QueueBroadcast is used to enqueue a broadcast
func (q *TransmitLimitedQueue) QueueBroadcast(b Broadcast) {
q.queueBroadcast(b, 0)
}
// lazyInit initializes internal data structures the first time they are
// needed. You must already hold the mutex.
func (q *TransmitLimitedQueue) lazyInit() {
if q.tq == nil {
q.tq = btree.New(32)
}
if q.tm == nil {
q.tm = make(map[string]*limitedBroadcast)
}
}
// queueBroadcast is like QueueBroadcast but you can use a nonzero value for
// the initial transmit tier assigned to the message. This is meant to be used
// for unit testing.
func (q *TransmitLimitedQueue) queueBroadcast(b Broadcast, initialTransmits int) {
q.mu.Lock()
defer q.mu.Unlock()
q.lazyInit()
if q.idGen == math.MaxInt64 {
// it's super duper unlikely to wrap around within the retransmit limit
q.idGen = 1
} else {
q.idGen++
}
id := q.idGen
lb := &limitedBroadcast{
transmits: initialTransmits,
msgLen: int64(len(b.Message())),
id: id,
b: b,
}
unique := false
if nb, ok := b.(NamedBroadcast); ok {
lb.name = nb.Name()
} else if _, ok := b.(UniqueBroadcast); ok {
unique = true
}
// Check if this message invalidates another.
if lb.name != "" {
if old, ok := q.tm[lb.name]; ok {
old.b.Finished()
q.deleteItem(old)
}
} else if !unique {
// Slow path, hopefully nothing hot hits this.
var remove []*limitedBroadcast
q.tq.Ascend(func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Special Broadcasts can only invalidate each other.
switch cur.b.(type) {
case NamedBroadcast:
// noop
case UniqueBroadcast:
// noop
default:
if b.Invalidates(cur.b) {
cur.b.Finished()
remove = append(remove, cur)
}
}
return true
})
for _, cur := range remove {
q.deleteItem(cur)
}
}
// Append to the relevant queue.
q.addItem(lb)
}
// deleteItem removes the given item from the overall datastructure. You
// must already hold the mutex.
func (q *TransmitLimitedQueue) deleteItem(cur *limitedBroadcast) {
_ = q.tq.Delete(cur)
if cur.name != "" {
delete(q.tm, cur.name)
}
if q.tq.Len() == 0 {
// At idle there's no reason to let the id generator keep going
// indefinitely.
q.idGen = 0
}
}
// addItem adds the given item into the overall datastructure. You must already
// hold the mutex.
func (q *TransmitLimitedQueue) addItem(cur *limitedBroadcast) {
_ = q.tq.ReplaceOrInsert(cur)
if cur.name != "" {
q.tm[cur.name] = cur
}
}
// getTransmitRange returns a pair of min/max values for transmit values
// represented by the current queue contents. Both values represent actual
// transmit values on the interval [0, len). You must already hold the mutex.
func (q *TransmitLimitedQueue) | () (minTransmit, maxTransmit int) {
if q.lenLocked() == 0 {
return 0, 0
}
minItem, maxItem := q.tq.Min(), q.tq.Max()
if minItem == nil || maxItem == nil {
return 0, 0
}
min := minItem.(*limitedBroadcast).transmits
max := maxItem.(*limitedBroadcast).transmits
return min, max
}
// GetBroadcasts is used to get a number of broadcasts, up to a byte limit
// and applying a per-message overhead as provided.
func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte {
q.mu.Lock()
defer q.mu.Unlock()
// Fast path the default case
if q.lenLocked() == 0 {
return nil
}
transmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes())
var (
bytesUsed int
toSend [][]byte
reinsert []*limitedBroadcast
)
// Visit fresher items first, but only look at stuff that will fit.
// We'll go tier by tier, grabbing the largest items first.
minTr, maxTr := q.getTransmitRange()
for transmits := minTr; transmits <= maxTr; /*do not advance automatically*/ {
free := int64(limit - bytesUsed - overhead)
if free <= 0 {
break // bail out early
}
// Search for the least element on a given tier (by transmit count) as
// defined in the limitedBroadcast.Less function that will fit into our
// remaining space.
greaterOrEqual := &limitedBroadcast{
transmits: transmits,
msgLen: free,
id: math.MaxInt64,
| getTransmitRange | identifier_name |
queue.go | ess(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
//
// default ordering is
// - [transmits=0, ..., transmits=inf]
// - [transmits=0:len=999, ..., transmits=0:len=2, ...]
// - [transmits=0:len=999,id=999, ..., transmits=0:len=999:id=1, ...]
func (b *limitedBroadcast) Less(than btree.Item) bool {
o := than.(*limitedBroadcast)
if b.transmits < o.transmits {
return true
} else if b.transmits > o.transmits {
return false
}
if b.msgLen > o.msgLen {
return true
} else if b.msgLen < o.msgLen {
return false
}
return b.id > o.id
}
// for testing; emits in transmit order if reverse=false
func (q *TransmitLimitedQueue) orderedView(reverse bool) []*limitedBroadcast |
// walkReadOnlyLocked calls f for each item in the queue traversing it in
// natural order (by Less) when reverse=false and the opposite when true. You
// must hold the mutex.
//
// This method panics if you attempt to mutate the item during traversal. The
// underlying btree should also not be mutated during traversal.
func (q *TransmitLimitedQueue) walkReadOnlyLocked(reverse bool, f func(*limitedBroadcast) bool) {
if q.lenLocked() == 0 {
return
}
iter := func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
prevTransmits := cur.transmits
prevMsgLen := cur.msgLen
prevID := cur.id
keepGoing := f(cur)
if prevTransmits != cur.transmits || prevMsgLen != cur.msgLen || prevID != cur.id {
panic("edited queue while walking read only")
}
return keepGoing
}
if reverse {
q.tq.Descend(iter) // end with transmit 0
} else {
q.tq.Ascend(iter) // start with transmit 0
}
}
// Broadcast is something that can be broadcasted via gossip to
// the memberlist cluster.
type Broadcast interface {
// Invalidates checks if enqueuing the current broadcast
// invalidates a previous broadcast
Invalidates(b Broadcast) bool
// Returns a byte form of the message
Message() []byte
// Finished is invoked when the message will no longer
// be broadcast, either due to invalidation or to the
// transmit limit being reached
Finished()
}
// NamedBroadcast is an optional extension of the Broadcast interface that
// gives each message a unique string name, and that is used to optimize
//
// You shoud ensure that Invalidates() checks the same uniqueness as the
// example below:
//
// func (b *foo) Invalidates(other Broadcast) bool {
// nb, ok := other.(NamedBroadcast)
// if !ok {
// return false
// }
// return b.Name() == nb.Name()
// }
//
// Invalidates() isn't currently used for NamedBroadcasts, but that may change
// in the future.
type NamedBroadcast interface {
Broadcast
// The unique identity of this broadcast message.
Name() string
}
// UniqueBroadcast is an optional interface that indicates that each message is
// intrinsically unique and there is no need to scan the broadcast queue for
// duplicates.
//
// You should ensure that Invalidates() always returns false if implementing
// this interface. Invalidates() isn't currently used for UniqueBroadcasts, but
// that may change in the future.
type UniqueBroadcast interface {
Broadcast
// UniqueBroadcast is just a marker method for this interface.
UniqueBroadcast()
}
// QueueBroadcast is used to enqueue a broadcast
func (q *TransmitLimitedQueue) QueueBroadcast(b Broadcast) {
q.queueBroadcast(b, 0)
}
// lazyInit initializes internal data structures the first time they are
// needed. You must already hold the mutex.
func (q *TransmitLimitedQueue) lazyInit() {
if q.tq == nil {
q.tq = btree.New(32)
}
if q.tm == nil {
q.tm = make(map[string]*limitedBroadcast)
}
}
// queueBroadcast is like QueueBroadcast but you can use a nonzero value for
// the initial transmit tier assigned to the message. This is meant to be used
// for unit testing.
func (q *TransmitLimitedQueue) queueBroadcast(b Broadcast, initialTransmits int) {
q.mu.Lock()
defer q.mu.Unlock()
q.lazyInit()
if q.idGen == math.MaxInt64 {
// it's super duper unlikely to wrap around within the retransmit limit
q.idGen = 1
} else {
q.idGen++
}
id := q.idGen
lb := &limitedBroadcast{
transmits: initialTransmits,
msgLen: int64(len(b.Message())),
id: id,
b: b,
}
unique := false
if nb, ok := b.(NamedBroadcast); ok {
lb.name = nb.Name()
} else if _, ok := b.(UniqueBroadcast); ok {
unique = true
}
// Check if this message invalidates another.
if lb.name != "" {
if old, ok := q.tm[lb.name]; ok {
old.b.Finished()
q.deleteItem(old)
}
} else if !unique {
// Slow path, hopefully nothing hot hits this.
var remove []*limitedBroadcast
q.tq.Ascend(func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Special Broadcasts can only invalidate each other.
switch cur.b.(type) {
case NamedBroadcast:
// noop
case UniqueBroadcast:
// noop
default:
if b.Invalidates(cur.b) {
cur.b.Finished()
remove = append(remove, cur)
}
}
return true
})
for _, cur := range remove {
q.deleteItem(cur)
}
}
// Append to the relevant queue.
q.addItem(lb)
}
// deleteItem removes the given item from the overall datastructure. You
// must already hold the mutex.
func (q *TransmitLimitedQueue) deleteItem(cur *limitedBroadcast) {
_ = q.tq.Delete(cur)
if cur.name != "" {
delete(q.tm, cur.name)
}
if q.tq.Len() == 0 {
// At idle there's no reason to let the id generator keep going
// indefinitely.
q.idGen = 0
}
}
// addItem adds the given item into the overall datastructure. You must already
// hold the mutex.
func (q *TransmitLimitedQueue) addItem(cur *limitedBroadcast) {
_ = q.tq.ReplaceOrInsert(cur)
if cur.name != "" {
q.tm[cur.name] = cur
}
}
// getTransmitRange returns a pair of min/max values for transmit values
// represented by the current queue contents. Both values represent actual
// transmit values on the interval [0, len). You must already hold the mutex.
func (q *TransmitLimitedQueue) getTransmitRange() (minTransmit, maxTransmit int) {
if q.lenLocked() == 0 {
return 0, 0
}
minItem, maxItem := q.tq.Min(), q.tq.Max()
if minItem == nil || maxItem == nil {
return 0, 0
}
min := minItem.(*limitedBroadcast).transmits
max := maxItem.(*limitedBroadcast).transmits
return min, max
}
// GetBroadcasts is used to get a number of broadcasts, up to a byte limit
// and applying a per-message overhead as provided.
func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte {
q.mu.Lock()
defer q.mu.Unlock()
// Fast path the default case
if q.lenLocked() == 0 {
return nil
}
transmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes())
var (
bytesUsed int
toSend [][]byte
reinsert []*limitedBroadcast
)
// Visit fresher items first, but only look at stuff that will fit.
// We'll go tier by tier, grabbing the largest items first.
minTr, maxTr := q.getTransmitRange()
for transmits := minTr; transmits <= maxTr; /*do not advance automatically*/ {
free := int64(limit - bytesUsed - overhead)
if free <= 0 {
break // bail out early
}
// Search for the least element on a given tier (by transmit count) as
// defined in the limitedBroadcast.Less function that will fit into our
// remaining space.
greaterOrEqual := &limitedBroadcast{
transmits: transmits,
msgLen: free,
id: math.MaxInt64,
| {
q.mu.Lock()
defer q.mu.Unlock()
out := make([]*limitedBroadcast, 0, q.lenLocked())
q.walkReadOnlyLocked(reverse, func(cur *limitedBroadcast) bool {
out = append(out, cur)
return true
})
return out
} | identifier_body |
queue.go | ess(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
//
// default ordering is
// - [transmits=0, ..., transmits=inf]
// - [transmits=0:len=999, ..., transmits=0:len=2, ...]
// - [transmits=0:len=999,id=999, ..., transmits=0:len=999:id=1, ...]
func (b *limitedBroadcast) Less(than btree.Item) bool {
o := than.(*limitedBroadcast)
if b.transmits < o.transmits {
return true
} else if b.transmits > o.transmits {
return false
}
if b.msgLen > o.msgLen {
return true
} else if b.msgLen < o.msgLen {
return false
}
return b.id > o.id
}
// for testing; emits in transmit order if reverse=false
func (q *TransmitLimitedQueue) orderedView(reverse bool) []*limitedBroadcast {
q.mu.Lock()
defer q.mu.Unlock()
out := make([]*limitedBroadcast, 0, q.lenLocked())
q.walkReadOnlyLocked(reverse, func(cur *limitedBroadcast) bool {
out = append(out, cur)
return true
})
return out
}
// walkReadOnlyLocked calls f for each item in the queue traversing it in
// natural order (by Less) when reverse=false and the opposite when true. You
// must hold the mutex.
//
// This method panics if you attempt to mutate the item during traversal. The
// underlying btree should also not be mutated during traversal.
func (q *TransmitLimitedQueue) walkReadOnlyLocked(reverse bool, f func(*limitedBroadcast) bool) {
if q.lenLocked() == 0 {
return
}
iter := func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
prevTransmits := cur.transmits
prevMsgLen := cur.msgLen
prevID := cur.id
keepGoing := f(cur)
if prevTransmits != cur.transmits || prevMsgLen != cur.msgLen || prevID != cur.id {
panic("edited queue while walking read only")
}
return keepGoing
}
if reverse {
q.tq.Descend(iter) // end with transmit 0
} else {
q.tq.Ascend(iter) // start with transmit 0
}
}
// Broadcast is something that can be broadcasted via gossip to
// the memberlist cluster.
type Broadcast interface {
// Invalidates checks if enqueuing the current broadcast
// invalidates a previous broadcast
Invalidates(b Broadcast) bool
// Returns a byte form of the message
Message() []byte
// Finished is invoked when the message will no longer
// be broadcast, either due to invalidation or to the
// transmit limit being reached
Finished()
}
// NamedBroadcast is an optional extension of the Broadcast interface that
// gives each message a unique string name, and that is used to optimize
//
// You shoud ensure that Invalidates() checks the same uniqueness as the
// example below:
//
// func (b *foo) Invalidates(other Broadcast) bool {
// nb, ok := other.(NamedBroadcast)
// if !ok {
// return false
// }
// return b.Name() == nb.Name()
// }
//
// Invalidates() isn't currently used for NamedBroadcasts, but that may change
// in the future.
type NamedBroadcast interface {
Broadcast
// The unique identity of this broadcast message.
Name() string
}
// UniqueBroadcast is an optional interface that indicates that each message is
// intrinsically unique and there is no need to scan the broadcast queue for
// duplicates.
//
// You should ensure that Invalidates() always returns false if implementing
// this interface. Invalidates() isn't currently used for UniqueBroadcasts, but
// that may change in the future.
type UniqueBroadcast interface {
Broadcast
// UniqueBroadcast is just a marker method for this interface.
UniqueBroadcast()
}
// QueueBroadcast is used to enqueue a broadcast
func (q *TransmitLimitedQueue) QueueBroadcast(b Broadcast) {
q.queueBroadcast(b, 0)
}
// lazyInit initializes internal data structures the first time they are
// needed. You must already hold the mutex.
func (q *TransmitLimitedQueue) lazyInit() {
if q.tq == nil {
q.tq = btree.New(32)
}
if q.tm == nil {
q.tm = make(map[string]*limitedBroadcast)
}
}
// queueBroadcast is like QueueBroadcast but you can use a nonzero value for
// the initial transmit tier assigned to the message. This is meant to be used
// for unit testing.
func (q *TransmitLimitedQueue) queueBroadcast(b Broadcast, initialTransmits int) {
q.mu.Lock()
defer q.mu.Unlock()
q.lazyInit()
if q.idGen == math.MaxInt64 {
// it's super duper unlikely to wrap around within the retransmit limit
q.idGen = 1
} else {
q.idGen++
}
id := q.idGen
lb := &limitedBroadcast{
transmits: initialTransmits,
msgLen: int64(len(b.Message())),
id: id,
b: b,
}
unique := false
if nb, ok := b.(NamedBroadcast); ok {
lb.name = nb.Name()
} else if _, ok := b.(UniqueBroadcast); ok {
unique = true
}
// Check if this message invalidates another.
if lb.name != "" {
if old, ok := q.tm[lb.name]; ok {
old.b.Finished()
q.deleteItem(old)
}
} else if !unique {
// Slow path, hopefully nothing hot hits this.
var remove []*limitedBroadcast
q.tq.Ascend(func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Special Broadcasts can only invalidate each other.
switch cur.b.(type) {
case NamedBroadcast:
// noop
case UniqueBroadcast:
// noop
default:
if b.Invalidates(cur.b) {
cur.b.Finished()
remove = append(remove, cur)
}
}
return true
})
for _, cur := range remove {
q.deleteItem(cur)
}
}
// Append to the relevant queue.
q.addItem(lb)
}
// deleteItem removes the given item from the overall datastructure. You
// must already hold the mutex.
func (q *TransmitLimitedQueue) deleteItem(cur *limitedBroadcast) {
_ = q.tq.Delete(cur)
if cur.name != "" {
delete(q.tm, cur.name)
}
if q.tq.Len() == 0 {
// At idle there's no reason to let the id generator keep going
// indefinitely.
q.idGen = 0
}
}
// addItem adds the given item into the overall datastructure. You must already
// hold the mutex.
func (q *TransmitLimitedQueue) addItem(cur *limitedBroadcast) {
_ = q.tq.ReplaceOrInsert(cur)
if cur.name != "" {
q.tm[cur.name] = cur
}
} | // getTransmitRange returns a pair of min/max values for transmit values
// represented by the current queue contents. Both values represent actual
// transmit values on the interval [0, len). You must already hold the mutex.
func (q *TransmitLimitedQueue) getTransmitRange() (minTransmit, maxTransmit int) {
if q.lenLocked() == 0 {
return 0, 0
}
minItem, maxItem := q.tq.Min(), q.tq.Max()
if minItem == nil || maxItem == nil {
return 0, 0
}
min := minItem.(*limitedBroadcast).transmits
max := maxItem.(*limitedBroadcast).transmits
return min, max
}
// GetBroadcasts is used to get a number of broadcasts, up to a byte limit
// and applying a per-message overhead as provided.
func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte {
q.mu.Lock()
defer q.mu.Unlock()
// Fast path the default case
if q.lenLocked() == 0 {
return nil
}
transmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes())
var (
bytesUsed int
toSend [][]byte
reinsert []*limitedBroadcast
)
// Visit fresher items first, but only look at stuff that will fit.
// We'll go tier by tier, grabbing the largest items first.
minTr, maxTr := q.getTransmitRange()
for transmits := minTr; transmits <= maxTr; /*do not advance automatically*/ {
free := int64(limit - bytesUsed - overhead)
if free <= 0 {
break // bail out early
}
// Search for the least element on a given tier (by transmit count) as
// defined in the limitedBroadcast.Less function that will fit into our
// remaining space.
greaterOrEqual := &limitedBroadcast{
transmits: transmits,
msgLen: free,
id: math.MaxInt64,
| random_line_split |
|
engine.go | 2) int32 {
ply := eng.ply()
pvNode := α+1 < β
pos := eng.Position
us := pos.Us()
// update statistics
eng.Stats.Nodes++
if !eng.stopped && eng.Stats.Nodes >= eng.checkpoint {
eng.checkpoint = eng.Stats.Nodes + checkpointStep
if eng.timeControl.Stopped() {
eng.stopped = true
}
}
if eng.stopped {
return α
}
if pvNode && ply > eng.Stats.SelDepth {
eng.Stats.SelDepth = ply
}
// verify that this is not already an endgame
if score, done := eng.endPosition(); done && (ply != 0 || score != 0) {
// at root we ignore draws because some GUIs don't properly detect
// theoretical draws; e.g. cutechess doesn't detect that kings and
// bishops when all bishops are on the same color; if the position
// is a theoretical draw, keep searching for a move
return score
}
// mate pruning: if an ancestor already has a mate in ply moves then
// the search will always fail low so we return the lowest winning score
if MateScore-ply <= α {
return KnownWinScore
}
// stop searching when the maximum search depth is reached
// depth can be < 0 due to aggressive LMR
if depth <= 0 {
return eng.searchQuiescence(α, β)
}
// check the transposition table
// entry will store the cached static evaluation which may be computed later
entry := eng.retrieveHash()
hash := entry.move
if eng.isIgnoredRootMove(hash) {
entry = hashEntry{}
hash = NullMove
}
if score := int32(entry.score); depth <= int32(entry.depth) &&
isInBounds(entry.kind, α, β, score) &&
(ply != 0 || !eng.isIgnoredRootMove(hash)) {
if pvNode {
// update the pv table, otherwise we risk not having a node at root
// if the pv entry was overwritten
eng.pvTable.Put(pos, hash)
}
if score >= β && hash != NullMove {
// if this is a CUT node, update the killer like in the regular move loop
eng.stack.SaveKiller(hash)
}
return score
}
sideIsChecked := pos.IsChecked(us)
// do a null move; if the null move fails high then the current
// position is too good, so opponent will not play it
// verification that we are not in check is done by tryMove
// which bails out if after the null move we are still in check
if !sideIsChecked && // nullmove is illegal when in check
MinorsAndMajors(pos, us) != 0 && // at least one minor/major piece.
KnownLossScore < α && β < KnownWinScore && // disable in lost or won positions
(entry.kind&hasStatic == 0 || int32(entry.static) >= β) {
eng.DoMove(NullMove)
reduction := 1 + depth/3
score := eng.tryMove(β-1, β, depth-reduction, 0, false)
if score >= β && score < KnownWinScore {
return score
}
}
// razoring at very low depth: if QS is under a considerable margin
// we return that score
if depth == 1 &&
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
rα := α - futilityMargin
if score := eng.searchQuiescence(rα, rα+1); score <= rα {
return score
}
}
// futility and history pruning at frontier nodes
// based on Deep Futility Pruning http://home.hccnet.nl/h.g.muller/deepfut.html
// based on History Leaf Pruning https://chessprogramming.wikispaces.com/History+Leaf+Pruning
// statically evaluates the position. Use static evaluation from hash if available
static := int32(0)
allowLeafsPruning := false
if depth <= futilityDepthLimit && // enable when close to the frontier
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
allowLeafsPruning = true
static = eng.cachedScore(&entry)
}
| // dropped true if not all moves were searched
// mate cannot be declared unless all moves were tested
dropped := false
numMoves := int32(0)
eng.stack.GenerateMoves(Violent|Quiet, hash)
for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
if ply == 0 {
if eng.isIgnoredRootMove(move) {
continue
}
eng.Log.CurrMove(int(depth), move, int(numMoves+1))
}
givesCheck := pos.GivesCheck(move)
critical := move == hash || eng.stack.IsKiller(move)
history := eng.history.get(move)
newDepth := depth
numMoves++
if allowLeafsPruning && !critical && !givesCheck && localα > KnownLossScore {
// prune moves that do not raise alphas and moves that performed bad historically
// prune bad captures moves that performed bad historically
if isFutile(pos, static, α, depth*futilityMargin, move) ||
history < -10 && move.IsQuiet() ||
see(pos, move) < -futilityMargin {
dropped = true
continue
}
}
// extend good moves that also gives check
// see discussion: http://www.talkchess.com/forum/viewtopic.php?t=56361
// when the move gives check, history pruning and futility pruning are also disabled
if givesCheck && !seeSign(pos, move) {
newDepth += checkDepthExtension
critical = true
}
// late move reduction: search best moves with full depth, reduce remaining moves
lmr := int32(0)
if !sideIsChecked && depth > lmrDepthLimit && !critical {
// reduce quiet moves and bad captures more at high depths and after many quiet moves
// large numMoves means it's likely not a CUT node. Large depth means reductions are less risky
if move.IsQuiet() {
if history <= 0 {
lmr = 2 + min(depth, numMoves)/6
} else {
lmr = 1 + min(depth, numMoves)/6
}
} else if see := see(pos, move); see < -futilityMargin {
lmr = 2 + min(depth, numMoves)/6
} else if see < 0 {
lmr = 1 + min(depth, numMoves)/6
}
}
// skip illegal moves that leave the king in check
eng.DoMove(move)
if pos.IsChecked(us) {
eng.UndoMove()
continue
}
score := eng.tryMove(max(α, localα), β, newDepth, lmr, numMoves > 1)
if score >= β {
// fail high, cut node
eng.history.add(move, 5+5*depth)
eng.stack.SaveKiller(move)
eng.updateHash(failedHigh|(entry.kind&hasStatic), depth, score, move, int32(entry.static))
return score
}
if score > localα {
bestMove, localα = move, score
}
eng.history.add(move, -1)
}
bound := getBound(α, β, localα)
if !dropped && bestMove == NullMove {
// if no move was found then the game is over
bound = exact
if sideIsChecked {
localα = MatedScore + ply
} else {
localα = 0
}
}
eng.updateHash(bound|(entry.kind&hasStatic), depth, localα, bestMove, int32(entry.static))
return localα
}
// search starts the search up to depth depth
// the returned score is from current side to move POV
// estimated is the score from previous depths
func (eng *Engine) search(depth, estimated int32) int32 {
// this method only implements aspiration windows
//
// the gradual widening algorithm is the one used by RobboLito
// and Stockfish and it is explained here:
// http://www.talkchess.com/forum/viewtopic.php?topic_view=threads&p=49976 | // principal variation search: search with a null window if there is already a good move
bestMove, localα := NullMove, int32(-InfinityScore) | random_line_split |
engine.go | implements searchTree framework
//
// searchTree fails soft, i.e. the score returned can be outside the bounds
//
// α, β represent lower and upper bounds
// depth is the search depth (decreasing)
//
// returns the score of the current position up to depth (modulo reductions/extensions)
// the returned score is from current player's POV
//
// invariants:
// if score <= α then the search failed low and the score is an upper bound
// else if score >= β then the search failed high and the score is a lower bound
// else score is exact
//
// assuming this is a maximizing nodes, failing high means that a
// minimizing ancestor node already has a better alternative
func (eng *Engine) searchTree(α, β, depth int32) int32 {
ply := eng.ply()
pvNode := α+1 < β
pos := eng.Position
us := pos.Us()
// update statistics
eng.Stats.Nodes++
if !eng.stopped && eng.Stats.Nodes >= eng.checkpoint {
eng.checkpoint = eng.Stats.Nodes + checkpointStep
if eng.timeControl.Stopped() {
eng.stopped = true
}
}
if eng.stopped {
return α
}
if pvNode && ply > eng.Stats.SelDepth {
eng.Stats.SelDepth = ply
}
// verify that this is not already an endgame
if score, done := eng.endPosition(); done && (ply != 0 || score != 0) {
// at root we ignore draws because some GUIs don't properly detect
// theoretical draws; e.g. cutechess doesn't detect that kings and
// bishops when all bishops are on the same color; if the position
// is a theoretical draw, keep searching for a move
return score
}
// mate pruning: if an ancestor already has a mate in ply moves then
// the search will always fail low so we return the lowest winning score
if MateScore-ply <= α {
return KnownWinScore
}
// stop searching when the maximum search depth is reached
// depth can be < 0 due to aggressive LMR
if depth <= 0 {
return eng.searchQuiescence(α, β)
}
// check the transposition table
// entry will store the cached static evaluation which may be computed later
entry := eng.retrieveHash()
hash := entry.move
if eng.isIgnoredRootMove(hash) {
entry = hashEntry{}
hash = NullMove
}
if score := int32(entry.score); depth <= int32(entry.depth) &&
isInBounds(entry.kind, α, β, score) &&
(ply != 0 || !eng.isIgnoredRootMove(hash)) {
if pvNode {
// update the pv table, otherwise we risk not having a node at root
// if the pv entry was overwritten
eng.pvTable.Put(pos, hash)
}
if score >= β && hash != NullMove {
// if this is a CUT node, update the killer like in the regular move loop
eng.stack.SaveKiller(hash)
}
return score
}
sideIsChecked := pos.IsChecked(us)
// do a null move; if the null move fails high then the current
// position is too good, so opponent will not play it
// verification that we are not in check is done by tryMove
// which bails out if after the null move we are still in check
if !sideIsChecked && // nullmove is illegal when in check
MinorsAndMajors(pos, us) != 0 && // at least one minor/major piece.
KnownLossScore < α && β < KnownWinScore && // disable in lost or won positions
(entry.kind&hasStatic == 0 || int32(entry.static) >= β) {
eng.DoMove(NullMove)
reduction := 1 + depth/3
score := eng.tryMove(β-1, β, depth-reduction, 0, false)
if score >= β && score < KnownWinScore {
return score
}
}
// razoring at very low depth: if QS is under a considerable margin
// we return that score
if depth == 1 &&
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
rα := α - futilityMargin
if score := eng.searchQuiescence(rα, rα+1); score <= rα {
return score
}
}
// futility and history pruning at frontier nodes
// based on Deep Futility Pruning http://home.hccnet.nl/h.g.muller/deepfut.html
// based on History Leaf Pruning https://chessprogramming.wikispaces.com/History+Leaf+Pruning
// statically evaluates the position. Use static evaluation from hash if available
static := int32(0)
allowLeafsPruning := false
if depth <= futilityDepthLimit && // enable when close to the frontier
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
allowLeafsPruning = true
static = eng.cachedScore(&entry)
}
// principal variation search: search with a null window if there is already a good move
bestMove, localα := NullMove, int32(-InfinityScore)
// dropped true if not all moves were searched
// mate cannot be declared unless all moves were tested
dropped := false
numMoves := int32(0)
eng.stack.GenerateMoves(Violent|Quiet, hash)
for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
if ply == 0 {
if eng.isIgnoredRootMove(move) {
continue
}
eng.Log.CurrMove(int(depth), move, int(numMoves+1))
}
givesCheck := pos.GivesCheck(move)
critical := move == hash || eng.stack.IsKiller(move)
history := eng.history.get(move)
newDepth := depth
numMoves++
if allowLeafsPruning && !critical && !givesCheck && localα > KnownLossScore {
// prune moves that do not raise alphas and moves that performed bad historically
// prune bad captures moves that performed bad historically
if isFutile(pos, static, α, depth*futilityMargin, move) ||
history < -10 && move.IsQuiet() ||
see(pos, move) < -futilityMargin {
dropped = true
continue
}
}
// extend good moves that also gives check
// see discussion: http://www.talkchess.com/forum/viewtopic.php?t=56361
// when the move gives check, history pruning and futility pruning are also disabled
if givesCheck && !seeSign(pos, move) {
newDepth += checkDepthExtension
critical = true
}
// late move reduction: search best moves with full depth, reduce remaining moves
lmr := int32(0)
if !sideIsChecked && depth > lmrDepthLimit && !critical {
// reduce quiet moves and bad captures more at high depths and after many quiet moves
// large numMoves means it's likely not a CUT node. Large depth means reductions are less risky
if move.IsQuiet() {
if history <= 0 {
lmr = 2 + min(depth, numMoves)/6
} else {
lmr = 1 + min(depth, numMoves)/6
}
} else if see := see(pos, move); see < -futilityMargin {
lmr = 2 + min(depth, numMoves)/6
} else if see < 0 {
lmr = 1 + min(depth, numMoves)/6
}
}
// skip illegal moves that leave the king in check
eng.DoMove(move)
if pos.IsChecked(us) {
eng.UndoMove()
continue
}
score := eng.tryMove(max(α, localα), β, newDepth, lmr, numMoves > 1)
if score >= β {
// fail high, cut node
eng.history.add(move, 5+5*depth)
eng.stack.SaveKiller(move)
eng.updateHash(failedHigh|(entry.kind&hasStatic), depth, score, move, int32(entry.static))
return score
}
if score > localα {
bestMove, localα | != 0 {
return false
}
for _, m := range eng.ignoreRootMoves {
if m == move {
return true
}
}
for _, m := range eng.onlyRootMoves {
if m == move {
return false
}
}
return len(eng.onlyRootMoves) != 0
}
// searchTree | identifier_body |
|
engine.go | uning
// statically evaluates the position. Use static evaluation from hash if available
static := int32(0)
allowLeafsPruning := false
if depth <= futilityDepthLimit && // enable when close to the frontier
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
allowLeafsPruning = true
static = eng.cachedScore(&entry)
}
// principal variation search: search with a null window if there is already a good move
bestMove, localα := NullMove, int32(-InfinityScore)
// dropped true if not all moves were searched
// mate cannot be declared unless all moves were tested
dropped := false
numMoves := int32(0)
eng.stack.GenerateMoves(Violent|Quiet, hash)
for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
if ply == 0 {
if eng.isIgnoredRootMove(move) {
continue
}
eng.Log.CurrMove(int(depth), move, int(numMoves+1))
}
givesCheck := pos.GivesCheck(move)
critical := move == hash || eng.stack.IsKiller(move)
history := eng.history.get(move)
newDepth := depth
numMoves++
if allowLeafsPruning && !critical && !givesCheck && localα > KnownLossScore {
// prune moves that do not raise alphas and moves that performed bad historically
// prune bad captures moves that performed bad historically
if isFutile(pos, static, α, depth*futilityMargin, move) ||
history < -10 && move.IsQuiet() ||
see(pos, move) < -futilityMargin {
dropped = true
continue
}
}
// extend good moves that also gives check
// see discussion: http://www.talkchess.com/forum/viewtopic.php?t=56361
// when the move gives check, history pruning and futility pruning are also disabled
if givesCheck && !seeSign(pos, move) {
newDepth += checkDepthExtension
critical = true
}
// late move reduction: search best moves with full depth, reduce remaining moves
lmr := int32(0)
if !sideIsChecked && depth > lmrDepthLimit && !critical {
// reduce quiet moves and bad captures more at high depths and after many quiet moves
// large numMoves means it's likely not a CUT node. Large depth means reductions are less risky
if move.IsQuiet() {
if history <= 0 {
lmr = 2 + min(depth, numMoves)/6
} else {
lmr = 1 + min(depth, numMoves)/6
}
} else if see := see(pos, move); see < -futilityMargin {
lmr = 2 + min(depth, numMoves)/6
} else if see < 0 {
lmr = 1 + min(depth, numMoves)/6
}
}
// skip illegal moves that leave the king in check
eng.DoMove(move)
if pos.IsChecked(us) {
eng.UndoMove()
continue
}
score := eng.tryMove(max(α, localα), β, newDepth, lmr, numMoves > 1)
if score >= β {
// fail high, cut node
eng.history.add(move, 5+5*depth)
eng.stack.SaveKiller(move)
eng.updateHash(failedHigh|(entry.kind&hasStatic), depth, score, move, int32(entry.static))
return score
}
if score > localα {
bestMove, localα = move, score
}
eng.history.add(move, -1)
}
bound := getBound(α, β, localα)
if !dropped && bestMove == NullMove {
// if no move was found then the game is over
bound = exact
if sideIsChecked {
localα = MatedScore + ply
} else {
localα = 0
}
}
eng.updateHash(bound|(entry.kind&hasStatic), depth, localα, bestMove, int32(entry.static))
return localα
}
// search starts the search up to depth depth
// the returned score is from current side to move POV
// estimated is the score from previous depths
func (eng *Engine) search(depth, estimated int32) int32 {
// this method only implements aspiration windows
//
// the gradual widening algorithm is the one used by RobboLito
// and Stockfish and it is explained here:
// http://www.talkchess.com/forum/viewtopic.php?topic_view=threads&p=499768&t=46624
γ, δ := estimated, int32(initialAspirationWindow)
α, β := max(γ-δ, -InfinityScore), min(γ+δ, InfinityScore)
score := estimated
if depth < 4 {
// disable aspiration window for very low search depths
α, β = -InfinityScore, +InfinityScore
}
for !eng.stopped {
// at root a non-null move is required, cannot prune based on null-move
score = eng.searchTree(α, β, depth)
if score <= α {
α = max(α-δ, -InfinityScore)
δ += δ / 2
} else if score >= β {
β = min(β+δ, InfinityScore)
δ += δ / 2
} else {
return score
}
}
return score
}
// searchMultiPV searches eng.options.MultiPV principal variations from current position
// returns score and the moves of the highest scoring pv line (possible empty)
// if a pv is not found (e.g. search is stopped during the first ply), return 0, nil
func (eng *Engine) searchMultiPV(depth, estimated int32) (int32, []Move) {
type pv struct {
score int32
moves []Move
}
multiPV := eng.Options.MultiPV
searchMultiPV := (eng.Options.HandicapLevel+4)/5 + 1
if multiPV < searchMultiPV {
multiPV = searchMultiPV
}
pvs := make([]pv, 0, multiPV)
eng.ignoreRootMoves = eng.ignoreRootMoves[:0]
for p := 0; p < multiPV; p++ {
if eng.UseAB {
// search using naive alphabeta
estimated = eng.searchAB(depth, estimated)
} else {
estimated = eng.search(depth, estimated)
}
if eng.stopped {
break // if eng has been stopped then this is not a legit pv
}
var moves []Move
if eng.UseAB {
// get pev from naive alphabeta's pv table
moves = eng.pvTableAB.Get(eng.Position)
} else {
moves = eng.pvTable.Get(eng.Position)
}
hasPV := len(moves) != 0 && !eng.isIgnoredRootMove(moves[0])
if p == 0 || hasPV { // at depth 0 we might not get a PV
pvs = append(pvs, pv{estimated, moves})
}
if !hasPV {
break
}
// if there is PV ignore the first move for the next PVs
eng.ignoreRootMoves = append(eng.ignoreRootMoves, moves[0])
}
// sort PVs by score
if len(pvs) == 0 {
return 0, nil
}
for i := range pvs {
for j := i; j >= 0; j-- {
if j == 0 || pvs[j-1].score > pvs[i].score {
tmp := pvs[i]
copy(pvs[j+1:i+1], pvs[j:i])
pvs[j] = tmp
break
}
}
}
for i := range pvs {
eng.Log.PrintPV(eng.Stats, i+1, pvs[i].score, pvs[i].moves)
}
// for best play return the PV with highest score
if eng.Options.HandicapLevel == 0 || len(pvs) <= 1 {
return pvs[0].score, pvs[0].moves
}
// PVs are sorted by score. Pick one PV at random
// and if the score is not too far off, return it
s := int32(eng.Options.HandicapLevel)
d := s*s/2 + s*10 + 5
n := rand.Intn(len(pvs))
for pvs[n].score+d < pvs[0].score {
n--
}
return pvs[n].score, pvs[n].moves
}
// Play evaluates current position. S | ee PlayMov | conditional_block |
|
engine.go | Moves)/6
}
}
// skip illegal moves that leave the king in check
eng.DoMove(move)
if pos.IsChecked(us) {
eng.UndoMove()
continue
}
score := eng.tryMove(max(α, localα), β, newDepth, lmr, numMoves > 1)
if score >= β {
// fail high, cut node
eng.history.add(move, 5+5*depth)
eng.stack.SaveKiller(move)
eng.updateHash(failedHigh|(entry.kind&hasStatic), depth, score, move, int32(entry.static))
return score
}
if score > localα {
bestMove, localα = move, score
}
eng.history.add(move, -1)
}
bound := getBound(α, β, localα)
if !dropped && bestMove == NullMove {
// if no move was found then the game is over
bound = exact
if sideIsChecked {
localα = MatedScore + ply
} else {
localα = 0
}
}
eng.updateHash(bound|(entry.kind&hasStatic), depth, localα, bestMove, int32(entry.static))
return localα
}
// search starts the search up to depth depth
// the returned score is from current side to move POV
// estimated is the score from previous depths
func (eng *Engine) search(depth, estimated int32) int32 {
// this method only implements aspiration windows
//
// the gradual widening algorithm is the one used by RobboLito
// and Stockfish and it is explained here:
// http://www.talkchess.com/forum/viewtopic.php?topic_view=threads&p=499768&t=46624
γ, δ := estimated, int32(initialAspirationWindow)
α, β := max(γ-δ, -InfinityScore), min(γ+δ, InfinityScore)
score := estimated
if depth < 4 {
// disable aspiration window for very low search depths
α, β = -InfinityScore, +InfinityScore
}
for !eng.stopped {
// at root a non-null move is required, cannot prune based on null-move
score = eng.searchTree(α, β, depth)
if score <= α {
α = max(α-δ, -InfinityScore)
δ += δ / 2
} else if score >= β {
β = min(β+δ, InfinityScore)
δ += δ / 2
} else {
return score
}
}
return score
}
// searchMultiPV searches eng.options.MultiPV principal variations from current position
// returns score and the moves of the highest scoring pv line (possible empty)
// if a pv is not found (e.g. search is stopped during the first ply), return 0, nil
func (eng *Engine) searchMultiPV(depth, estimated int32) (int32, []Move) {
type pv struct {
score int32
moves []Move
}
multiPV := eng.Options.MultiPV
searchMultiPV := (eng.Options.HandicapLevel+4)/5 + 1
if multiPV < searchMultiPV {
multiPV = searchMultiPV
}
pvs := make([]pv, 0, multiPV)
eng.ignoreRootMoves = eng.ignoreRootMoves[:0]
for p := 0; p < multiPV; p++ {
if eng.UseAB {
// search using naive alphabeta
estimated = eng.searchAB(depth, estimated)
} else {
estimated = eng.search(depth, estimated)
}
if eng.stopped {
break // if eng has been stopped then this is not a legit pv
}
var moves []Move
if eng.UseAB {
// get pev from naive alphabeta's pv table
moves = eng.pvTableAB.Get(eng.Position)
} else {
moves = eng.pvTable.Get(eng.Position)
}
hasPV := len(moves) != 0 && !eng.isIgnoredRootMove(moves[0])
if p == 0 || hasPV { // at depth 0 we might not get a PV
pvs = append(pvs, pv{estimated, moves})
}
if !hasPV {
break
}
// if there is PV ignore the first move for the next PVs
eng.ignoreRootMoves = append(eng.ignoreRootMoves, moves[0])
}
// sort PVs by score
if len(pvs) == 0 {
return 0, nil
}
for i := range pvs {
for j := i; j >= 0; j-- {
if j == 0 || pvs[j-1].score > pvs[i].score {
tmp := pvs[i]
copy(pvs[j+1:i+1], pvs[j:i])
pvs[j] = tmp
break
}
}
}
for i := range pvs {
eng.Log.PrintPV(eng.Stats, i+1, pvs[i].score, pvs[i].moves)
}
// for best play return the PV with highest score
if eng.Options.HandicapLevel == 0 || len(pvs) <= 1 {
return pvs[0].score, pvs[0].moves
}
// PVs are sorted by score. Pick one PV at random
// and if the score is not too far off, return it
s := int32(eng.Options.HandicapLevel)
d := s*s/2 + s*10 + 5
n := rand.Intn(len(pvs))
for pvs[n].score+d < pvs[0].score {
n--
}
return pvs[n].score, pvs[n].moves
}
// Play evaluates current position. See PlayMoves for the returned values
func (eng *Engine) Play(tc *TimeControl) (score int32, moves []Move) {
return eng.PlayMoves(tc, nil)
}
// PlayMoves evaluates current position searching only moves specifid by rootMoves
//
// returns the principal variation, that is
// moves[0] is the best move found and
// moves[1] is the pondering move
//
// if rootMoves is nil searches all root moves
//
// returns a nil pv if no move was found because the game is already finished
// returns empty pv array if it's valid position, but no pv was found (e.g. search depth is 0)
//
// Time control, tc, should already be started
func (eng *Engine) PlayMoves(tc *TimeControl, rootMoves []Move) (score int32, moves []Move) {
if !initialized {
initEngine()
}
eng.Log.BeginSearch()
eng.Stats = Stats{Depth: -1}
eng.rootPly = eng.Position.Ply
eng.timeControl = tc
eng.stopped = false
eng.checkpoint = checkpointStep
eng.stack.Reset(eng.Position)
eng.history.newSearch()
eng.onlyRootMoves = rootMoves
for depth := int32(0); depth < 64; depth++ {
if !tc.NextDepth(depth) {
// stop if tc control says we are done
// search at least one depth, otherwise a move cannot be returned
break
}
eng.Stats.Depth = depth
if s, m := eng.searchMultiPV(depth, score); len(moves) == 0 || len(m) != 0 {
score, moves = s, m
}
}
eng.Log.EndSearch()
if len(moves) == 0 && !eng.Position.HasLegalMoves() {
return 0, nil
} else if moves == nil {
return score, []Move{}
}
return score, moves
}
// ply returns the ply from the beginning of the search
func (eng *Engine) ply() int32 {
return int32(eng.Position.Ply - eng.rootPly)
}
// SetPosition sets current position
// if pos is nil, the starting position is set
func (eng *Engine) SetPosition(pos *Position) {
if pos != nil {
eng.Position = pos
} else {
eng.Position, _ = PositionFromFEN(FENStartPos)
}
}
// DoMove executes a move.
func (eng *Engine) DoMove(move Move) {
eng.Position.DoMove(move)
GlobalHashTable.prefetch(eng.Position)
}
// UndoMove undoes the last move
func (eng *Engine) UndoMove() {
eng.Position.UndoMove()
}
// Score evaluates current position from current player's POV
func (eng *Engine) Score() int32 {
return Evaluate(eng.Position).GetCentipawnsScore() * eng.Position.Us().Multiplier()
}
// cachedScore implements a cache on top of Score
// the cached static evaluation is stored in the hashEntry
func (eng *Engine) cachedScore(e *hashEntry) int32 {
if e.kind&hasStatic == 0 {
e.kind |= hasStatic
| e.static = | identifier_name |
|
app.component.ts | ;
this.currentEvent = this.events[1];
this.helpRequested = false;
this.helpPressed = false;
this.helpInformation = false;
this.restartRequested = false;
this.showWaitSpinner = false;
this.newEvent = null;
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
//this.occupied = false;
this.showAgenda = false;
this.selectedEvent = null;
this.selectedStartValue = 0;
this.unoccupied = !(this.occupied);
/*for (var i = this.calendarWorkdayStartHour; i <= this.calendarWorkdayEndHour; i++) {
if (i > 12) {
var iNum = +i;
var nNum = iNum - 12;
this.refHours.push(nNum.toString());
}
else {
this.refHours.push(i.toString());
}
var newDate = new Date()
newDate.setHours(i);
}*/
this.refreshData();
}
calcTimeslots(): void {
this.numTimeslots = ( this.calendarWorkdayEndHour - this.calendarWorkdayStartHour ) * (60 / this.timeIncrement);
this.populateRefHours();
this.populateTimeslots();
}
populateRefHours(): void {
this.refHours = [];
for (var i=this.calendarWorkdayStartHour; i < this.calendarWorkdayEndHour; i++ ){
this.refHours.push(i.toString());
}
}
populateTimeslots(): void {
// Populate valid time scheduling window
var d = new Date();
var tomorrow = new Date();
tomorrow.setDate(d.getDate() + 1);
tomorrow.setTime(0);
var minutes = d.getMinutes();
//var hours = d.getHours();
var m = 0;
if (this.timeIncrement == 15) {
m = (((minutes + 7.5) / 15 | 0) * 15) % 60; // Nearest 15 minute interval, rounded down
}
else {
m = (((minutes + 15) / 30 | 0) * 30) % 60;
//m = (Math.round(minutes/30) * 30) % 60;
}
// var h = ((((minutes/105) + .5) | 0) + hours) % 24; // Not quite right.
d.setMinutes(m);
d.setSeconds(0);
for (var i = 0; i < this.numTimeslots; i++) {
var amPm = "AM";
var mins = d.getMinutes();
var hours = d.getHours();
if (hours > 12) {
amPm = "PM";
hours = hours - 12;
}
if ((new Date).getDay() == d.getDay()) {
this.validTimeIncrements.push({
id: i,
dateTimeValue: d,
value: d.toLocaleTimeString(this.LOCALE, this.timeOptions)
//value: hours.toString() + ":" + mins.toString() + " " + amPm
});
}
d.setMinutes(mins + this.timeIncrement);
}
//Populate timeslots
for (var j = 0; j < 96; j++) {
var tmpTime1 = new Date();
var tmpTime2 = new Date(tmpTime1.valueOf());
var t2 = 0;
var t = new Timeslot();
tmpTime1.setMinutes(j * 15);
t.Start = tmpTime1;
if (j < 96) {
t2 = j + 1;
}
else {
t2 = j;
}
tmpTime2.setMinutes((j + 1) * 15);
t.End = tmpTime2;
this.timeSlots.push(t);
/*var h = t.Start.getHours();
if (t.Start.getHours() > 12) {
h = +(t.Start.getHours()) - 12;
}
if (this.refHours.length <= 0) {
this.refHours.push(h.toPrecision(1).toString());
}
else {
if (this.refHours[-1].valueOf() != h.toPrecision(1).toString()) {
this.refHours.push(h.toPrecision(1).toString());
}
}*/
tmpTime1 = null;
tmpTime2 = null;
}
}
openKeyboard(locale = this.defaultLocale) {
this._keyboardRef = this._keyboardService.open(locale, {
//darkTheme: this.darkTheme,
//darkTheme: true,
darkTheme:false,
duration: this.duration,
hasAction: this.hasAction,
isDebug: this.isDebug
});
}
closeCurrentKeyboard() |
toggleDarkTheme(dark: boolean) {
this.darkTheme = dark;
this._keyboardRef.darkTheme = dark;
}
availabilityClass(e: Event): string {
if (e.Subject.toString() == 'Available') {
return "agenda-view-row-available";
}
else {
return "agenda-view-row-unavailable";
}
}
bookNewEvent(): void {
/*//this.reset();
var d = new Date();
this.bookEvent = true;
this.newEvent = new Event();
var year = d.getFullYear().toString();
var month = d.getMonth().toString();
var day = d.getDay().toString();
var s = "" + year + "-" + month + "-" + day + "T";
var e = "" + year + "-" + month + "-" + day + "T";
var sH = "";
var eH = "";
if (this.newEventStartAmPm === "AM") {
sH = (this.newEventStartHour).toString();
}
else {
var sI = +(this.newEventStartHour);
sH = (sI + 12).toString();
}
if (this.newEventEndAmPm === "AM") {
eH = (this.newEventEndHour).toString();
}
else {
var eI = +(this.newEventEndHour);
eH = (eI + 12).toString();
}
s += sH + ":" + this.newEventStartMinute + ":000";
e += eH + ":" + this.newEventEndMinute + ":000";*/
this.reset();
}
bookNow(): void {
this.reset();
this.startScreenResetTimeout(70);
this.bookEvent = true;
this.calcTimeslots();
}
cancelEvent(event: Event): void {
this.reset();
this.cancellation = true;
}
cancelPage_no(): void {
this.reset();
}
cancelPage_yes(): void {
this.reset();
}
consolidate_events(): void {
//console.log("Consolidating events");
var consolidate = true;
var i = this.events.length - 1;
////console.log(i.toString());
while (consolidate) {
if (i > 0) {
if (this.events[i].Subject === this.events[i - 1].Subject) {
this.events[i - 1].End = new Date(this.events[i].End.getDate());
this.events.pop();
i = this.events.length - 1;
}
else {
i--;
}
if (i == 0) {
consolidate = false;
break;
}
}
else {
break;
}
}
}
currentMeeting() {
var now = new Date();
for (var i = 0; i < this.events.length; i++) {
if ((new Date(this.events[i].Start) <= now) && (new Date(this.events[i].End) >= now)) {
this.currentEvent = this.events[i];
//console.log(this.currentEvent);
return;
}
}
this.currentEvent = null;
//console.log(this.currentEvent);
}
/*getTimePeriod(d:Date): number {
var t = new Date(d.getDate());
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = t.getHours() * 60 * 60;
var minutes: number = t.getMinutes() * 60;
var seconds: number = t.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = t.getTime();
t.setHours(0);
t.setMinutes(0);
t.setSeconds(0);
var t2 = t.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}*/
currentTimePeriod(): number { // Return time period (0<x<96) for current time
var now = new Date();
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = now.getHours() * 60 * 60;
| {
if (this._keyboardRef) {
this._keyboardRef.dismiss();
}
} | identifier_body |
app.component.ts | 8;
this.currentEvent = this.events[1];
this.helpRequested = false;
this.helpPressed = false;
this.helpInformation = false;
this.restartRequested = false;
this.showWaitSpinner = false;
this.newEvent = null;
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
//this.occupied = false;
this.showAgenda = false;
this.selectedEvent = null;
this.selectedStartValue = 0;
this.unoccupied = !(this.occupied);
/*for (var i = this.calendarWorkdayStartHour; i <= this.calendarWorkdayEndHour; i++) {
if (i > 12) {
var iNum = +i;
var nNum = iNum - 12;
this.refHours.push(nNum.toString());
}
else {
this.refHours.push(i.toString());
}
var newDate = new Date()
newDate.setHours(i);
}*/
this.refreshData();
}
calcTimeslots(): void {
this.numTimeslots = ( this.calendarWorkdayEndHour - this.calendarWorkdayStartHour ) * (60 / this.timeIncrement);
this.populateRefHours();
this.populateTimeslots();
}
populateRefHours(): void {
this.refHours = [];
for (var i=this.calendarWorkdayStartHour; i < this.calendarWorkdayEndHour; i++ ){
this.refHours.push(i.toString());
}
}
| (): void {
// Populate valid time scheduling window
var d = new Date();
var tomorrow = new Date();
tomorrow.setDate(d.getDate() + 1);
tomorrow.setTime(0);
var minutes = d.getMinutes();
//var hours = d.getHours();
var m = 0;
if (this.timeIncrement == 15) {
m = (((minutes + 7.5) / 15 | 0) * 15) % 60; // Nearest 15 minute interval, rounded down
}
else {
m = (((minutes + 15) / 30 | 0) * 30) % 60;
//m = (Math.round(minutes/30) * 30) % 60;
}
// var h = ((((minutes/105) + .5) | 0) + hours) % 24; // Not quite right.
d.setMinutes(m);
d.setSeconds(0);
for (var i = 0; i < this.numTimeslots; i++) {
var amPm = "AM";
var mins = d.getMinutes();
var hours = d.getHours();
if (hours > 12) {
amPm = "PM";
hours = hours - 12;
}
if ((new Date).getDay() == d.getDay()) {
this.validTimeIncrements.push({
id: i,
dateTimeValue: d,
value: d.toLocaleTimeString(this.LOCALE, this.timeOptions)
//value: hours.toString() + ":" + mins.toString() + " " + amPm
});
}
d.setMinutes(mins + this.timeIncrement);
}
//Populate timeslots
for (var j = 0; j < 96; j++) {
var tmpTime1 = new Date();
var tmpTime2 = new Date(tmpTime1.valueOf());
var t2 = 0;
var t = new Timeslot();
tmpTime1.setMinutes(j * 15);
t.Start = tmpTime1;
if (j < 96) {
t2 = j + 1;
}
else {
t2 = j;
}
tmpTime2.setMinutes((j + 1) * 15);
t.End = tmpTime2;
this.timeSlots.push(t);
/*var h = t.Start.getHours();
if (t.Start.getHours() > 12) {
h = +(t.Start.getHours()) - 12;
}
if (this.refHours.length <= 0) {
this.refHours.push(h.toPrecision(1).toString());
}
else {
if (this.refHours[-1].valueOf() != h.toPrecision(1).toString()) {
this.refHours.push(h.toPrecision(1).toString());
}
}*/
tmpTime1 = null;
tmpTime2 = null;
}
}
openKeyboard(locale = this.defaultLocale) {
this._keyboardRef = this._keyboardService.open(locale, {
//darkTheme: this.darkTheme,
//darkTheme: true,
darkTheme:false,
duration: this.duration,
hasAction: this.hasAction,
isDebug: this.isDebug
});
}
closeCurrentKeyboard() {
if (this._keyboardRef) {
this._keyboardRef.dismiss();
}
}
toggleDarkTheme(dark: boolean) {
this.darkTheme = dark;
this._keyboardRef.darkTheme = dark;
}
availabilityClass(e: Event): string {
if (e.Subject.toString() == 'Available') {
return "agenda-view-row-available";
}
else {
return "agenda-view-row-unavailable";
}
}
bookNewEvent(): void {
/*//this.reset();
var d = new Date();
this.bookEvent = true;
this.newEvent = new Event();
var year = d.getFullYear().toString();
var month = d.getMonth().toString();
var day = d.getDay().toString();
var s = "" + year + "-" + month + "-" + day + "T";
var e = "" + year + "-" + month + "-" + day + "T";
var sH = "";
var eH = "";
if (this.newEventStartAmPm === "AM") {
sH = (this.newEventStartHour).toString();
}
else {
var sI = +(this.newEventStartHour);
sH = (sI + 12).toString();
}
if (this.newEventEndAmPm === "AM") {
eH = (this.newEventEndHour).toString();
}
else {
var eI = +(this.newEventEndHour);
eH = (eI + 12).toString();
}
s += sH + ":" + this.newEventStartMinute + ":000";
e += eH + ":" + this.newEventEndMinute + ":000";*/
this.reset();
}
bookNow(): void {
this.reset();
this.startScreenResetTimeout(70);
this.bookEvent = true;
this.calcTimeslots();
}
cancelEvent(event: Event): void {
this.reset();
this.cancellation = true;
}
cancelPage_no(): void {
this.reset();
}
cancelPage_yes(): void {
this.reset();
}
consolidate_events(): void {
//console.log("Consolidating events");
var consolidate = true;
var i = this.events.length - 1;
////console.log(i.toString());
while (consolidate) {
if (i > 0) {
if (this.events[i].Subject === this.events[i - 1].Subject) {
this.events[i - 1].End = new Date(this.events[i].End.getDate());
this.events.pop();
i = this.events.length - 1;
}
else {
i--;
}
if (i == 0) {
consolidate = false;
break;
}
}
else {
break;
}
}
}
currentMeeting() {
var now = new Date();
for (var i = 0; i < this.events.length; i++) {
if ((new Date(this.events[i].Start) <= now) && (new Date(this.events[i].End) >= now)) {
this.currentEvent = this.events[i];
//console.log(this.currentEvent);
return;
}
}
this.currentEvent = null;
//console.log(this.currentEvent);
}
/*getTimePeriod(d:Date): number {
var t = new Date(d.getDate());
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = t.getHours() * 60 * 60;
var minutes: number = t.getMinutes() * 60;
var seconds: number = t.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = t.getTime();
t.setHours(0);
t.setMinutes(0);
t.setSeconds(0);
var t2 = t.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}*/
currentTimePeriod(): number { // Return time period (0<x<96) for current time
var now = new Date();
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = now.getHours() * 60 * 60;
var | populateTimeslots | identifier_name |
app.component.ts | ;
this.currentEvent = this.events[1];
this.helpRequested = false;
this.helpPressed = false;
this.helpInformation = false;
this.restartRequested = false;
this.showWaitSpinner = false;
this.newEvent = null;
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
//this.occupied = false;
this.showAgenda = false;
this.selectedEvent = null;
this.selectedStartValue = 0;
this.unoccupied = !(this.occupied);
/*for (var i = this.calendarWorkdayStartHour; i <= this.calendarWorkdayEndHour; i++) {
if (i > 12) {
var iNum = +i;
var nNum = iNum - 12;
this.refHours.push(nNum.toString());
}
else {
this.refHours.push(i.toString());
}
var newDate = new Date()
newDate.setHours(i);
}*/
this.refreshData();
}
calcTimeslots(): void {
this.numTimeslots = ( this.calendarWorkdayEndHour - this.calendarWorkdayStartHour ) * (60 / this.timeIncrement);
this.populateRefHours();
this.populateTimeslots();
}
populateRefHours(): void {
this.refHours = [];
for (var i=this.calendarWorkdayStartHour; i < this.calendarWorkdayEndHour; i++ ){
this.refHours.push(i.toString());
}
}
populateTimeslots(): void {
// Populate valid time scheduling window
var d = new Date();
var tomorrow = new Date();
tomorrow.setDate(d.getDate() + 1);
tomorrow.setTime(0);
var minutes = d.getMinutes();
//var hours = d.getHours();
var m = 0;
if (this.timeIncrement == 15) {
m = (((minutes + 7.5) / 15 | 0) * 15) % 60; // Nearest 15 minute interval, rounded down
}
else {
m = (((minutes + 15) / 30 | 0) * 30) % 60;
//m = (Math.round(minutes/30) * 30) % 60;
}
// var h = ((((minutes/105) + .5) | 0) + hours) % 24; // Not quite right.
d.setMinutes(m);
d.setSeconds(0);
for (var i = 0; i < this.numTimeslots; i++) {
var amPm = "AM";
var mins = d.getMinutes();
var hours = d.getHours();
if (hours > 12) {
amPm = "PM";
hours = hours - 12;
}
if ((new Date).getDay() == d.getDay()) {
this.validTimeIncrements.push({
id: i,
dateTimeValue: d,
value: d.toLocaleTimeString(this.LOCALE, this.timeOptions)
//value: hours.toString() + ":" + mins.toString() + " " + amPm
});
}
d.setMinutes(mins + this.timeIncrement);
}
//Populate timeslots
for (var j = 0; j < 96; j++) {
var tmpTime1 = new Date();
var tmpTime2 = new Date(tmpTime1.valueOf());
var t2 = 0;
var t = new Timeslot();
tmpTime1.setMinutes(j * 15);
t.Start = tmpTime1;
if (j < 96) {
t2 = j + 1;
}
else {
t2 = j;
}
tmpTime2.setMinutes((j + 1) * 15);
t.End = tmpTime2;
this.timeSlots.push(t);
/*var h = t.Start.getHours();
if (t.Start.getHours() > 12) {
h = +(t.Start.getHours()) - 12;
}
if (this.refHours.length <= 0) {
this.refHours.push(h.toPrecision(1).toString());
}
else {
if (this.refHours[-1].valueOf() != h.toPrecision(1).toString()) {
this.refHours.push(h.toPrecision(1).toString());
}
}*/
tmpTime1 = null;
tmpTime2 = null;
}
}
openKeyboard(locale = this.defaultLocale) {
this._keyboardRef = this._keyboardService.open(locale, {
//darkTheme: this.darkTheme,
//darkTheme: true,
darkTheme:false,
duration: this.duration,
hasAction: this.hasAction,
isDebug: this.isDebug
});
}
closeCurrentKeyboard() {
if (this._keyboardRef) {
this._keyboardRef.dismiss();
}
}
toggleDarkTheme(dark: boolean) {
this.darkTheme = dark;
this._keyboardRef.darkTheme = dark;
}
availabilityClass(e: Event): string {
if (e.Subject.toString() == 'Available') {
return "agenda-view-row-available";
}
else {
return "agenda-view-row-unavailable";
}
}
bookNewEvent(): void {
/*//this.reset();
var d = new Date();
this.bookEvent = true;
this.newEvent = new Event();
var year = d.getFullYear().toString();
var month = d.getMonth().toString();
var day = d.getDay().toString();
var s = "" + year + "-" + month + "-" + day + "T";
var e = "" + year + "-" + month + "-" + day + "T";
var sH = "";
var eH = "";
if (this.newEventStartAmPm === "AM") {
sH = (this.newEventStartHour).toString();
}
else {
var sI = +(this.newEventStartHour);
sH = (sI + 12).toString();
}
if (this.newEventEndAmPm === "AM") {
eH = (this.newEventEndHour).toString();
}
else {
var eI = +(this.newEventEndHour);
eH = (eI + 12).toString();
}
s += sH + ":" + this.newEventStartMinute + ":000";
e += eH + ":" + this.newEventEndMinute + ":000";*/
this.reset();
}
bookNow(): void {
this.reset();
this.startScreenResetTimeout(70);
this.bookEvent = true;
this.calcTimeslots();
}
cancelEvent(event: Event): void {
this.reset();
this.cancellation = true;
}
cancelPage_no(): void {
this.reset();
}
cancelPage_yes(): void {
this.reset();
}
consolidate_events(): void {
//console.log("Consolidating events");
var consolidate = true;
var i = this.events.length - 1;
////console.log(i.toString());
while (consolidate) {
if (i > 0) {
if (this.events[i].Subject === this.events[i - 1].Subject) {
this.events[i - 1].End = new Date(this.events[i].End.getDate());
this.events.pop();
i = this.events.length - 1;
}
else {
i--;
}
if (i == 0) {
consolidate = false;
break;
}
}
else {
break;
}
}
}
currentMeeting() {
var now = new Date();
for (var i = 0; i < this.events.length; i++) {
if ((new Date(this.events[i].Start) <= now) && (new Date(this.events[i].End) >= now)) |
}
this.currentEvent = null;
//console.log(this.currentEvent);
}
/*getTimePeriod(d:Date): number {
var t = new Date(d.getDate());
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = t.getHours() * 60 * 60;
var minutes: number = t.getMinutes() * 60;
var seconds: number = t.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = t.getTime();
t.setHours(0);
t.setMinutes(0);
t.setSeconds(0);
var t2 = t.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}*/
currentTimePeriod(): number { // Return time period (0<x<96) for current time
var now = new Date();
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = now.getHours() * 60 * 60;
| {
this.currentEvent = this.events[i];
//console.log(this.currentEvent);
return;
} | conditional_block |
app.component.ts | layout: string;
layouts: {
name: string;
layout: IKeyboardLayout;
}[];
get keyboardVisible(): boolean {
return this._keyboardService.isOpened;
}
constructor(private _keyboardService: MdKeyboardService,
@Inject(LOCALE_ID) public locale,
@Inject(MD_KEYBOARD_LAYOUTS) private _layouts,
private http: HttpClient) { }
ngOnInit(): void {
//var that = this;
window.addEventListener('load', function(){
document.addEventListener('touchstart', function(e){
if (timeoutID > 0 && timeoutID != null){
window.clearTimeout(timeoutID);
timeoutID = window.setTimeout(timeoutTTL);
}
e.preventDefault()
}, false)
}, false)
this.noEvents = true;
this.defaultLocale = ` ${this.LOCALE}`.slice(1);
this.utcTime();
this.transitionTimer = new SimpleTimer();
//console.log(this.timeSlots);
this.bookEvent = false;
this.cancellation = false;
this.calendarWorkdayEndHour = 17;
this.calendarWorkdayStartHour = 8;
this.currentEvent = this.events[1];
this.helpRequested = false;
this.helpPressed = false;
this.helpInformation = false;
this.restartRequested = false;
this.showWaitSpinner = false;
this.newEvent = null;
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
//this.occupied = false;
this.showAgenda = false;
this.selectedEvent = null;
this.selectedStartValue = 0;
this.unoccupied = !(this.occupied);
/*for (var i = this.calendarWorkdayStartHour; i <= this.calendarWorkdayEndHour; i++) {
if (i > 12) {
var iNum = +i;
var nNum = iNum - 12;
this.refHours.push(nNum.toString());
}
else {
this.refHours.push(i.toString());
}
var newDate = new Date()
newDate.setHours(i);
}*/
this.refreshData();
}
calcTimeslots(): void {
this.numTimeslots = ( this.calendarWorkdayEndHour - this.calendarWorkdayStartHour ) * (60 / this.timeIncrement);
this.populateRefHours();
this.populateTimeslots();
}
populateRefHours(): void {
this.refHours = [];
for (var i=this.calendarWorkdayStartHour; i < this.calendarWorkdayEndHour; i++ ){
this.refHours.push(i.toString());
}
}
populateTimeslots(): void {
// Populate valid time scheduling window
var d = new Date();
var tomorrow = new Date();
tomorrow.setDate(d.getDate() + 1);
tomorrow.setTime(0);
var minutes = d.getMinutes();
//var hours = d.getHours();
var m = 0;
if (this.timeIncrement == 15) {
m = (((minutes + 7.5) / 15 | 0) * 15) % 60; // Nearest 15 minute interval, rounded down
}
else {
m = (((minutes + 15) / 30 | 0) * 30) % 60;
//m = (Math.round(minutes/30) * 30) % 60;
}
// var h = ((((minutes/105) + .5) | 0) + hours) % 24; // Not quite right.
d.setMinutes(m);
d.setSeconds(0);
for (var i = 0; i < this.numTimeslots; i++) {
var amPm = "AM";
var mins = d.getMinutes();
var hours = d.getHours();
if (hours > 12) {
amPm = "PM";
hours = hours - 12;
}
if ((new Date).getDay() == d.getDay()) {
this.validTimeIncrements.push({
id: i,
dateTimeValue: d,
value: d.toLocaleTimeString(this.LOCALE, this.timeOptions)
//value: hours.toString() + ":" + mins.toString() + " " + amPm
});
}
d.setMinutes(mins + this.timeIncrement);
}
//Populate timeslots
for (var j = 0; j < 96; j++) {
var tmpTime1 = new Date();
var tmpTime2 = new Date(tmpTime1.valueOf());
var t2 = 0;
var t = new Timeslot();
tmpTime1.setMinutes(j * 15);
t.Start = tmpTime1;
if (j < 96) {
t2 = j + 1;
}
else {
t2 = j;
}
tmpTime2.setMinutes((j + 1) * 15);
t.End = tmpTime2;
this.timeSlots.push(t);
/*var h = t.Start.getHours();
if (t.Start.getHours() > 12) {
h = +(t.Start.getHours()) - 12;
}
if (this.refHours.length <= 0) {
this.refHours.push(h.toPrecision(1).toString());
}
else {
if (this.refHours[-1].valueOf() != h.toPrecision(1).toString()) {
this.refHours.push(h.toPrecision(1).toString());
}
}*/
tmpTime1 = null;
tmpTime2 = null;
}
}
openKeyboard(locale = this.defaultLocale) {
this._keyboardRef = this._keyboardService.open(locale, {
//darkTheme: this.darkTheme,
//darkTheme: true,
darkTheme:false,
duration: this.duration,
hasAction: this.hasAction,
isDebug: this.isDebug
});
}
closeCurrentKeyboard() {
if (this._keyboardRef) {
this._keyboardRef.dismiss();
}
}
toggleDarkTheme(dark: boolean) {
this.darkTheme = dark;
this._keyboardRef.darkTheme = dark;
}
availabilityClass(e: Event): string {
if (e.Subject.toString() == 'Available') {
return "agenda-view-row-available";
}
else {
return "agenda-view-row-unavailable";
}
}
bookNewEvent(): void {
/*//this.reset();
var d = new Date();
this.bookEvent = true;
this.newEvent = new Event();
var year = d.getFullYear().toString();
var month = d.getMonth().toString();
var day = d.getDay().toString();
var s = "" + year + "-" + month + "-" + day + "T";
var e = "" + year + "-" + month + "-" + day + "T";
var sH = "";
var eH = "";
if (this.newEventStartAmPm === "AM") {
sH = (this.newEventStartHour).toString();
}
else {
var sI = +(this.newEventStartHour);
sH = (sI + 12).toString();
}
if (this.newEventEndAmPm === "AM") {
eH = (this.newEventEndHour).toString();
}
else {
var eI = +(this.newEventEndHour);
eH = (eI + 12).toString();
}
s += sH + ":" + this.newEventStartMinute + ":000";
e += eH + ":" + this.newEventEndMinute + ":000";*/
this.reset();
}
bookNow(): void {
this.reset();
this.startScreenResetTimeout(70);
this.bookEvent = true;
this.calcTimeslots();
}
cancelEvent(event: Event): void {
this.reset();
this.cancellation = true;
}
cancelPage_no(): void {
this.reset();
}
cancelPage_yes(): void {
this.reset();
}
consolidate_events(): void {
//console.log("Consolidating events");
var consolidate = true;
var i = this.events.length - 1;
////console.log(i.toString());
while (consolidate) {
if (i > 0) {
if (this.events[i].Subject === this.events[i - 1].Subject) {
this.events[i - 1].End = new Date(this.events[i].End.getDate());
this.events.pop();
i = this.events.length - 1;
}
else {
i--;
}
if (i == 0) {
consolidate = false;
break;
}
}
else {
break;
}
}
}
currentMeeting() {
var now = new Date();
for (var i = 0; i < this.events.length; i++) {
if ((new Date(this.events[i].Start) <= now) && (new Date(this.events[i].End) >= now)) {
this.currentEvent = this.events[i];
//console.log(this.currentEvent);
return;
}
}
this.currentEvent = null;
//console.log(this.currentEvent);
}
/*getTimePeriod(d:Date): number {
var t = new Date(d.getDate());
var msIn1 | isDebug: boolean;
defaultLocale: string;
| random_line_split |
|
dhcpd.go | nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Couldn't find IPv4 address of interface %s %+v", s.InterfaceName, iface)
}
if s.LeaseDuration == 0 {
s.leaseTime = time.Hour * 2
s.LeaseDuration = uint(s.leaseTime.Seconds())
} else {
s.leaseTime = time.Second * time.Duration(s.LeaseDuration)
}
s.leaseStart, err = parseIPv4(s.RangeStart)
if err != nil |
s.leaseStop, err = parseIPv4(s.RangeEnd)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range end address %s", s.RangeEnd)
}
subnet, err := parseIPv4(s.SubnetMask)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse subnet mask %s", s.SubnetMask)
}
// if !bytes.Equal(subnet, s.ipnet.Mask) {
// s.closeConn() // in case it was already started
// return wrapErrPrint(err, "specified subnet mask %s does not meatch interface %s subnet mask %s", s.SubnetMask, s.InterfaceName, s.ipnet.Mask)
// }
router, err := parseIPv4(s.GatewayIP)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse gateway IP %s", s.GatewayIP)
}
s.leaseOptions = dhcp4.Options{
dhcp4.OptionSubnetMask: subnet,
dhcp4.OptionRouter: router,
dhcp4.OptionDomainNameServer: s.ipnet.IP,
}
// TODO: don't close if interface and addresses are the same
if s.conn != nil {
s.closeConn()
}
s.dbLoad()
c, err := newFilterConn(*iface, ":67") // it has to be bound to 0.0.0.0:67, otherwise it won't see DHCP discover/request packets
if err != nil {
return wrapErrPrint(err, "Couldn't start listening socket on 0.0.0.0:67")
}
log.Info("DHCP: listening on 0.0.0.0:67")
s.conn = c
s.cond = sync.NewCond(&s.mutex)
s.running = true
go func() {
// operate on c instead of c.conn because c.conn can change over time
err := dhcp4.Serve(c, s)
if err != nil && !s.stopping {
log.Printf("dhcp4.Serve() returned with error: %s", err)
}
c.Close() // in case Serve() exits for other reason than listening socket closure
s.running = false
s.cond.Signal()
}()
return nil
}
// Stop closes the listening UDP socket
func (s *Server) Stop() error {
if s.conn == nil {
// nothing to do, return silently
return nil
}
s.stopping = true
err := s.closeConn()
if err != nil {
return wrapErrPrint(err, "Couldn't close UDP listening socket")
}
// We've just closed the listening socket.
// Worker thread should exit right after it tries to read from the socket.
s.mutex.Lock()
for s.running {
s.cond.Wait()
}
s.mutex.Unlock()
s.dbStore()
return nil
}
// closeConn will close the connection and set it to zero
func (s *Server) closeConn() error {
if s.conn == nil {
return nil
}
err := s.conn.Close()
s.conn = nil
return err
}
// Reserve a lease for the client
func (s *Server) reserveLease(p dhcp4.Packet) (*Lease, error) {
// WARNING: do not remove copy()
// the given hwaddr by p.CHAddr() in the packet survives only during ServeDHCP() call
// since we need to retain it we need to make our own copy
hwaddrCOW := p.CHAddr()
hwaddr := make(net.HardwareAddr, len(hwaddrCOW))
copy(hwaddr, hwaddrCOW)
// not assigned a lease, create new one, find IP from LRU
hostname := p.ParseOptions()[dhcp4.OptionHostName]
lease := &Lease{HWAddr: hwaddr, Hostname: string(hostname)}
log.Tracef("Lease not found for %s: creating new one", hwaddr)
ip, err := s.findFreeIP(hwaddr)
if err != nil {
i := s.findExpiredLease()
if i < 0 {
return nil, wrapErrPrint(err, "Couldn't find free IP for the lease %s", hwaddr.String())
}
log.Tracef("Assigning IP address %s to %s (lease for %s expired at %s)",
s.leases[i].IP, hwaddr, s.leases[i].HWAddr, s.leases[i].Expiry)
lease.IP = s.leases[i].IP
s.Lock()
s.leases[i] = lease
s.Unlock()
s.reserveIP(lease.IP, hwaddr)
return lease, nil
}
log.Tracef("Assigning to %s IP address %s", hwaddr, ip.String())
lease.IP = ip
s.Lock()
s.leases = append(s.leases, lease)
s.Unlock()
return lease, nil
}
// Find a lease for the client
func (s *Server) findLease(p dhcp4.Packet) *Lease {
hwaddr := p.CHAddr()
for i := range s.leases {
if bytes.Equal([]byte(hwaddr), []byte(s.leases[i].HWAddr)) {
// log.Tracef("bytes.Equal(%s, %s) returned true", hwaddr, s.leases[i].hwaddr)
return s.leases[i]
}
}
return nil
}
// Find an expired lease and return its index or -1
func (s *Server) findExpiredLease() int {
now := time.Now().Unix()
for i, lease := range s.leases {
if lease.Expiry.Unix() <= now {
return i
}
}
return -1
}
func (s *Server) findFreeIP(hwaddr net.HardwareAddr) (net.IP, error) {
// if IP pool is nil, lazy initialize it
if s.IPpool == nil {
s.IPpool = make(map[[4]byte]net.HardwareAddr)
}
// go from start to end, find unreserved IP
var foundIP net.IP
for i := 0; i < dhcp4.IPRange(s.leaseStart, s.leaseStop); i++ {
newIP := dhcp4.IPAdd(s.leaseStart, i)
foundHWaddr := s.findReservedHWaddr(newIP)
log.Tracef("tried IP %v, got hwaddr %v", newIP, foundHWaddr)
if foundHWaddr != nil && len(foundHWaddr) != 0 {
// if !bytes.Equal(foundHWaddr, hwaddr) {
// log.Tracef("SHOULD NOT HAPPEN: hwaddr in IP pool %s is not equal to hwaddr in lease %s", foundHWaddr, hwaddr)
// }
continue
}
foundIP = newIP
break
}
if foundIP == nil {
// TODO: LRU
return nil, fmt.Errorf("couldn't find free entry in IP pool")
}
s.reserveIP(foundIP, hwaddr)
return foundIP, nil
}
func (s *Server) findReservedHWaddr(ip net.IP) net.HardwareAddr {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
return s.IPpool[IP4]
}
func (s *Server) reserveIP(ip net.IP, hwaddr net.HardwareAddr) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
s.IPpool[IP4] = hwaddr
}
func (s *Server) unreserveIP(ip net.IP) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
delete(s.IPpool, IP4)
}
// ServeDHCP handles an incoming DHCP request
func (s *Server) ServeDHCP(p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {
s.printLeases()
switch msgType {
case dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?
return s.handleDiscover(p, options)
case dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)
// start/renew a lease -- update lease time
// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get | {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range start address %s", s.RangeStart)
} | conditional_block |
dhcpd.go | .Tracef("Lease not found for %s: creating new one", hwaddr)
ip, err := s.findFreeIP(hwaddr)
if err != nil {
i := s.findExpiredLease()
if i < 0 {
return nil, wrapErrPrint(err, "Couldn't find free IP for the lease %s", hwaddr.String())
}
log.Tracef("Assigning IP address %s to %s (lease for %s expired at %s)",
s.leases[i].IP, hwaddr, s.leases[i].HWAddr, s.leases[i].Expiry)
lease.IP = s.leases[i].IP
s.Lock()
s.leases[i] = lease
s.Unlock()
s.reserveIP(lease.IP, hwaddr)
return lease, nil
}
log.Tracef("Assigning to %s IP address %s", hwaddr, ip.String())
lease.IP = ip
s.Lock()
s.leases = append(s.leases, lease)
s.Unlock()
return lease, nil
}
// Find a lease for the client
func (s *Server) findLease(p dhcp4.Packet) *Lease {
hwaddr := p.CHAddr()
for i := range s.leases {
if bytes.Equal([]byte(hwaddr), []byte(s.leases[i].HWAddr)) {
// log.Tracef("bytes.Equal(%s, %s) returned true", hwaddr, s.leases[i].hwaddr)
return s.leases[i]
}
}
return nil
}
// Find an expired lease and return its index or -1
func (s *Server) findExpiredLease() int {
now := time.Now().Unix()
for i, lease := range s.leases {
if lease.Expiry.Unix() <= now {
return i
}
}
return -1
}
func (s *Server) findFreeIP(hwaddr net.HardwareAddr) (net.IP, error) {
// if IP pool is nil, lazy initialize it
if s.IPpool == nil {
s.IPpool = make(map[[4]byte]net.HardwareAddr)
}
// go from start to end, find unreserved IP
var foundIP net.IP
for i := 0; i < dhcp4.IPRange(s.leaseStart, s.leaseStop); i++ {
newIP := dhcp4.IPAdd(s.leaseStart, i)
foundHWaddr := s.findReservedHWaddr(newIP)
log.Tracef("tried IP %v, got hwaddr %v", newIP, foundHWaddr)
if foundHWaddr != nil && len(foundHWaddr) != 0 {
// if !bytes.Equal(foundHWaddr, hwaddr) {
// log.Tracef("SHOULD NOT HAPPEN: hwaddr in IP pool %s is not equal to hwaddr in lease %s", foundHWaddr, hwaddr)
// }
continue
}
foundIP = newIP
break
}
if foundIP == nil {
// TODO: LRU
return nil, fmt.Errorf("couldn't find free entry in IP pool")
}
s.reserveIP(foundIP, hwaddr)
return foundIP, nil
}
func (s *Server) findReservedHWaddr(ip net.IP) net.HardwareAddr {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
return s.IPpool[IP4]
}
func (s *Server) reserveIP(ip net.IP, hwaddr net.HardwareAddr) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
s.IPpool[IP4] = hwaddr
}
func (s *Server) unreserveIP(ip net.IP) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
delete(s.IPpool, IP4)
}
// ServeDHCP handles an incoming DHCP request
func (s *Server) ServeDHCP(p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {
s.printLeases()
switch msgType {
case dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?
return s.handleDiscover(p, options)
case dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)
// start/renew a lease -- update lease time
// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get NAK, they restart full cycle with Discover then Request
return s.handleDHCP4Request(p, options)
case dhcp4.Decline: // Broadcast From Client - Sorry I can't use that IP
return s.handleDecline(p, options)
case dhcp4.Release: // From Client, I don't need that IP anymore
return s.handleRelease(p, options)
case dhcp4.Inform: // From Client, I have this IP and there's nothing you can do about it
return s.handleInform(p, options)
// from server -- ignore those but enumerate just in case
case dhcp4.Offer: // Broadcast From Server - Here's an IP
log.Printf("DHCP: received message from %s: Offer", p.CHAddr())
case dhcp4.ACK: // From Server, Yes you can have that IP
log.Printf("DHCP: received message from %s: ACK", p.CHAddr())
case dhcp4.NAK: // From Server, No you cannot have that IP
log.Printf("DHCP: received message from %s: NAK", p.CHAddr())
default:
log.Printf("DHCP: unknown packet %v from %s", msgType, p.CHAddr())
return nil
}
return nil
}
// Send ICMP to the specified machine
// Return TRUE if it doesn't reply, which probably means that the IP is available
func (s *Server) addrAvailable(target net.IP) bool {
if s.ICMPTimeout == 0 {
return true
}
pinger, err := ping.NewPinger(target.String())
if err != nil {
log.Error("ping.NewPinger(): %v", err)
return true
}
pinger.SetPrivileged(true)
pinger.Timeout = time.Duration(s.ICMPTimeout) * time.Millisecond
pinger.Count = 1
reply := false
pinger.OnRecv = func(pkt *ping.Packet) {
// log.Tracef("Received ICMP Reply from %v", target)
reply = true
}
log.Tracef("Sending ICMP Echo to %v", target)
pinger.Run()
if reply {
log.Info("DHCP: IP conflict: %v is already used by another device", target)
return false
}
log.Tracef("ICMP procedure is complete: %v", target)
return true
}
// Add the specified IP to the black list for a time period
func (s *Server) blacklistLease(lease *Lease) {
hw := make(net.HardwareAddr, 6)
s.reserveIP(lease.IP, hw)
s.Lock()
lease.HWAddr = hw
lease.Hostname = ""
lease.Expiry = time.Now().Add(s.leaseTime)
s.Unlock()
}
// Return TRUE if DHCP packet is correct
func isValidPacket(p dhcp4.Packet) bool {
hw := p.CHAddr()
zeroes := make([]byte, len(hw))
if bytes.Equal(hw, zeroes) {
log.Tracef("Packet has empty CHAddr")
return false
}
return true
}
func (s *Server) handleDiscover(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
// find a lease, but don't update lease time
var lease *Lease
var err error
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
hostname := p.ParseOptions()[dhcp4.OptionHostName]
log.Tracef("Message from client: Discover. ReqIP: %s HW: %s Hostname: %s",
reqIP, p.CHAddr(), hostname)
if !isValidPacket(p) {
return nil
}
lease = s.findLease(p)
for lease == nil {
lease, err = s.reserveLease(p)
if err != nil {
log.Error("Couldn't find free lease: %s", err)
return nil
}
if !s.addrAvailable(lease.IP) {
s.blacklistLease(lease)
lease = nil
continue
}
break
}
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
reply := dhcp4.ReplyPacket(p, dhcp4.Offer, s.ipnet.IP, lease.IP, s.leaseTime, opt)
log.Tracef("Replying with offer: offered IP %v for %v with options %+v", lease.IP, s.leaseTime, reply.ParseOptions())
return reply
}
func (s *Server) handleDHCP4Request(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet { | var lease *Lease
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Request. IP: %s ReqIP: %s HW: %s",
p.CIAddr(), reqIP, p.CHAddr()) | random_line_split |
|
dhcpd.go | := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
s.IPpool[IP4] = hwaddr
}
func (s *Server) unreserveIP(ip net.IP) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
delete(s.IPpool, IP4)
}
// ServeDHCP handles an incoming DHCP request
func (s *Server) ServeDHCP(p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {
s.printLeases()
switch msgType {
case dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?
return s.handleDiscover(p, options)
case dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)
// start/renew a lease -- update lease time
// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get NAK, they restart full cycle with Discover then Request
return s.handleDHCP4Request(p, options)
case dhcp4.Decline: // Broadcast From Client - Sorry I can't use that IP
return s.handleDecline(p, options)
case dhcp4.Release: // From Client, I don't need that IP anymore
return s.handleRelease(p, options)
case dhcp4.Inform: // From Client, I have this IP and there's nothing you can do about it
return s.handleInform(p, options)
// from server -- ignore those but enumerate just in case
case dhcp4.Offer: // Broadcast From Server - Here's an IP
log.Printf("DHCP: received message from %s: Offer", p.CHAddr())
case dhcp4.ACK: // From Server, Yes you can have that IP
log.Printf("DHCP: received message from %s: ACK", p.CHAddr())
case dhcp4.NAK: // From Server, No you cannot have that IP
log.Printf("DHCP: received message from %s: NAK", p.CHAddr())
default:
log.Printf("DHCP: unknown packet %v from %s", msgType, p.CHAddr())
return nil
}
return nil
}
// Send ICMP to the specified machine
// Return TRUE if it doesn't reply, which probably means that the IP is available
func (s *Server) addrAvailable(target net.IP) bool {
if s.ICMPTimeout == 0 {
return true
}
pinger, err := ping.NewPinger(target.String())
if err != nil {
log.Error("ping.NewPinger(): %v", err)
return true
}
pinger.SetPrivileged(true)
pinger.Timeout = time.Duration(s.ICMPTimeout) * time.Millisecond
pinger.Count = 1
reply := false
pinger.OnRecv = func(pkt *ping.Packet) {
// log.Tracef("Received ICMP Reply from %v", target)
reply = true
}
log.Tracef("Sending ICMP Echo to %v", target)
pinger.Run()
if reply {
log.Info("DHCP: IP conflict: %v is already used by another device", target)
return false
}
log.Tracef("ICMP procedure is complete: %v", target)
return true
}
// Add the specified IP to the black list for a time period
func (s *Server) blacklistLease(lease *Lease) {
hw := make(net.HardwareAddr, 6)
s.reserveIP(lease.IP, hw)
s.Lock()
lease.HWAddr = hw
lease.Hostname = ""
lease.Expiry = time.Now().Add(s.leaseTime)
s.Unlock()
}
// Return TRUE if DHCP packet is correct
func isValidPacket(p dhcp4.Packet) bool {
hw := p.CHAddr()
zeroes := make([]byte, len(hw))
if bytes.Equal(hw, zeroes) {
log.Tracef("Packet has empty CHAddr")
return false
}
return true
}
func (s *Server) handleDiscover(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
// find a lease, but don't update lease time
var lease *Lease
var err error
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
hostname := p.ParseOptions()[dhcp4.OptionHostName]
log.Tracef("Message from client: Discover. ReqIP: %s HW: %s Hostname: %s",
reqIP, p.CHAddr(), hostname)
if !isValidPacket(p) {
return nil
}
lease = s.findLease(p)
for lease == nil {
lease, err = s.reserveLease(p)
if err != nil {
log.Error("Couldn't find free lease: %s", err)
return nil
}
if !s.addrAvailable(lease.IP) {
s.blacklistLease(lease)
lease = nil
continue
}
break
}
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
reply := dhcp4.ReplyPacket(p, dhcp4.Offer, s.ipnet.IP, lease.IP, s.leaseTime, opt)
log.Tracef("Replying with offer: offered IP %v for %v with options %+v", lease.IP, s.leaseTime, reply.ParseOptions())
return reply
}
func (s *Server) handleDHCP4Request(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
var lease *Lease
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Request. IP: %s ReqIP: %s HW: %s",
p.CIAddr(), reqIP, p.CHAddr())
if !isValidPacket(p) {
return nil
}
server := options[dhcp4.OptionServerIdentifier]
if server != nil && !net.IP(server).Equal(s.ipnet.IP) {
log.Tracef("Request message not for this DHCP server (%v vs %v)", server, s.ipnet.IP)
return nil // Message not for this dhcp server
}
if reqIP == nil {
reqIP = p.CIAddr()
} else if reqIP == nil || reqIP.To4() == nil {
log.Tracef("Requested IP isn't a valid IPv4: %s", reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease = s.findLease(p)
if lease == nil {
log.Tracef("Lease for %s isn't found", p.CHAddr())
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
if !lease.IP.Equal(reqIP) {
log.Tracef("Lease for %s doesn't match requested/client IP: %s vs %s",
lease.HWAddr, lease.IP, reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease.Expiry = time.Now().Add(s.leaseTime)
log.Tracef("Replying with ACK. IP: %s HW: %s Expire: %s",
lease.IP, lease.HWAddr, lease.Expiry)
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
return dhcp4.ReplyPacket(p, dhcp4.ACK, s.ipnet.IP, lease.IP, s.leaseTime, opt)
}
func (s *Server) handleInform(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Inform. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleRelease(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Release. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleDecline(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Decline. IP: %s HW: %s",
reqIP, p.CHAddr())
return nil
}
// Leases returns the list of current DHCP leases (thread-safe)
func (s *Server) Leases() []Lease {
var result []Lease
now := time.Now().Unix()
s.RLock()
for _, lease := range s.leases {
if lease.Expiry.Unix() > now {
result = append(result, *lease)
}
}
s.RUnlock()
return result
}
// Print information about the current leases
func (s *Server) printLeases() {
log.Tracef("Leases:")
for i, lease := range s.leases {
log.Tracef("Lease #%d: hwaddr %s, ip %s, expiry %s",
i, lease.HWAddr, lease.IP, lease.Expiry)
}
}
// Reset internal state
func (s *Server) reset() | {
s.Lock()
s.leases = nil
s.Unlock()
s.IPpool = make(map[[4]byte]net.HardwareAddr)
} | identifier_body |
|
dhcpd.go | nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Couldn't find IPv4 address of interface %s %+v", s.InterfaceName, iface)
}
if s.LeaseDuration == 0 {
s.leaseTime = time.Hour * 2
s.LeaseDuration = uint(s.leaseTime.Seconds())
} else {
s.leaseTime = time.Second * time.Duration(s.LeaseDuration)
}
s.leaseStart, err = parseIPv4(s.RangeStart)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range start address %s", s.RangeStart)
}
s.leaseStop, err = parseIPv4(s.RangeEnd)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range end address %s", s.RangeEnd)
}
subnet, err := parseIPv4(s.SubnetMask)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse subnet mask %s", s.SubnetMask)
}
// if !bytes.Equal(subnet, s.ipnet.Mask) {
// s.closeConn() // in case it was already started
// return wrapErrPrint(err, "specified subnet mask %s does not meatch interface %s subnet mask %s", s.SubnetMask, s.InterfaceName, s.ipnet.Mask)
// }
router, err := parseIPv4(s.GatewayIP)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse gateway IP %s", s.GatewayIP)
}
s.leaseOptions = dhcp4.Options{
dhcp4.OptionSubnetMask: subnet,
dhcp4.OptionRouter: router,
dhcp4.OptionDomainNameServer: s.ipnet.IP,
}
// TODO: don't close if interface and addresses are the same
if s.conn != nil {
s.closeConn()
}
s.dbLoad()
c, err := newFilterConn(*iface, ":67") // it has to be bound to 0.0.0.0:67, otherwise it won't see DHCP discover/request packets
if err != nil {
return wrapErrPrint(err, "Couldn't start listening socket on 0.0.0.0:67")
}
log.Info("DHCP: listening on 0.0.0.0:67")
s.conn = c
s.cond = sync.NewCond(&s.mutex)
s.running = true
go func() {
// operate on c instead of c.conn because c.conn can change over time
err := dhcp4.Serve(c, s)
if err != nil && !s.stopping {
log.Printf("dhcp4.Serve() returned with error: %s", err)
}
c.Close() // in case Serve() exits for other reason than listening socket closure
s.running = false
s.cond.Signal()
}()
return nil
}
// Stop closes the listening UDP socket
func (s *Server) Stop() error {
if s.conn == nil {
// nothing to do, return silently
return nil
}
s.stopping = true
err := s.closeConn()
if err != nil {
return wrapErrPrint(err, "Couldn't close UDP listening socket")
}
// We've just closed the listening socket.
// Worker thread should exit right after it tries to read from the socket.
s.mutex.Lock()
for s.running {
s.cond.Wait()
}
s.mutex.Unlock()
s.dbStore()
return nil
}
// closeConn will close the connection and set it to zero
func (s *Server) closeConn() error {
if s.conn == nil {
return nil
}
err := s.conn.Close()
s.conn = nil
return err
}
// Reserve a lease for the client
func (s *Server) reserveLease(p dhcp4.Packet) (*Lease, error) {
// WARNING: do not remove copy()
// the given hwaddr by p.CHAddr() in the packet survives only during ServeDHCP() call
// since we need to retain it we need to make our own copy
hwaddrCOW := p.CHAddr()
hwaddr := make(net.HardwareAddr, len(hwaddrCOW))
copy(hwaddr, hwaddrCOW)
// not assigned a lease, create new one, find IP from LRU
hostname := p.ParseOptions()[dhcp4.OptionHostName]
lease := &Lease{HWAddr: hwaddr, Hostname: string(hostname)}
log.Tracef("Lease not found for %s: creating new one", hwaddr)
ip, err := s.findFreeIP(hwaddr)
if err != nil {
i := s.findExpiredLease()
if i < 0 {
return nil, wrapErrPrint(err, "Couldn't find free IP for the lease %s", hwaddr.String())
}
log.Tracef("Assigning IP address %s to %s (lease for %s expired at %s)",
s.leases[i].IP, hwaddr, s.leases[i].HWAddr, s.leases[i].Expiry)
lease.IP = s.leases[i].IP
s.Lock()
s.leases[i] = lease
s.Unlock()
s.reserveIP(lease.IP, hwaddr)
return lease, nil
}
log.Tracef("Assigning to %s IP address %s", hwaddr, ip.String())
lease.IP = ip
s.Lock()
s.leases = append(s.leases, lease)
s.Unlock()
return lease, nil
}
// Find a lease for the client
func (s *Server) findLease(p dhcp4.Packet) *Lease {
hwaddr := p.CHAddr()
for i := range s.leases {
if bytes.Equal([]byte(hwaddr), []byte(s.leases[i].HWAddr)) {
// log.Tracef("bytes.Equal(%s, %s) returned true", hwaddr, s.leases[i].hwaddr)
return s.leases[i]
}
}
return nil
}
// Find an expired lease and return its index or -1
func (s *Server) findExpiredLease() int {
now := time.Now().Unix()
for i, lease := range s.leases {
if lease.Expiry.Unix() <= now {
return i
}
}
return -1
}
func (s *Server) findFreeIP(hwaddr net.HardwareAddr) (net.IP, error) {
// if IP pool is nil, lazy initialize it
if s.IPpool == nil {
s.IPpool = make(map[[4]byte]net.HardwareAddr)
}
// go from start to end, find unreserved IP
var foundIP net.IP
for i := 0; i < dhcp4.IPRange(s.leaseStart, s.leaseStop); i++ {
newIP := dhcp4.IPAdd(s.leaseStart, i)
foundHWaddr := s.findReservedHWaddr(newIP)
log.Tracef("tried IP %v, got hwaddr %v", newIP, foundHWaddr)
if foundHWaddr != nil && len(foundHWaddr) != 0 {
// if !bytes.Equal(foundHWaddr, hwaddr) {
// log.Tracef("SHOULD NOT HAPPEN: hwaddr in IP pool %s is not equal to hwaddr in lease %s", foundHWaddr, hwaddr)
// }
continue
}
foundIP = newIP
break
}
if foundIP == nil {
// TODO: LRU
return nil, fmt.Errorf("couldn't find free entry in IP pool")
}
s.reserveIP(foundIP, hwaddr)
return foundIP, nil
}
func (s *Server) findReservedHWaddr(ip net.IP) net.HardwareAddr {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
return s.IPpool[IP4]
}
func (s *Server) reserveIP(ip net.IP, hwaddr net.HardwareAddr) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
s.IPpool[IP4] = hwaddr
}
func (s *Server) unreserveIP(ip net.IP) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
delete(s.IPpool, IP4)
}
// ServeDHCP handles an incoming DHCP request
func (s *Server) | (p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {
s.printLeases()
switch msgType {
case dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?
return s.handleDiscover(p, options)
case dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)
// start/renew a lease -- update lease time
// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get N | ServeDHCP | identifier_name |
index.js | 24.256 911.33)" d="m505.58 148.29a70.219 68.464 0 0 1-54.814 66.796 70.219 68.464 0 0 1-78.865-37.488 70.219 68.464 0 0 1 20.211-83.244 70.219 68.464 0 0 1 87.733 0.96318" stroke="#000" stroke-linecap="round" stroke-width="22.66"/>
<path d="m377.05 468.98v75.785" stroke="#000002" stroke-linecap="square" stroke-width="25"/>
</g>
</svg>`,
init: function (restart) {
// lets set the viewport: https://stackoverflow.com/questions/1248081/how-to-get-the-browser-viewport-dimensions
this.vw = Math.max(document.documentElement.clientWidth, window.innerWidth || 0)
this.vh = Math.max(document.documentElement.clientHeight, window.innerHeight || 0)
// work out the number of columns to add
let additionalColumns = Math.floor(this.vw / this.minWidthForColumn)
const maxColumns = this.maxNumberOfColumnsEver - 1
if (additionalColumns > maxColumns) {
additionalColumns = maxColumns
}
// reset min and mix
this.maxXDefault = this.minXDefault + additionalColumns
if (restart === true || this.minX === 0) {
if (restart === true) {
this.restart = true
}
this.minX = this.minXDefault
this.minY = this.minYDefault
this.maxX = this.maxXDefault
this.maxY = this.maxYDefault
}
// start building HTML
let html = ''
html += this.getTableStart()
for (let y = 0; y <= this.maxY; y++) {
// if minY has not been reached yet, do the next loop
if (y > 0 && y < this.minY) {
continue
}
// start a row
html += this.getRowStart()
for (let x = 0; x <= this.maxX; x++) {
// if minX has not been reached yet, do the next loop
if (x > 0 && x < this.minX) {
continue
}
// build the cell
html += this.getCell(x, y)
}
html += this.getRowEnd()
}
html += this.getTableEnd()
document.getElementById('table-holder').innerHTML = html
this.setFirstThreeAnswers();
},
getTableStart: function () { return '<table><tbody>' },
getTableEnd: function () { return '</tbody></table>' },
getRowStart: function () { return '<tr>' },
getRowEnd: function () { return '</tr>' },
getRowHeader: function (y) {
return '<th scope="row" class="y-' + y + ' good">' + y + '</th>'
},
getColumnHeader: function (x) {
return '<th scope="col" class="x-' + x + ' good">' + x + '</th>'
},
getCell: function (x, y) {
if (x === 0 && y === 0) {
// HEADER-HEADER: this is the upper-left cell - the reset cell!
return '' +
'<th class="restart">' +
'<a href="#" ' +
'onclick="if(window.confirm(\'Delete all your answers and start again?\') === true) {tableBuilder.init(true);}">' +
this.restartSVG +
'</a> ' +
'</th>'
} else if (x === 0) {
// HEADER: get a new row (tr)
return this.getRowHeader(y)
} else if (y === 0) {
// HEADER: get a new column
return this.getColumnHeader(x)
} else {
// real cell!
const classX = 'x-' + x
const classY = 'y-' + y
const tabIndex = this.getTabIndex(x, y)
const id = 'input-' + x + 'x' + y
const value = this.getValue(id)
let classA = ''
if (value && value !== null) {
classA = 'good'
}
return '' +
'<td class="' + classX + ' ' + classY + '" >' +
'<input ' +
'type="number"' +
'id="' + id + '" ' +
'data-answer="' + (x * y) + '" ' +
'placeholder="' + x + '×' + y + '" ' +
'onkeyup="tableBuilder.test(event,this,' + x + ', ' + y + ', false);" ' +
'onblur="tableBuilder.test(this,' + x + ', ' + y + ', false);" ' +
'onchange="tableBuilder.test(this,' + x + ', ' + y + ', true);" ' +
'pattern="[0-9]" ' +
'tabindex="' + tabIndex + '" ' +
'value="' + value + '" ' +
'class="' + classA + '" ' +
'/>' +
'</td>'
}
},
g | (id) {
let value = ''
if (this.restart) {
this.myCookie.eraseCookie(id)
value = ''
} else {
value = this.myCookie.getCookie(id)
if (value === null) {
value = ''
}
}
return value
},
/**
* test if the entered value is correct?
* @param {object} event - what event caused the test?
* @param {object} el - element being tested
* @param {number} x - the value for x
* @param {number} y - the value for y
* @param {boolean} testGrid - ????
*/
test: function (event, el, x, y, testGrid) {
// what is the answer
const test = x * y
const answer = parseInt(el.value)
if (!answer || isNaN(answer)) {
// no answer!
this.makeNothing(el)
} else {
// test answer ...
const newGoodAnswer = !el.classList.contains('good')
if (answer === test) {
// right answer
this.makeGood(el)
// save cookie
this.myCookie.setCookie(el.id, answer)
// find next answer!
if (newGoodAnswer) {
const newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
}
// if(y === this.maxY && testGrid) {
// this.levelUp(x);
// }
} else {
// bad answer!
this.makeBad(el)
}
}
this.keyPressed(event, x, y)
},
makeNothing: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.remove('good')
el.classList.add('nothing')
}
},
/**
* bad answer
*/
makeGood: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.add('good')
}
},
/**
* good answer
*/
makeBad: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('good')
el.classList.add('bad')
}
},
/**
* action key being pressed
* @param {object} event
* @param {number} x
* @param {number} y
*/
keyPressed: function (event, x, y) {
let newTabIndex
switch (event.code) {
case 'Enter':
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowLeft':
newTabIndex = this.getLeftTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowRight':
newTabIndex = this.getRightTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
/*
This clashes with the number input type arrow key functionality
----
case "ArrowUp":
newTabIndex = this.getPrevTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case "DownUp":
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
*/
}
},
/**
* task completed!
* returns true if task is completed.
* @param {number} x
* @return {boolean}
*/
levelUp | etValue | identifier_name |
index.js | 24.256 911.33)" d="m505.58 148.29a70.219 68.464 0 0 1-54.814 66.796 70.219 68.464 0 0 1-78.865-37.488 70.219 68.464 0 0 1 20.211-83.244 70.219 68.464 0 0 1 87.733 0.96318" stroke="#000" stroke-linecap="round" stroke-width="22.66"/>
<path d="m377.05 468.98v75.785" stroke="#000002" stroke-linecap="square" stroke-width="25"/>
</g>
</svg>`,
init: function (restart) {
// lets set the viewport: https://stackoverflow.com/questions/1248081/how-to-get-the-browser-viewport-dimensions
this.vw = Math.max(document.documentElement.clientWidth, window.innerWidth || 0)
this.vh = Math.max(document.documentElement.clientHeight, window.innerHeight || 0)
// work out the number of columns to add
let additionalColumns = Math.floor(this.vw / this.minWidthForColumn)
const maxColumns = this.maxNumberOfColumnsEver - 1
if (additionalColumns > maxColumns) {
additionalColumns = maxColumns
}
// reset min and mix
this.maxXDefault = this.minXDefault + additionalColumns
if (restart === true || this.minX === 0) {
if (restart === true) {
this.restart = true
}
this.minX = this.minXDefault
this.minY = this.minYDefault
this.maxX = this.maxXDefault
this.maxY = this.maxYDefault
}
// start building HTML
let html = ''
html += this.getTableStart()
for (let y = 0; y <= this.maxY; y++) {
// if minY has not been reached yet, do the next loop
if (y > 0 && y < this.minY) {
continue
}
// start a row
html += this.getRowStart()
for (let x = 0; x <= this.maxX; x++) {
// if minX has not been reached yet, do the next loop
if (x > 0 && x < this.minX) {
continue
}
// build the cell
html += this.getCell(x, y)
}
html += this.getRowEnd()
}
html += this.getTableEnd()
document.getElementById('table-holder').innerHTML = html
this.setFirstThreeAnswers();
},
getTableStart: function () { return '<table><tbody>' },
getTableEnd: function () { return '</tbody></table>' },
getRowStart: function () { return '<tr>' },
getRowEnd: function () { return '</tr>' },
getRowHeader: function (y) {
return '<th scope="row" class="y-' + y + ' good">' + y + '</th>'
},
getColumnHeader: function (x) {
return '<th scope="col" class="x-' + x + ' good">' + x + '</th>'
},
getCell: function (x, y) {
if (x === 0 && y === 0) {
// HEADER-HEADER: this is the upper-left cell - the reset cell!
return '' +
'<th class="restart">' +
'<a href="#" ' +
'onclick="if(window.confirm(\'Delete all your answers and start again?\') === true) {tableBuilder.init(true);}">' +
this.restartSVG +
'</a> ' +
'</th>'
} else if (x === 0) {
// HEADER: get a new row (tr)
return this.getRowHeader(y)
} else if (y === 0) {
// HEADER: get a new column
return this.getColumnHeader(x)
} else {
// real cell!
const classX = 'x-' + x
const classY = 'y-' + y
const tabIndex = this.getTabIndex(x, y)
const id = 'input-' + x + 'x' + y
const value = this.getValue(id)
let classA = ''
if (value && value !== null) {
classA = 'good'
}
return '' +
'<td class="' + classX + ' ' + classY + '" >' +
'<input ' +
'type="number"' +
'id="' + id + '" ' +
'data-answer="' + (x * y) + '" ' +
'placeholder="' + x + '×' + y + '" ' +
'onkeyup="tableBuilder.test(event,this,' + x + ', ' + y + ', false);" ' +
'onblur="tableBuilder.test(this,' + x + ', ' + y + ', false);" ' +
'onchange="tableBuilder.test(this,' + x + ', ' + y + ', true);" ' +
'pattern="[0-9]" ' +
'tabindex="' + tabIndex + '" ' +
'value="' + value + '" ' +
'class="' + classA + '" ' +
'/>' +
'</td>'
}
},
getValue (id) { |
/**
* test if the entered value is correct?
* @param {object} event - what event caused the test?
* @param {object} el - element being tested
* @param {number} x - the value for x
* @param {number} y - the value for y
* @param {boolean} testGrid - ????
*/
test: function (event, el, x, y, testGrid) {
// what is the answer
const test = x * y
const answer = parseInt(el.value)
if (!answer || isNaN(answer)) {
// no answer!
this.makeNothing(el)
} else {
// test answer ...
const newGoodAnswer = !el.classList.contains('good')
if (answer === test) {
// right answer
this.makeGood(el)
// save cookie
this.myCookie.setCookie(el.id, answer)
// find next answer!
if (newGoodAnswer) {
const newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
}
// if(y === this.maxY && testGrid) {
// this.levelUp(x);
// }
} else {
// bad answer!
this.makeBad(el)
}
}
this.keyPressed(event, x, y)
},
makeNothing: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.remove('good')
el.classList.add('nothing')
}
},
/**
* bad answer
*/
makeGood: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.add('good')
}
},
/**
* good answer
*/
makeBad: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('good')
el.classList.add('bad')
}
},
/**
* action key being pressed
* @param {object} event
* @param {number} x
* @param {number} y
*/
keyPressed: function (event, x, y) {
let newTabIndex
switch (event.code) {
case 'Enter':
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowLeft':
newTabIndex = this.getLeftTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowRight':
newTabIndex = this.getRightTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
/*
This clashes with the number input type arrow key functionality
----
case "ArrowUp":
newTabIndex = this.getPrevTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case "DownUp":
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
*/
}
},
/**
* task completed!
* returns true if task is completed.
* @param {number} x
* @return {boolean}
*/
levelUp |
let value = ''
if (this.restart) {
this.myCookie.eraseCookie(id)
value = ''
} else {
value = this.myCookie.getCookie(id)
if (value === null) {
value = ''
}
}
return value
}, | identifier_body |
index.js | * @type {string}
*/
restartSVG: `
<svg version="1.1" viewBox="0 0 178.2 186.08" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(-287.94 -456.48)" fill="none">
<path transform="matrix(.46642 -.98449 1.0097 .47838 24.256 911.33)" d="m505.58 148.29a70.219 68.464 0 0 1-54.814 66.796 70.219 68.464 0 0 1-78.865-37.488 70.219 68.464 0 0 1 20.211-83.244 70.219 68.464 0 0 1 87.733 0.96318" stroke="#000" stroke-linecap="round" stroke-width="22.66"/>
<path d="m377.05 468.98v75.785" stroke="#000002" stroke-linecap="square" stroke-width="25"/>
</g>
</svg>`,
init: function (restart) {
// lets set the viewport: https://stackoverflow.com/questions/1248081/how-to-get-the-browser-viewport-dimensions
this.vw = Math.max(document.documentElement.clientWidth, window.innerWidth || 0)
this.vh = Math.max(document.documentElement.clientHeight, window.innerHeight || 0)
// work out the number of columns to add
let additionalColumns = Math.floor(this.vw / this.minWidthForColumn)
const maxColumns = this.maxNumberOfColumnsEver - 1
if (additionalColumns > maxColumns) {
additionalColumns = maxColumns
}
// reset min and mix
this.maxXDefault = this.minXDefault + additionalColumns
if (restart === true || this.minX === 0) {
if (restart === true) {
this.restart = true
}
this.minX = this.minXDefault
this.minY = this.minYDefault
this.maxX = this.maxXDefault
this.maxY = this.maxYDefault
}
// start building HTML
let html = ''
html += this.getTableStart()
for (let y = 0; y <= this.maxY; y++) {
// if minY has not been reached yet, do the next loop
if (y > 0 && y < this.minY) {
continue
}
// start a row
html += this.getRowStart()
for (let x = 0; x <= this.maxX; x++) {
// if minX has not been reached yet, do the next loop
if (x > 0 && x < this.minX) {
continue
}
// build the cell
html += this.getCell(x, y)
}
html += this.getRowEnd()
}
html += this.getTableEnd()
document.getElementById('table-holder').innerHTML = html
this.setFirstThreeAnswers();
},
getTableStart: function () { return '<table><tbody>' },
getTableEnd: function () { return '</tbody></table>' },
getRowStart: function () { return '<tr>' },
getRowEnd: function () { return '</tr>' },
getRowHeader: function (y) {
return '<th scope="row" class="y-' + y + ' good">' + y + '</th>'
},
getColumnHeader: function (x) {
return '<th scope="col" class="x-' + x + ' good">' + x + '</th>'
},
getCell: function (x, y) {
if (x === 0 && y === 0) {
// HEADER-HEADER: this is the upper-left cell - the reset cell!
return '' +
'<th class="restart">' +
'<a href="#" ' +
'onclick="if(window.confirm(\'Delete all your answers and start again?\') === true) {tableBuilder.init(true);}">' +
this.restartSVG +
'</a> ' +
'</th>'
} else if (x === 0) {
// HEADER: get a new row (tr)
return this.getRowHeader(y)
} else if (y === 0) {
// HEADER: get a new column
return this.getColumnHeader(x)
} else {
// real cell!
const classX = 'x-' + x
const classY = 'y-' + y
const tabIndex = this.getTabIndex(x, y)
const id = 'input-' + x + 'x' + y
const value = this.getValue(id)
let classA = ''
if (value && value !== null) {
classA = 'good'
}
return '' +
'<td class="' + classX + ' ' + classY + '" >' +
'<input ' +
'type="number"' +
'id="' + id + '" ' +
'data-answer="' + (x * y) + '" ' +
'placeholder="' + x + '×' + y + '" ' +
'onkeyup="tableBuilder.test(event,this,' + x + ', ' + y + ', false);" ' +
'onblur="tableBuilder.test(this,' + x + ', ' + y + ', false);" ' +
'onchange="tableBuilder.test(this,' + x + ', ' + y + ', true);" ' +
'pattern="[0-9]" ' +
'tabindex="' + tabIndex + '" ' +
'value="' + value + '" ' +
'class="' + classA + '" ' +
'/>' +
'</td>'
}
},
getValue (id) {
let value = ''
if (this.restart) {
this.myCookie.eraseCookie(id)
value = ''
} else {
value = this.myCookie.getCookie(id)
if (value === null) {
value = ''
}
}
return value
},
/**
* test if the entered value is correct?
* @param {object} event - what event caused the test?
* @param {object} el - element being tested
* @param {number} x - the value for x
* @param {number} y - the value for y
* @param {boolean} testGrid - ????
*/
test: function (event, el, x, y, testGrid) {
// what is the answer
const test = x * y
const answer = parseInt(el.value)
if (!answer || isNaN(answer)) {
// no answer!
this.makeNothing(el)
} else {
// test answer ...
const newGoodAnswer = !el.classList.contains('good')
if (answer === test) {
// right answer
this.makeGood(el)
// save cookie
this.myCookie.setCookie(el.id, answer)
// find next answer!
if (newGoodAnswer) {
const newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
}
// if(y === this.maxY && testGrid) {
// this.levelUp(x);
// }
} else {
// bad answer!
this.makeBad(el)
}
}
this.keyPressed(event, x, y)
},
makeNothing: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.remove('good')
el.classList.add('nothing')
}
},
/**
* bad answer
*/
makeGood: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.add('good')
}
},
/**
* good answer
*/
makeBad: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('good')
el.classList.add('bad')
}
},
/**
* action key being pressed
* @param {object} event
* @param {number} x
* @param {number} y
*/
keyPressed: function (event, x, y) {
let newTabIndex
switch (event.code) {
case 'Enter':
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowLeft':
newTabIndex = this.getLeftTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowRight':
newTabIndex = this.getRightTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
/*
This clashes with the number input type arrow key functionality
----
case | random_line_split |
||
parse_files_for_transcripts.py | , non_match_file)
## if -f flag is used, then just reformat data ###
# if 'format' in vars():
# # reformatted_data = _reformat_data(data_to_filter, mapfile)
#
# ### creating reformatted file
#
# final_file = open(outfile, 'w')
#
# ## print header row
# # final_file.write ("regulator feature name\tregulator gene name\ttarget feature name\ttarget gene name\tvalue\tstrain\n")
#
def | (dict_to_print, outfile):
print "making GFF file"
#print headers #
newfile = open(outfile, 'w')
newfile.write("## GFF file for transcripts\n")
chr_order = ['chrI','chrII','chrIII','chrIV','chrV','chrVI','chrVII','chrVIII','chrIX','chrX','chrXI','chrXII','chrXIII','chrXIV','chrXV','chrXVI']
for chr in chr_order:
transcript_data = dict_to_print[chr]
for key in sorted(transcript_data.keys()):
print 'feature: ' + key + '# of transcripts: ' + str(len(transcript_data[key]))
if len(transcript_data[key]) < 1:
continue
for track in transcript_data[key]:
print 'adding ' + key + ' to file'
# print "|".join(track.values())
notes = track.get('notes', ".")
newfile.write("\t".join([chr,'rtracklayer_'+key,'sequence_feature',track['start'], track['stop'], track['score'],track['strand'],'.',notes]))
newfile.write("\n")
# sys.exit
# def _make_bed_file(dict_to_print, outfile):
#
def _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap):
transcripts = list()
chrom_plus = list()
chrom_minus = list()
newdata = dict()
unfiltered_data = dict()
unmatched_trans = dict()
remove_list = dict()
for chromosome in genome_data.keys():
if chromosome == 'chrmt': # skip mito chromosome
print 'skipping mito'
continue
matching_file_data = file_data[chromosome]
# unmatched_trans = matching_file_data
# print 'chromosome:' + chromosome + ':' + matching_file_data[0]['chr']
#sys.exit()
## split by strand ##
for transcript in matching_file_data:
if transcript['strand'] == '+':
chrom_plus.append(transcript)
else:
chrom_minus.append(transcript)
print '# transcripts to search: ' + str(len(matching_file_data))
print '# plus strand: ' + str(len(chrom_plus))
print '# minus strand: ' + str(len(chrom_minus))
if feat_type != 'all': ## use specific feature type if specified
for element in genome_data[chromosome]:
# print feat_type + ':' + element['feat_type']
if element['feat_type'] != feat_type:
# print 'feature types don\'t match'
continue
else: #matching feature types
data_to_search = chrom_plus #default is plus strand
if element['strand'] == '-':
data_to_search = chrom_minus
transcripts = _find_overlapping_transcripts(element, data_to_search, overlap) # one feature, finding overlaps
# print 'number of transcripts: ' + str(len(transcripts))
# if len(transcripts) > 0:
# print '|'.join([d['key'] for d in transcripts])
# sys.exit()
# print element['feat_type'] + "->" + chromosome + " " + str(element['start']) + " to " + str(element['stop']) + ":" + element['notes'] + " has " + str(len(transcripts)) + ' number of matches\n'
# notes_array = element['notes'].split(';')
feat_id = element['feat_name']
# print "feat:" + feat_id
if len(transcripts) == 0: # skip if no matches
continue
## add to unfiltered list and Add 'key' to remove from list
if chromosome in unfiltered_data.keys():
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome].extend([d['id'].rstrip() for d in transcripts])
else:
unfiltered_data[chromosome] = dict()
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome] = [d['id'].rstrip() for d in transcripts]
## filter transcripts by score or number
if filter_type == 'count': # filter by count
max_index = int(filter_val) - 1
if chromosome in newdata.keys():
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else:
newdata[chromosome] = dict()
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else: # filter by score
for each in transcripts:
if each['score'] >= filter_val: # if score is > or = to cut off, add to array
newdata[chromosome][feat_id].append(each)
# print "transcripts: "
# for item in newdata[chromosome][feat_id]:
# print "keys: " + "|".join(item.keys())
# print 'vals: ' + ','.join(item.values())
# sys.exit()
## take matched transcripts out of original set; use 'id' to find duplicates between original list and matched list
####################################################################################################################
# remove matching transcripts from original chromosome list #
# a = [x for x in a if x['link'] not in b]
## unmatched transcripts for chrV: 19983 ##
print "|".join(file_data.keys())
print "keys for remove list: " + ":".join(remove_list.keys())
all_ids_to_remove = list()
for one in remove_list.keys():
all_ids_to_remove.extend(remove_list[one])
for each_chr in remove_list.keys():
print "removing matching transcripts from " + each_chr
print "# of matches (length of remove list): " + str(len(remove_list[each_chr]))
print "original # of transcripts (length of original file with transcripts on that chr): " + str(len(file_data[each_chr]))
unmatched_trans[each_chr] = dict()
unmatched_trans[each_chr][each_chr + "_non_ORF"] = [x for x in file_data[each_chr] if x['id'] not in all_ids_to_remove]
#
return (unfiltered_data, newdata, unmatched_trans)
def _calc_overlap(trans_start, trans_stop, feat_start, feat_stop):
feat_length = feat_stop - feat_start
trans_length = trans_stop - trans_start
if trans_start > feat_start and trans_stop > feat_stop: # transcript starts inside the feature and ends outside the feature
return (feat_stop - trans_start)/feat_length
elif trans_start <= feat_start and trans_stop < feat_stop: # transcript starts outside of feature and ends inside feature
return (trans_stop - feat_start)/feat_length
elif trans_start > feat_start and trans_stop < feat_stop: # transcript starts and ends within the feature
return (trans_length/trans_stop)
else: # transcript completely overlaps the feature -- trans_start <= feat_start AND trans_stop >= feat_stop
return 1
def _find_overlapping_transcripts(feat_element, trans_data, overlap):
feat_start = int(feat_element['start'])
feat_stop = int(feat_element['stop'])
strand = feat_element['strand']
notes = feat_element['notes']
feat_type = feat_element['feat_type']
feat_name = feat_element['feat_name'] #notes_array[0].replace("ID=","")
## calculate the range for start and stops to cover the overlap of the ORF
feat_length = int(feat_stop - feat_start)
feat_overlap = int(float(feat_length) * float(overlap))
min_feat_start = int(feat_start - feat_overlap)
min_feat_stop = int(feat_start + feat_overlap) # start plus the percent of overlap
max_feat_start = int(feat_stop - feat_overlap) # distance from stop that will overlap req. amount
# print 'finding transcripts for ' + feat_start + " to " + feat_end + ", " + strand + ':' + notes + '\n'
filtered_data = list()
sort_data = list()
match_list = list()
print "start # of transcipts: "+ str(len(trans_data))
slice = 0
for each in trans_data:
# print "start transcript chr " + each['chr'] + ' and strand: ' + each['strand'] + " feat: "+ feat_element['chr'] + ',' + feat_element['strand']
if each['chr'] != feat_element['chr'] or each['strand'] != feat_element['strand']:
continue
trans_start = int(each['start'])
trans_stop = int(each ['stop'])
# transcript length
trans_length = trans_stop - trans_start
# print "f keys:" + ",".join(feat_element.keys())
# print "f values:" + "|".join(feat_element.values())
# print "t keys:" + | _print_gff | identifier_name |
parse_files_for_transcripts.py | , non_match_file)
## if -f flag is used, then just reformat data ###
# if 'format' in vars():
# # reformatted_data = _reformat_data(data_to_filter, mapfile)
#
# ### creating reformatted file
#
# final_file = open(outfile, 'w')
#
# ## print header row
# # final_file.write ("regulator feature name\tregulator gene name\ttarget feature name\ttarget gene name\tvalue\tstrain\n")
#
def _print_gff(dict_to_print, outfile):
print "making GFF file"
#print headers #
newfile = open(outfile, 'w')
newfile.write("## GFF file for transcripts\n")
chr_order = ['chrI','chrII','chrIII','chrIV','chrV','chrVI','chrVII','chrVIII','chrIX','chrX','chrXI','chrXII','chrXIII','chrXIV','chrXV','chrXVI']
for chr in chr_order:
transcript_data = dict_to_print[chr]
for key in sorted(transcript_data.keys()):
print 'feature: ' + key + '# of transcripts: ' + str(len(transcript_data[key]))
if len(transcript_data[key]) < 1:
|
for track in transcript_data[key]:
print 'adding ' + key + ' to file'
# print "|".join(track.values())
notes = track.get('notes', ".")
newfile.write("\t".join([chr,'rtracklayer_'+key,'sequence_feature',track['start'], track['stop'], track['score'],track['strand'],'.',notes]))
newfile.write("\n")
# sys.exit
# def _make_bed_file(dict_to_print, outfile):
#
def _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap):
transcripts = list()
chrom_plus = list()
chrom_minus = list()
newdata = dict()
unfiltered_data = dict()
unmatched_trans = dict()
remove_list = dict()
for chromosome in genome_data.keys():
if chromosome == 'chrmt': # skip mito chromosome
print 'skipping mito'
continue
matching_file_data = file_data[chromosome]
# unmatched_trans = matching_file_data
# print 'chromosome:' + chromosome + ':' + matching_file_data[0]['chr']
#sys.exit()
## split by strand ##
for transcript in matching_file_data:
if transcript['strand'] == '+':
chrom_plus.append(transcript)
else:
chrom_minus.append(transcript)
print '# transcripts to search: ' + str(len(matching_file_data))
print '# plus strand: ' + str(len(chrom_plus))
print '# minus strand: ' + str(len(chrom_minus))
if feat_type != 'all': ## use specific feature type if specified
for element in genome_data[chromosome]:
# print feat_type + ':' + element['feat_type']
if element['feat_type'] != feat_type:
# print 'feature types don\'t match'
continue
else: #matching feature types
data_to_search = chrom_plus #default is plus strand
if element['strand'] == '-':
data_to_search = chrom_minus
transcripts = _find_overlapping_transcripts(element, data_to_search, overlap) # one feature, finding overlaps
# print 'number of transcripts: ' + str(len(transcripts))
# if len(transcripts) > 0:
# print '|'.join([d['key'] for d in transcripts])
# sys.exit()
# print element['feat_type'] + "->" + chromosome + " " + str(element['start']) + " to " + str(element['stop']) + ":" + element['notes'] + " has " + str(len(transcripts)) + ' number of matches\n'
# notes_array = element['notes'].split(';')
feat_id = element['feat_name']
# print "feat:" + feat_id
if len(transcripts) == 0: # skip if no matches
continue
## add to unfiltered list and Add 'key' to remove from list
if chromosome in unfiltered_data.keys():
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome].extend([d['id'].rstrip() for d in transcripts])
else:
unfiltered_data[chromosome] = dict()
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome] = [d['id'].rstrip() for d in transcripts]
## filter transcripts by score or number
if filter_type == 'count': # filter by count
max_index = int(filter_val) - 1
if chromosome in newdata.keys():
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else:
newdata[chromosome] = dict()
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else: # filter by score
for each in transcripts:
if each['score'] >= filter_val: # if score is > or = to cut off, add to array
newdata[chromosome][feat_id].append(each)
# print "transcripts: "
# for item in newdata[chromosome][feat_id]:
# print "keys: " + "|".join(item.keys())
# print 'vals: ' + ','.join(item.values())
# sys.exit()
## take matched transcripts out of original set; use 'id' to find duplicates between original list and matched list
####################################################################################################################
# remove matching transcripts from original chromosome list #
# a = [x for x in a if x['link'] not in b]
## unmatched transcripts for chrV: 19983 ##
print "|".join(file_data.keys())
print "keys for remove list: " + ":".join(remove_list.keys())
all_ids_to_remove = list()
for one in remove_list.keys():
all_ids_to_remove.extend(remove_list[one])
for each_chr in remove_list.keys():
print "removing matching transcripts from " + each_chr
print "# of matches (length of remove list): " + str(len(remove_list[each_chr]))
print "original # of transcripts (length of original file with transcripts on that chr): " + str(len(file_data[each_chr]))
unmatched_trans[each_chr] = dict()
unmatched_trans[each_chr][each_chr + "_non_ORF"] = [x for x in file_data[each_chr] if x['id'] not in all_ids_to_remove]
#
return (unfiltered_data, newdata, unmatched_trans)
def _calc_overlap(trans_start, trans_stop, feat_start, feat_stop):
feat_length = feat_stop - feat_start
trans_length = trans_stop - trans_start
if trans_start > feat_start and trans_stop > feat_stop: # transcript starts inside the feature and ends outside the feature
return (feat_stop - trans_start)/feat_length
elif trans_start <= feat_start and trans_stop < feat_stop: # transcript starts outside of feature and ends inside feature
return (trans_stop - feat_start)/feat_length
elif trans_start > feat_start and trans_stop < feat_stop: # transcript starts and ends within the feature
return (trans_length/trans_stop)
else: # transcript completely overlaps the feature -- trans_start <= feat_start AND trans_stop >= feat_stop
return 1
def _find_overlapping_transcripts(feat_element, trans_data, overlap):
feat_start = int(feat_element['start'])
feat_stop = int(feat_element['stop'])
strand = feat_element['strand']
notes = feat_element['notes']
feat_type = feat_element['feat_type']
feat_name = feat_element['feat_name'] #notes_array[0].replace("ID=","")
## calculate the range for start and stops to cover the overlap of the ORF
feat_length = int(feat_stop - feat_start)
feat_overlap = int(float(feat_length) * float(overlap))
min_feat_start = int(feat_start - feat_overlap)
min_feat_stop = int(feat_start + feat_overlap) # start plus the percent of overlap
max_feat_start = int(feat_stop - feat_overlap) # distance from stop that will overlap req. amount
# print 'finding transcripts for ' + feat_start + " to " + feat_end + ", " + strand + ':' + notes + '\n'
filtered_data = list()
sort_data = list()
match_list = list()
print "start # of transcipts: "+ str(len(trans_data))
slice = 0
for each in trans_data:
# print "start transcript chr " + each['chr'] + ' and strand: ' + each['strand'] + " feat: "+ feat_element['chr'] + ',' + feat_element['strand']
if each['chr'] != feat_element['chr'] or each['strand'] != feat_element['strand']:
continue
trans_start = int(each['start'])
trans_stop = int(each ['stop'])
# transcript length
trans_length = trans_stop - trans_start
# print "f keys:" + ",".join(feat_element.keys())
# print "f values:" + "|".join(feat_element.values())
# print "t keys:" + | continue | conditional_block |
parse_files_for_transcripts.py | data_to_filter = dict()
two_fold_data = dict()
genome_data = dict()
transcript_data = dict()
feat_type = 'gene' ## specific feature type; use 'all' if no specific feature type
filter_val = 3 ## number to filter by (count or score)
filter_type = 'count' # can filter by count('count') or score cutoff ('cutoff')
overlap = '1' # amount of overlap a transcript has on an ORF
try:
opts, args = getopt.getopt(argv,"hi:o:l:f:v:t:s",["ifile=","ofile=", "lfile=","format=","value=","type=","seqfeat="])
except getopt.GetoptError:
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit()
elif opt in ("-i", "--ifile"): # gff3 format
firstfile = arg
elif opt in ("-o", "--ofile"): # outfile name
outfile = arg
elif opt in ("-l","--lfile"): # s_cer gff3 file
locusfile = arg
elif opt in ("-f","--format"): #gff3 or tsv/wig file ## DOESN'T WORK
file_format = arg
elif opt in ("-v","--value"): ## filter value
filter_val = arg
elif opt in ("-t","--type"): ## type of filter -- count or cutoff # Not tested
filter_type = arg
elif opt in ("-s","--seqfeat"): ## feature type # Not tested
feat_type = arg
# defining out files:
unfiltered_file = "unfiltered_" + outfile
non_match_file = "unmatched_" + outfile
# make hashes
(file_data) = _open_make_hash(firstfile) # opens file to parse and makes a hash
(genome_data) = _parse_sac_gff(locusfile) # opens and makes hash of gene annotations
# print "file keys: " + ",".join(file_data.keys())
# print "s_cer gff keys: "+ ",".join(genome_data.keys())
## iterate over all the 'gene' feature types and find the transcripts that cover the entire thing
#, non_matches)
(unfiltered_matches, filtered_transcripts, unmatched_transcripts) = _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap)
## right now, just make a gff file
_print_gff(filtered_transcripts, outfile)
_print_gff(unfiltered_matches, unfiltered_file)
_print_gff(unmatched_transcripts, non_match_file)
## if -f flag is used, then just reformat data ###
# if 'format' in vars():
# # reformatted_data = _reformat_data(data_to_filter, mapfile)
#
# ### creating reformatted file
#
# final_file = open(outfile, 'w')
#
# ## print header row
# # final_file.write ("regulator feature name\tregulator gene name\ttarget feature name\ttarget gene name\tvalue\tstrain\n")
#
def _print_gff(dict_to_print, outfile):
print "making GFF file"
#print headers #
newfile = open(outfile, 'w')
newfile.write("## GFF file for transcripts\n")
chr_order = ['chrI','chrII','chrIII','chrIV','chrV','chrVI','chrVII','chrVIII','chrIX','chrX','chrXI','chrXII','chrXIII','chrXIV','chrXV','chrXVI']
for chr in chr_order:
transcript_data = dict_to_print[chr]
for key in sorted(transcript_data.keys()):
print 'feature: ' + key + '# of transcripts: ' + str(len(transcript_data[key]))
if len(transcript_data[key]) < 1:
continue
for track in transcript_data[key]:
print 'adding ' + key + ' to file'
# print "|".join(track.values())
notes = track.get('notes', ".")
newfile.write("\t".join([chr,'rtracklayer_'+key,'sequence_feature',track['start'], track['stop'], track['score'],track['strand'],'.',notes]))
newfile.write("\n")
# sys.exit
# def _make_bed_file(dict_to_print, outfile):
#
def _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap):
transcripts = list()
chrom_plus = list()
chrom_minus = list()
newdata = dict()
unfiltered_data = dict()
unmatched_trans = dict()
remove_list = dict()
for chromosome in genome_data.keys():
if chromosome == 'chrmt': # skip mito chromosome
print 'skipping mito'
continue
matching_file_data = file_data[chromosome]
# unmatched_trans = matching_file_data
# print 'chromosome:' + chromosome + ':' + matching_file_data[0]['chr']
#sys.exit()
## split by strand ##
for transcript in matching_file_data:
if transcript['strand'] == '+':
chrom_plus.append(transcript)
else:
chrom_minus.append(transcript)
print '# transcripts to search: ' + str(len(matching_file_data))
print '# plus strand: ' + str(len(chrom_plus))
print '# minus strand: ' + str(len(chrom_minus))
if feat_type != 'all': ## use specific feature type if specified
for element in genome_data[chromosome]:
# print feat_type + ':' + element['feat_type']
if element['feat_type'] != feat_type:
# print 'feature types don\'t match'
continue
else: #matching feature types
data_to_search = chrom_plus #default is plus strand
if element['strand'] == '-':
data_to_search = chrom_minus
transcripts = _find_overlapping_transcripts(element, data_to_search, overlap) # one feature, finding overlaps
# print 'number of transcripts: ' + str(len(transcripts))
# if len(transcripts) > 0:
# print '|'.join([d['key'] for d in transcripts])
# sys.exit()
# print element['feat_type'] + "->" + chromosome + " " + str(element['start']) + " to " + str(element['stop']) + ":" + element['notes'] + " has " + str(len(transcripts)) + ' number of matches\n'
# notes_array = element['notes'].split(';')
feat_id = element['feat_name']
# print "feat:" + feat_id
if len(transcripts) == 0: # skip if no matches
continue
## add to unfiltered list and Add 'key' to remove from list
if chromosome in unfiltered_data.keys():
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome].extend([d['id'].rstrip() for d in transcripts])
else:
unfiltered_data[chromosome] = dict()
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome] = [d['id'].rstrip() for d in transcripts]
## filter transcripts by score or number
if filter_type == 'count': # filter by count
max_index = int(filter_val) - 1
if chromosome in newdata.keys():
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else:
newdata[chromosome] = dict()
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else: # filter by score
for each in transcripts:
if each['score'] >= filter_val: # if score is > or = to cut off, add to array
newdata[chromosome][feat_id].append(each)
# print "transcripts: "
# for item in newdata[chromosome][feat_id]:
# print "keys: " + "|".join(item.keys())
# print 'vals: ' + ','.join(item.values())
# sys.exit()
## take matched transcripts out of original set; use 'id' to find duplicates between original list and matched list
####################################################################################################################
# remove matching transcripts from original chromosome list #
# a = [x for x in a if x['link'] not in b]
## unmatched transcripts for chrV: 19983 ##
print "|".join(file_data.keys())
print "keys for remove list: " + ":".join(remove_list.keys())
all_ids_to_remove = list()
for one in remove_list.keys():
|
def main(argv):
sorted_keys = list() | random_line_split |
|
parse_files_for_transcripts.py | firstfile = arg
elif opt in ("-o", "--ofile"): # outfile name
outfile = arg
elif opt in ("-l","--lfile"): # s_cer gff3 file
locusfile = arg
elif opt in ("-f","--format"): #gff3 or tsv/wig file ## DOESN'T WORK
file_format = arg
elif opt in ("-v","--value"): ## filter value
filter_val = arg
elif opt in ("-t","--type"): ## type of filter -- count or cutoff # Not tested
filter_type = arg
elif opt in ("-s","--seqfeat"): ## feature type # Not tested
feat_type = arg
# defining out files:
unfiltered_file = "unfiltered_" + outfile
non_match_file = "unmatched_" + outfile
# make hashes
(file_data) = _open_make_hash(firstfile) # opens file to parse and makes a hash
(genome_data) = _parse_sac_gff(locusfile) # opens and makes hash of gene annotations
# print "file keys: " + ",".join(file_data.keys())
# print "s_cer gff keys: "+ ",".join(genome_data.keys())
## iterate over all the 'gene' feature types and find the transcripts that cover the entire thing
#, non_matches)
(unfiltered_matches, filtered_transcripts, unmatched_transcripts) = _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap)
## right now, just make a gff file
_print_gff(filtered_transcripts, outfile)
_print_gff(unfiltered_matches, unfiltered_file)
_print_gff(unmatched_transcripts, non_match_file)
## if -f flag is used, then just reformat data ###
# if 'format' in vars():
# # reformatted_data = _reformat_data(data_to_filter, mapfile)
#
# ### creating reformatted file
#
# final_file = open(outfile, 'w')
#
# ## print header row
# # final_file.write ("regulator feature name\tregulator gene name\ttarget feature name\ttarget gene name\tvalue\tstrain\n")
#
def _print_gff(dict_to_print, outfile):
print "making GFF file"
#print headers #
newfile = open(outfile, 'w')
newfile.write("## GFF file for transcripts\n")
chr_order = ['chrI','chrII','chrIII','chrIV','chrV','chrVI','chrVII','chrVIII','chrIX','chrX','chrXI','chrXII','chrXIII','chrXIV','chrXV','chrXVI']
for chr in chr_order:
transcript_data = dict_to_print[chr]
for key in sorted(transcript_data.keys()):
print 'feature: ' + key + '# of transcripts: ' + str(len(transcript_data[key]))
if len(transcript_data[key]) < 1:
continue
for track in transcript_data[key]:
print 'adding ' + key + ' to file'
# print "|".join(track.values())
notes = track.get('notes', ".")
newfile.write("\t".join([chr,'rtracklayer_'+key,'sequence_feature',track['start'], track['stop'], track['score'],track['strand'],'.',notes]))
newfile.write("\n")
# sys.exit
# def _make_bed_file(dict_to_print, outfile):
#
def _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap):
transcripts = list()
chrom_plus = list()
chrom_minus = list()
newdata = dict()
unfiltered_data = dict()
unmatched_trans = dict()
remove_list = dict()
for chromosome in genome_data.keys():
if chromosome == 'chrmt': # skip mito chromosome
print 'skipping mito'
continue
matching_file_data = file_data[chromosome]
# unmatched_trans = matching_file_data
# print 'chromosome:' + chromosome + ':' + matching_file_data[0]['chr']
#sys.exit()
## split by strand ##
for transcript in matching_file_data:
if transcript['strand'] == '+':
chrom_plus.append(transcript)
else:
chrom_minus.append(transcript)
print '# transcripts to search: ' + str(len(matching_file_data))
print '# plus strand: ' + str(len(chrom_plus))
print '# minus strand: ' + str(len(chrom_minus))
if feat_type != 'all': ## use specific feature type if specified
for element in genome_data[chromosome]:
# print feat_type + ':' + element['feat_type']
if element['feat_type'] != feat_type:
# print 'feature types don\'t match'
continue
else: #matching feature types
data_to_search = chrom_plus #default is plus strand
if element['strand'] == '-':
data_to_search = chrom_minus
transcripts = _find_overlapping_transcripts(element, data_to_search, overlap) # one feature, finding overlaps
# print 'number of transcripts: ' + str(len(transcripts))
# if len(transcripts) > 0:
# print '|'.join([d['key'] for d in transcripts])
# sys.exit()
# print element['feat_type'] + "->" + chromosome + " " + str(element['start']) + " to " + str(element['stop']) + ":" + element['notes'] + " has " + str(len(transcripts)) + ' number of matches\n'
# notes_array = element['notes'].split(';')
feat_id = element['feat_name']
# print "feat:" + feat_id
if len(transcripts) == 0: # skip if no matches
continue
## add to unfiltered list and Add 'key' to remove from list
if chromosome in unfiltered_data.keys():
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome].extend([d['id'].rstrip() for d in transcripts])
else:
unfiltered_data[chromosome] = dict()
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome] = [d['id'].rstrip() for d in transcripts]
## filter transcripts by score or number
if filter_type == 'count': # filter by count
max_index = int(filter_val) - 1
if chromosome in newdata.keys():
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else:
newdata[chromosome] = dict()
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else: # filter by score
for each in transcripts:
if each['score'] >= filter_val: # if score is > or = to cut off, add to array
newdata[chromosome][feat_id].append(each)
# print "transcripts: "
# for item in newdata[chromosome][feat_id]:
# print "keys: " + "|".join(item.keys())
# print 'vals: ' + ','.join(item.values())
# sys.exit()
## take matched transcripts out of original set; use 'id' to find duplicates between original list and matched list
####################################################################################################################
# remove matching transcripts from original chromosome list #
# a = [x for x in a if x['link'] not in b]
## unmatched transcripts for chrV: 19983 ##
print "|".join(file_data.keys())
print "keys for remove list: " + ":".join(remove_list.keys())
all_ids_to_remove = list()
for one in remove_list.keys():
all_ids_to_remove.extend | sorted_keys = list()
data_to_filter = dict()
two_fold_data = dict()
genome_data = dict()
transcript_data = dict()
feat_type = 'gene' ## specific feature type; use 'all' if no specific feature type
filter_val = 3 ## number to filter by (count or score)
filter_type = 'count' # can filter by count('count') or score cutoff ('cutoff')
overlap = '1' # amount of overlap a transcript has on an ORF
try:
opts, args = getopt.getopt(argv,"hi:o:l:f:v:t:s",["ifile=","ofile=", "lfile=","format=","value=","type=","seqfeat="])
except getopt.GetoptError:
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit()
elif opt in ("-i", "--ifile"): # gff3 format | identifier_body |
|
mod.rs |
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
panic!("Failed to release device context");
}
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class ...
// * Which is used to setup a dummy OpenGL context ...
// * Which is used to load OpenGL extensions ...
// * Which are used to set more specific pixel formats and specify an OpenGL version ...
// * Which is used to create another dummy window ...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number | {
unsafe {
let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
}
} | identifier_body |
|
mod.rs | .
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 |
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class ...
// * Which is used to setup a dummy OpenGL context ...
// * Which is used to load OpenGL extensions ...
// * Which are used to set more specific pixel formats and specify an OpenGL version ...
// * Which is used to create another dummy window ...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed?
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Just it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
| {
panic!("Failed to release device context");
} | conditional_block |
mod.rs | happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn | (&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
panic!("Failed to release device context");
}
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class ...
// * Which is used to setup a dummy OpenGL context ...
// * Which is used to load OpenGL extensions ...
// * Which are used to set more specific pixel formats and specify an OpenGL version ...
// * Which is used to create another dummy window ...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed?
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Just it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
| get_proc_address | identifier_name |
mod.rs | _ => unreachable!(),
})
.unwrap();
let window_device_context = if let Some(_window) = window {
if let Some(current_device_context) = self.device_context {
ReleaseDC(window_handle, current_device_context);
}
let device_context = GetDC(window_handle);
self.device_context = Some(device_context);
device_context
} else {
std::ptr::null_mut() as HDC
};
let pixel_format_descriptor: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
// This will error if the window was previously set with an incompatible
// pixel format.
if SetPixelFormat(
window_device_context,
self.pixel_format_id,
&pixel_format_descriptor,
) == 0
{
return Err(SetWindowError::MismatchedPixelFormat);
}
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr)).unwrap();
// self.set_vsync(self.vsync).unwrap(); // Everytime a device context is requested, vsync must be updated.
self.current_window = if let Some(_window) = window {
Some(window_handle)
} else {
None
};
self.set_vsync(self.vsync).unwrap();
}
Ok(())
}
// Is this behavior correct? Does it really work if called from another thread?
fn make_current(&mut self) -> Result<(), std::io::Error> {
unsafe {
let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
}
}
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
panic!("Failed to release device context");
}
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class ...
// * Which is used to setup a dummy OpenGL context ...
// * Which is used to load OpenGL extensions ...
// * Which are used to set more specific pixel formats and specify an OpenGL version ...
// * Which is used to create another dummy window ...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll | RawWindowHandle::Windows(handle) => handle.hwnd as HWND, | random_line_split |
|
appconfig.go | `json:"id"` // ACLs can reference this, so keep stable (i.e. service replicas/restarts should not affect this)
Frontends []Frontend `json:"frontends"`
Backend Backend `json:"backend"`
}
func (a *Application) Validate() error {
if err := ErrorIfUnset(a.Id == "", "Id"); err != nil {
return err
}
if err := ErrorIfUnset(len(a.Frontends) == 0, "Frontends"); err != nil {
return err
}
for _, frontend := range a.Frontends {
if err := frontend.Validate(); err != nil {
return fmt.Errorf("app %s frontend: %v", a.Id, err)
}
}
switch a.Backend.Kind {
case BackendKindS3StaticWebsite:
return a.Backend.S3StaticWebsiteOpts.Validate()
case BackendKindReverseProxy:
return a.Backend.ReverseProxyOpts.Validate()
case BackendKindAwsLambda:
return a.Backend.AwsLambdaOpts.Validate()
case BackendKindEdgerouterAdmin, BackendKindPromMetrics:
return nil // nothing to validate
case BackendKindAuthV0:
return a.Backend.AuthV0Opts.Validate()
case BackendKindAuthSso:
return a.Backend.AuthSsoOpts.Validate()
case BackendKindRedirect:
return a.Backend.RedirectOpts.Validate()
case BackendKindTurbocharger:
return a.Backend.TurbochargerOpts.Validate()
default:
return fmt.Errorf("app %s backend unkown kind: %s", a.Id, a.Backend.Kind)
}
}
// when adding new kind, remember to update:
// - Application.Validate()
// - Backend.Describe()
// - factory in backendfactory
type BackendKind string
const (
BackendKindS3StaticWebsite BackendKind = "s3_static_website"
BackendKindReverseProxy BackendKind = "reverse_proxy"
BackendKindAwsLambda BackendKind = "aws_lambda"
BackendKindEdgerouterAdmin BackendKind = "edgerouter_admin"
BackendKindAuthV0 BackendKind = "auth_v0"
BackendKindAuthSso BackendKind = "auth_sso"
BackendKindRedirect BackendKind = "redirect"
BackendKindPromMetrics BackendKind = "prom_metrics"
BackendKindTurbocharger BackendKind = "turbocharger"
)
type Backend struct {
Kind BackendKind `json:"kind"`
S3StaticWebsiteOpts *BackendOptsS3StaticWebsite `json:"s3_static_website_opts,omitempty"`
ReverseProxyOpts *BackendOptsReverseProxy `json:"reverse_proxy_opts,omitempty"`
AwsLambdaOpts *BackendOptsAwsLambda `json:"aws_lambda_opts,omitempty"`
AuthV0Opts *BackendOptsAuthV0 `json:"auth_v0_opts,omitempty"`
AuthSsoOpts *BackendOptsAuthSso `json:"auth_sso_opts,omitempty"`
RedirectOpts *BackendOptsRedirect `json:"redirect_opts,omitempty"`
TurbochargerOpts *BackendOptsTurbocharger `json:"turbocharger_opts,omitempty"`
}
type BackendOptsS3StaticWebsite struct {
BucketName string `json:"bucket_name"`
RegionId string `json:"region_id"`
DeployedVersion string `json:"deployed_version"` // can be empty before first deployed version
NotFoundPage string `json:"404_page,omitempty"` // (optional) ex: "404.html", relative to root of deployed site
}
func (b *BackendOptsS3StaticWebsite) Validate() error {
return FirstError(
ErrorIfUnset(b.BucketName == "", "BucketName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsReverseProxy struct {
Origins []string `json:"origins"`
TlsConfig *TlsConfig `json:"tls_config,omitempty"`
Caching bool `json:"caching,omitempty"` // turn on response caching?
PassHostHeader bool `json:"pass_host_header,omitempty"` // use client-sent Host (=true) or origin's hostname? (=false) https://doc.traefik.io/traefik/routing/services/#pass-host-header
IndexDocument string `json:"index_document,omitempty"` // if request path ends in /foo/ ("directory"), rewrite it into /foo/index.html
RemoveQueryString bool `json:"remove_query_string,omitempty"` // reduces cache misses if responses don't vary on qs
HeadersToOrigin map[string]string `json:"headers_to_origin,omitempty"` // force-add headers to be sent to origin
}
func (b *BackendOptsReverseProxy) Validate() error {
return ErrorIfUnset(len(b.Origins) == 0, "Origins")
}
type BackendOptsAwsLambda struct {
FunctionName string `json:"function_name"`
RegionId string `json:"region_id"`
}
func (b *BackendOptsAwsLambda) Validate() error {
return FirstError(
ErrorIfUnset(b.FunctionName == "", "FunctionName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsAuthV0 struct {
BearerToken string `json:"bearer_token"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthV0) Validate() error {
return FirstError(
ErrorIfUnset(b.BearerToken == "", "BearerToken"),
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
)
}
type BackendOptsAuthSso struct {
IdServerUrl string `json:"id_server_url,omitempty"`
AllowedUserIds []string `json:"allowed_user_ids"`
Audience string `json:"audience"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthSso) Validate() error {
return FirstError(
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
ErrorIfUnset(b.Audience == "", "Audience"),
)
}
type BackendOptsRedirect struct {
To string `json:"to"`
}
func (b *BackendOptsRedirect) Validate() error {
return ErrorIfUnset(b.To == "", "To")
}
type BackendOptsTurbocharger struct {
Manifest turbocharger.ObjectID `json:"manifest"`
}
func (b *BackendOptsTurbocharger) Validate() error {
return nil
}
// factories
func SimpleApplication(id string, frontend Frontend, backend Backend) Application {
return Application{
Id: id,
Frontends: []Frontend{
frontend,
},
Backend: backend,
}
}
func SimpleHostnameFrontend(hostname string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostname,
Hostname: hostname,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func | (hostnameRegexp string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostnameRegexp,
HostnameRegexp: hostnameRegexp,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
// catches all requests irregardless of hostname
func PathPrefixFrontend(pathPrefix string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(append([]FrontendOpt{PathPrefix(pathPrefix)}, options...))
return Frontend{
Kind: FrontendKindPathPrefix,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func S3Backend(bucketName string, regionId string, deployedVersion string) Backend {
return Backend{
Kind: BackendKindS3StaticWebsite,
S3StaticWebsiteOpts: &BackendOptsS3StaticWebsite{
BucketName: bucketName,
RegionId: regionId,
DeployedVersion: deployedVersion,
},
}
}
func ReverseProxyBackend(addrs []string, tlsConfig *TlsConfig, passHostHeader bool) Backend {
return Backend{
Kind: BackendKindReverseProxy,
ReverseProxyOpts: &BackendOptsReverseProxy{
Origins: addrs,
TlsConfig: tlsConfig,
PassHostHeader: passHostHeader,
},
}
}
func RedirectBackend(to string) Backend {
return Backend{
Kind: BackendKindRedirect,
RedirectOpts: &BackendOptsRedirect{
To: to,
},
}
}
func TurbochargerBackend(manifestID turbocharger.ObjectID) Backend {
return Backend{
Kind: BackendKindTurbocharger,
TurbochargerOpts: &BackendOptsTurbocharger{
Manifest: manifestID,
},
}
}
func LambdaBackend(functionName string, regionId string) Backend {
return Backend{
Kind: BackendKindAwsLambda,
AwsLambdaOpts: &BackendOptsAwsLambda{
FunctionName: functionName,
RegionId: regionId,
},
}
}
func EdgerouterAdminBackend() Backend {
return Backend{
Kind: BackendKindEdger | RegexpHostnameFrontend | identifier_name |
appconfig.go | `json:"id"` // ACLs can reference this, so keep stable (i.e. service replicas/restarts should not affect this)
Frontends []Frontend `json:"frontends"`
Backend Backend `json:"backend"`
}
func (a *Application) Validate() error {
if err := ErrorIfUnset(a.Id == "", "Id"); err != nil {
return err
}
if err := ErrorIfUnset(len(a.Frontends) == 0, "Frontends"); err != nil {
return err
}
for _, frontend := range a.Frontends {
if err := frontend.Validate(); err != nil {
return fmt.Errorf("app %s frontend: %v", a.Id, err)
}
}
switch a.Backend.Kind {
case BackendKindS3StaticWebsite:
return a.Backend.S3StaticWebsiteOpts.Validate()
case BackendKindReverseProxy:
return a.Backend.ReverseProxyOpts.Validate()
case BackendKindAwsLambda:
return a.Backend.AwsLambdaOpts.Validate()
case BackendKindEdgerouterAdmin, BackendKindPromMetrics:
return nil // nothing to validate
case BackendKindAuthV0:
return a.Backend.AuthV0Opts.Validate()
case BackendKindAuthSso:
return a.Backend.AuthSsoOpts.Validate()
case BackendKindRedirect:
return a.Backend.RedirectOpts.Validate()
case BackendKindTurbocharger:
return a.Backend.TurbochargerOpts.Validate()
default:
return fmt.Errorf("app %s backend unkown kind: %s", a.Id, a.Backend.Kind)
}
}
// when adding new kind, remember to update:
// - Application.Validate()
// - Backend.Describe()
// - factory in backendfactory
type BackendKind string
const (
BackendKindS3StaticWebsite BackendKind = "s3_static_website"
BackendKindReverseProxy BackendKind = "reverse_proxy"
BackendKindAwsLambda BackendKind = "aws_lambda"
BackendKindEdgerouterAdmin BackendKind = "edgerouter_admin"
BackendKindAuthV0 BackendKind = "auth_v0"
BackendKindAuthSso BackendKind = "auth_sso"
BackendKindRedirect BackendKind = "redirect"
BackendKindPromMetrics BackendKind = "prom_metrics"
BackendKindTurbocharger BackendKind = "turbocharger"
)
type Backend struct {
Kind BackendKind `json:"kind"`
S3StaticWebsiteOpts *BackendOptsS3StaticWebsite `json:"s3_static_website_opts,omitempty"`
ReverseProxyOpts *BackendOptsReverseProxy `json:"reverse_proxy_opts,omitempty"`
AwsLambdaOpts *BackendOptsAwsLambda `json:"aws_lambda_opts,omitempty"`
AuthV0Opts *BackendOptsAuthV0 `json:"auth_v0_opts,omitempty"`
AuthSsoOpts *BackendOptsAuthSso `json:"auth_sso_opts,omitempty"`
RedirectOpts *BackendOptsRedirect `json:"redirect_opts,omitempty"`
TurbochargerOpts *BackendOptsTurbocharger `json:"turbocharger_opts,omitempty"`
}
type BackendOptsS3StaticWebsite struct {
BucketName string `json:"bucket_name"`
RegionId string `json:"region_id"`
DeployedVersion string `json:"deployed_version"` // can be empty before first deployed version
NotFoundPage string `json:"404_page,omitempty"` // (optional) ex: "404.html", relative to root of deployed site
}
func (b *BackendOptsS3StaticWebsite) Validate() error {
return FirstError(
ErrorIfUnset(b.BucketName == "", "BucketName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsReverseProxy struct {
Origins []string `json:"origins"`
TlsConfig *TlsConfig `json:"tls_config,omitempty"`
Caching bool `json:"caching,omitempty"` // turn on response caching?
PassHostHeader bool `json:"pass_host_header,omitempty"` // use client-sent Host (=true) or origin's hostname? (=false) https://doc.traefik.io/traefik/routing/services/#pass-host-header
IndexDocument string `json:"index_document,omitempty"` // if request path ends in /foo/ ("directory"), rewrite it into /foo/index.html
RemoveQueryString bool `json:"remove_query_string,omitempty"` // reduces cache misses if responses don't vary on qs
HeadersToOrigin map[string]string `json:"headers_to_origin,omitempty"` // force-add headers to be sent to origin
}
func (b *BackendOptsReverseProxy) Validate() error {
return ErrorIfUnset(len(b.Origins) == 0, "Origins")
}
type BackendOptsAwsLambda struct {
FunctionName string `json:"function_name"`
RegionId string `json:"region_id"`
}
func (b *BackendOptsAwsLambda) Validate() error {
return FirstError(
ErrorIfUnset(b.FunctionName == "", "FunctionName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsAuthV0 struct {
BearerToken string `json:"bearer_token"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthV0) Validate() error { | ErrorIfUnset(b.BearerToken == "", "BearerToken"),
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
)
}
type BackendOptsAuthSso struct {
IdServerUrl string `json:"id_server_url,omitempty"`
AllowedUserIds []string `json:"allowed_user_ids"`
Audience string `json:"audience"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthSso) Validate() error {
return FirstError(
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
ErrorIfUnset(b.Audience == "", "Audience"),
)
}
type BackendOptsRedirect struct {
To string `json:"to"`
}
func (b *BackendOptsRedirect) Validate() error {
return ErrorIfUnset(b.To == "", "To")
}
type BackendOptsTurbocharger struct {
Manifest turbocharger.ObjectID `json:"manifest"`
}
func (b *BackendOptsTurbocharger) Validate() error {
return nil
}
// factories
func SimpleApplication(id string, frontend Frontend, backend Backend) Application {
return Application{
Id: id,
Frontends: []Frontend{
frontend,
},
Backend: backend,
}
}
func SimpleHostnameFrontend(hostname string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostname,
Hostname: hostname,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func RegexpHostnameFrontend(hostnameRegexp string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostnameRegexp,
HostnameRegexp: hostnameRegexp,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
// catches all requests irregardless of hostname
func PathPrefixFrontend(pathPrefix string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(append([]FrontendOpt{PathPrefix(pathPrefix)}, options...))
return Frontend{
Kind: FrontendKindPathPrefix,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func S3Backend(bucketName string, regionId string, deployedVersion string) Backend {
return Backend{
Kind: BackendKindS3StaticWebsite,
S3StaticWebsiteOpts: &BackendOptsS3StaticWebsite{
BucketName: bucketName,
RegionId: regionId,
DeployedVersion: deployedVersion,
},
}
}
func ReverseProxyBackend(addrs []string, tlsConfig *TlsConfig, passHostHeader bool) Backend {
return Backend{
Kind: BackendKindReverseProxy,
ReverseProxyOpts: &BackendOptsReverseProxy{
Origins: addrs,
TlsConfig: tlsConfig,
PassHostHeader: passHostHeader,
},
}
}
func RedirectBackend(to string) Backend {
return Backend{
Kind: BackendKindRedirect,
RedirectOpts: &BackendOptsRedirect{
To: to,
},
}
}
func TurbochargerBackend(manifestID turbocharger.ObjectID) Backend {
return Backend{
Kind: BackendKindTurbocharger,
TurbochargerOpts: &BackendOptsTurbocharger{
Manifest: manifestID,
},
}
}
func LambdaBackend(functionName string, regionId string) Backend {
return Backend{
Kind: BackendKindAwsLambda,
AwsLambdaOpts: &BackendOptsAwsLambda{
FunctionName: functionName,
RegionId: regionId,
},
}
}
func EdgerouterAdminBackend() Backend {
return Backend{
Kind: BackendKindEdgerouter | return FirstError( | random_line_split |
appconfig.go | ) https://doc.traefik.io/traefik/routing/services/#pass-host-header
IndexDocument string `json:"index_document,omitempty"` // if request path ends in /foo/ ("directory"), rewrite it into /foo/index.html
RemoveQueryString bool `json:"remove_query_string,omitempty"` // reduces cache misses if responses don't vary on qs
HeadersToOrigin map[string]string `json:"headers_to_origin,omitempty"` // force-add headers to be sent to origin
}
func (b *BackendOptsReverseProxy) Validate() error {
return ErrorIfUnset(len(b.Origins) == 0, "Origins")
}
type BackendOptsAwsLambda struct {
FunctionName string `json:"function_name"`
RegionId string `json:"region_id"`
}
func (b *BackendOptsAwsLambda) Validate() error {
return FirstError(
ErrorIfUnset(b.FunctionName == "", "FunctionName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsAuthV0 struct {
BearerToken string `json:"bearer_token"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthV0) Validate() error {
return FirstError(
ErrorIfUnset(b.BearerToken == "", "BearerToken"),
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
)
}
type BackendOptsAuthSso struct {
IdServerUrl string `json:"id_server_url,omitempty"`
AllowedUserIds []string `json:"allowed_user_ids"`
Audience string `json:"audience"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthSso) Validate() error {
return FirstError(
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
ErrorIfUnset(b.Audience == "", "Audience"),
)
}
type BackendOptsRedirect struct {
To string `json:"to"`
}
func (b *BackendOptsRedirect) Validate() error {
return ErrorIfUnset(b.To == "", "To")
}
type BackendOptsTurbocharger struct {
Manifest turbocharger.ObjectID `json:"manifest"`
}
func (b *BackendOptsTurbocharger) Validate() error {
return nil
}
// factories
func SimpleApplication(id string, frontend Frontend, backend Backend) Application {
return Application{
Id: id,
Frontends: []Frontend{
frontend,
},
Backend: backend,
}
}
func SimpleHostnameFrontend(hostname string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostname,
Hostname: hostname,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func RegexpHostnameFrontend(hostnameRegexp string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostnameRegexp,
HostnameRegexp: hostnameRegexp,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
// catches all requests irregardless of hostname
func PathPrefixFrontend(pathPrefix string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(append([]FrontendOpt{PathPrefix(pathPrefix)}, options...))
return Frontend{
Kind: FrontendKindPathPrefix,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func S3Backend(bucketName string, regionId string, deployedVersion string) Backend {
return Backend{
Kind: BackendKindS3StaticWebsite,
S3StaticWebsiteOpts: &BackendOptsS3StaticWebsite{
BucketName: bucketName,
RegionId: regionId,
DeployedVersion: deployedVersion,
},
}
}
func ReverseProxyBackend(addrs []string, tlsConfig *TlsConfig, passHostHeader bool) Backend {
return Backend{
Kind: BackendKindReverseProxy,
ReverseProxyOpts: &BackendOptsReverseProxy{
Origins: addrs,
TlsConfig: tlsConfig,
PassHostHeader: passHostHeader,
},
}
}
func RedirectBackend(to string) Backend {
return Backend{
Kind: BackendKindRedirect,
RedirectOpts: &BackendOptsRedirect{
To: to,
},
}
}
func TurbochargerBackend(manifestID turbocharger.ObjectID) Backend {
return Backend{
Kind: BackendKindTurbocharger,
TurbochargerOpts: &BackendOptsTurbocharger{
Manifest: manifestID,
},
}
}
func LambdaBackend(functionName string, regionId string) Backend {
return Backend{
Kind: BackendKindAwsLambda,
AwsLambdaOpts: &BackendOptsAwsLambda{
FunctionName: functionName,
RegionId: regionId,
},
}
}
func EdgerouterAdminBackend() Backend {
return Backend{
Kind: BackendKindEdgerouterAdmin,
}
}
func PromMetricsBackend() Backend {
return Backend{
Kind: BackendKindPromMetrics,
}
}
func AuthV0Backend(bearerToken string, authorizedBackend Backend) Backend {
return Backend{
Kind: BackendKindAuthV0,
AuthV0Opts: &BackendOptsAuthV0{
BearerToken: bearerToken,
AuthorizedBackend: &authorizedBackend,
},
}
}
func AuthSsoBackend(
idServerUrl string,
allowedUserIds []string,
audience string,
authorizedBackend Backend,
) Backend {
return Backend{
Kind: BackendKindAuthSso,
AuthSsoOpts: &BackendOptsAuthSso{
IdServerUrl: idServerUrl,
AllowedUserIds: allowedUserIds,
Audience: audience,
AuthorizedBackend: &authorizedBackend,
},
}
}
// describers
func (a *Application) Describe() string {
lines := []string{
a.Id,
" backend = " + a.Backend.Describe(),
}
for _, frontend := range a.Frontends {
lines = append(lines, " frontend = "+frontend.Describe())
}
return strings.Join(lines, "\n")
}
func (f *Frontend) Describe() string {
switch f.Kind {
case FrontendKindHostname:
return string(f.Kind) + ":" + f.Hostname + f.PathPrefix
case FrontendKindHostnameRegexp:
return string(f.Kind) + ":" + f.HostnameRegexp + f.PathPrefix
case FrontendKindPathPrefix:
return string(f.Kind) + ":" + f.PathPrefix
default:
return string(f.Kind)
}
}
func (b *Backend) Describe() string {
switch b.Kind {
case BackendKindS3StaticWebsite:
return string(b.Kind) + ":" + b.S3StaticWebsiteOpts.DeployedVersion
case BackendKindReverseProxy:
return string(b.Kind) + ":" + strings.Join(b.ReverseProxyOpts.Origins, ", ")
case BackendKindAwsLambda:
return string(b.Kind) + ":" + fmt.Sprintf("%s@%s", b.AwsLambdaOpts.FunctionName, b.AwsLambdaOpts.RegionId)
case BackendKindAuthV0:
return string(b.Kind) + ":" + fmt.Sprintf("[bearerToken=...] -> %s", b.AuthV0Opts.AuthorizedBackend.Describe())
case BackendKindRedirect:
return string(b.Kind) + ":" + b.RedirectOpts.To
case BackendKindTurbocharger:
return string(b.Kind) + ":" + b.TurbochargerOpts.Manifest.String()
case BackendKindAuthSso:
return string(b.Kind) + ":" + fmt.Sprintf("[audience=%s] -> %s", b.AuthSsoOpts.Audience, b.AuthSsoOpts.AuthorizedBackend.Describe())
case BackendKindEdgerouterAdmin, BackendKindPromMetrics: // to please exhaustive lint
return string(b.Kind)
default: // should never actually arrive here
return string(b.Kind)
}
}
type TlsConfig struct {
InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"`
ServerName string `json:"server_name,omitempty"` // used to verify the hostname on the server cert. also sent via SNI
}
func (t *TlsConfig) HasMeaningfulContent() bool {
if t.InsecureSkipVerify || t.ServerName != "" {
return true
} else {
return false
}
}
func (t *TlsConfig) SelfOrNilIfNoMeaningfulContent() *TlsConfig {
if t.HasMeaningfulContent() {
return t
} else {
return nil
}
}
// TODO: gokit/builtin
func ErrorIfUnset(isUnset bool, fieldName string) error {
if isUnset {
return fmt.Errorf("'%s' is required but not set", fieldName)
} else {
return nil
}
}
// TODO: gokit/builtin
func FirstError(errs ...error) error {
for _, err := range errs | {
if err != nil {
return err
}
} | conditional_block |
|
appconfig.go | `json:"id"` // ACLs can reference this, so keep stable (i.e. service replicas/restarts should not affect this)
Frontends []Frontend `json:"frontends"`
Backend Backend `json:"backend"`
}
func (a *Application) Validate() error {
if err := ErrorIfUnset(a.Id == "", "Id"); err != nil {
return err
}
if err := ErrorIfUnset(len(a.Frontends) == 0, "Frontends"); err != nil {
return err
}
for _, frontend := range a.Frontends {
if err := frontend.Validate(); err != nil {
return fmt.Errorf("app %s frontend: %v", a.Id, err)
}
}
switch a.Backend.Kind {
case BackendKindS3StaticWebsite:
return a.Backend.S3StaticWebsiteOpts.Validate()
case BackendKindReverseProxy:
return a.Backend.ReverseProxyOpts.Validate()
case BackendKindAwsLambda:
return a.Backend.AwsLambdaOpts.Validate()
case BackendKindEdgerouterAdmin, BackendKindPromMetrics:
return nil // nothing to validate
case BackendKindAuthV0:
return a.Backend.AuthV0Opts.Validate()
case BackendKindAuthSso:
return a.Backend.AuthSsoOpts.Validate()
case BackendKindRedirect:
return a.Backend.RedirectOpts.Validate()
case BackendKindTurbocharger:
return a.Backend.TurbochargerOpts.Validate()
default:
return fmt.Errorf("app %s backend unkown kind: %s", a.Id, a.Backend.Kind)
}
}
// when adding new kind, remember to update:
// - Application.Validate()
// - Backend.Describe()
// - factory in backendfactory
type BackendKind string
const (
BackendKindS3StaticWebsite BackendKind = "s3_static_website"
BackendKindReverseProxy BackendKind = "reverse_proxy"
BackendKindAwsLambda BackendKind = "aws_lambda"
BackendKindEdgerouterAdmin BackendKind = "edgerouter_admin"
BackendKindAuthV0 BackendKind = "auth_v0"
BackendKindAuthSso BackendKind = "auth_sso"
BackendKindRedirect BackendKind = "redirect"
BackendKindPromMetrics BackendKind = "prom_metrics"
BackendKindTurbocharger BackendKind = "turbocharger"
)
type Backend struct {
Kind BackendKind `json:"kind"`
S3StaticWebsiteOpts *BackendOptsS3StaticWebsite `json:"s3_static_website_opts,omitempty"`
ReverseProxyOpts *BackendOptsReverseProxy `json:"reverse_proxy_opts,omitempty"`
AwsLambdaOpts *BackendOptsAwsLambda `json:"aws_lambda_opts,omitempty"`
AuthV0Opts *BackendOptsAuthV0 `json:"auth_v0_opts,omitempty"`
AuthSsoOpts *BackendOptsAuthSso `json:"auth_sso_opts,omitempty"`
RedirectOpts *BackendOptsRedirect `json:"redirect_opts,omitempty"`
TurbochargerOpts *BackendOptsTurbocharger `json:"turbocharger_opts,omitempty"`
}
type BackendOptsS3StaticWebsite struct {
BucketName string `json:"bucket_name"`
RegionId string `json:"region_id"`
DeployedVersion string `json:"deployed_version"` // can be empty before first deployed version
NotFoundPage string `json:"404_page,omitempty"` // (optional) ex: "404.html", relative to root of deployed site
}
func (b *BackendOptsS3StaticWebsite) Validate() error {
return FirstError(
ErrorIfUnset(b.BucketName == "", "BucketName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsReverseProxy struct {
Origins []string `json:"origins"`
TlsConfig *TlsConfig `json:"tls_config,omitempty"`
Caching bool `json:"caching,omitempty"` // turn on response caching?
PassHostHeader bool `json:"pass_host_header,omitempty"` // use client-sent Host (=true) or origin's hostname? (=false) https://doc.traefik.io/traefik/routing/services/#pass-host-header
IndexDocument string `json:"index_document,omitempty"` // if request path ends in /foo/ ("directory"), rewrite it into /foo/index.html
RemoveQueryString bool `json:"remove_query_string,omitempty"` // reduces cache misses if responses don't vary on qs
HeadersToOrigin map[string]string `json:"headers_to_origin,omitempty"` // force-add headers to be sent to origin
}
func (b *BackendOptsReverseProxy) Validate() error |
type BackendOptsAwsLambda struct {
FunctionName string `json:"function_name"`
RegionId string `json:"region_id"`
}
func (b *BackendOptsAwsLambda) Validate() error {
return FirstError(
ErrorIfUnset(b.FunctionName == "", "FunctionName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsAuthV0 struct {
BearerToken string `json:"bearer_token"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthV0) Validate() error {
return FirstError(
ErrorIfUnset(b.BearerToken == "", "BearerToken"),
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
)
}
type BackendOptsAuthSso struct {
IdServerUrl string `json:"id_server_url,omitempty"`
AllowedUserIds []string `json:"allowed_user_ids"`
Audience string `json:"audience"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthSso) Validate() error {
return FirstError(
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
ErrorIfUnset(b.Audience == "", "Audience"),
)
}
type BackendOptsRedirect struct {
To string `json:"to"`
}
func (b *BackendOptsRedirect) Validate() error {
return ErrorIfUnset(b.To == "", "To")
}
type BackendOptsTurbocharger struct {
Manifest turbocharger.ObjectID `json:"manifest"`
}
func (b *BackendOptsTurbocharger) Validate() error {
return nil
}
// factories
func SimpleApplication(id string, frontend Frontend, backend Backend) Application {
return Application{
Id: id,
Frontends: []Frontend{
frontend,
},
Backend: backend,
}
}
func SimpleHostnameFrontend(hostname string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostname,
Hostname: hostname,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func RegexpHostnameFrontend(hostnameRegexp string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostnameRegexp,
HostnameRegexp: hostnameRegexp,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
// catches all requests irregardless of hostname
func PathPrefixFrontend(pathPrefix string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(append([]FrontendOpt{PathPrefix(pathPrefix)}, options...))
return Frontend{
Kind: FrontendKindPathPrefix,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func S3Backend(bucketName string, regionId string, deployedVersion string) Backend {
return Backend{
Kind: BackendKindS3StaticWebsite,
S3StaticWebsiteOpts: &BackendOptsS3StaticWebsite{
BucketName: bucketName,
RegionId: regionId,
DeployedVersion: deployedVersion,
},
}
}
func ReverseProxyBackend(addrs []string, tlsConfig *TlsConfig, passHostHeader bool) Backend {
return Backend{
Kind: BackendKindReverseProxy,
ReverseProxyOpts: &BackendOptsReverseProxy{
Origins: addrs,
TlsConfig: tlsConfig,
PassHostHeader: passHostHeader,
},
}
}
func RedirectBackend(to string) Backend {
return Backend{
Kind: BackendKindRedirect,
RedirectOpts: &BackendOptsRedirect{
To: to,
},
}
}
func TurbochargerBackend(manifestID turbocharger.ObjectID) Backend {
return Backend{
Kind: BackendKindTurbocharger,
TurbochargerOpts: &BackendOptsTurbocharger{
Manifest: manifestID,
},
}
}
func LambdaBackend(functionName string, regionId string) Backend {
return Backend{
Kind: BackendKindAwsLambda,
AwsLambdaOpts: &BackendOptsAwsLambda{
FunctionName: functionName,
RegionId: regionId,
},
}
}
func EdgerouterAdminBackend() Backend {
return Backend{
Kind: BackendKindEd | {
return ErrorIfUnset(len(b.Origins) == 0, "Origins")
} | identifier_body |
flutter_service_worker.js | "assets/assets/sounds/Index13Length4.wav": "4eac2adb92f81fe26c37e268f703fa2c",
"assets/assets/sounds/Index14Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index14Length1.wav": "aa7777a5e53a9514c89a01beedf05a13",
"assets/assets/sounds/Index14Length2.wav": "85f14e073d1698e24709436f91eae42c",
"assets/assets/sounds/Index14Length3.wav": "cdf2a57e901376c7655ac9b90728cabd",
"assets/assets/sounds/Index15Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index15Length1.wav": "51de52e5be0125a54c637a7a736b3055",
"assets/assets/sounds/Index15Length2.wav": "7db7b43cab2c9294dd458e1d0b57dfae",
"assets/assets/sounds/Index16Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index16Length1.wav": "099e44307f3bdf7174b4f44836ecc658",
"assets/assets/sounds/Index1Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index1Length1.wav": "772615684d0f2489bf9166a569356866",
"assets/assets/sounds/Index1Length12.wav": "4887a12550b4952f0f6e189ddbe323c7",
"assets/assets/sounds/Index1Length16.wav": "b119c78b320e927c6f3ffae5a9f30a04",
"assets/assets/sounds/Index1Length2.wav": "39eb8faadc824e8fed8d18866ce1a2d4",
"assets/assets/sounds/Index1Length3.wav": "3999d2770500a785fa6d10e571e16920",
"assets/assets/sounds/Index1Length4.wav": "01ae3bf9420f02a7ae3542ec8d898485",
"assets/assets/sounds/Index1Length6.wav": "1edf1340657f9bdc3d048a1409c3b791",
"assets/assets/sounds/Index1Length8.wav": "23571245b6b38b08e7938f26dade427a",
"assets/assets/sounds/Index2Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index2Length1.wav": "2b209a60499653a81c82b2e34cadee05",
"assets/assets/sounds/Index2Length12.wav": "b77500c1be01fc23fc87df1d02a88584",
"assets/assets/sounds/Index2Length2.wav": "d3b4a0fbcc215c7c9b398b481b646e69",
"assets/assets/sounds/Index2Length3.wav": "9d4681203fa79fda8a799f8abb2274d0",
"assets/assets/sounds/Index2Length4.wav": "9bc307dfc2810972d7fb851780bd4856",
"assets/assets/sounds/Index2Length6.wav": "12256e9827dd704044361b6bea633697",
"assets/assets/sounds/Index2Length8.wav": "236cc6760ff8709f610a5df29f7ee914",
"assets/assets/sounds/Index3Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index3Length1.wav": "25501e35dab4ac090f3d393961c71469",
"assets/assets/sounds/Index3Length12.wav": "6b153fdd906699556ac9c48fe901f824",
"assets/assets/sounds/Index3Length2.wav": "3edf9bea4df56bd4e8298b080258e7ed",
"assets/assets/sounds/Index3Length3.wav": "46134b6c36782d084478b5e9e6b762fe",
"assets/assets/sounds/Index3Length4.wav": "6f107aa30809dc0acc671da9d57e0c98",
"assets/assets/sounds/Index3Length6.wav": "665578c3526000a6bb0979a8b71826af",
"assets/assets/sounds/Index3Length8.wav": "3d806f30a2daba348cc6b946cbcccdf7",
"assets/assets/sounds/Index4Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index4Length1.wav": "3743e7eb1b51781664c79375b942c6da",
"assets/assets/sounds/Index4Length12.wav": "c43ada913dc18cad452a60dd182ed75e",
"assets/assets/sounds/Index4Length2.wav": "6cf0776fffa186c2c2a12558d83768a7",
"assets/assets/sounds/Index4Length3.wav": "f849bddc85b2e7c6fd1f63bcbbd40a6d",
"assets/assets/sounds/Index4Length4.wav": "6b24a2da10a2f1c6a0b04191473341a1",
"assets/assets/sounds/Index4Length6.wav": "c458131ab9551df4f184a25f4e11b263",
"assets/assets/sounds/Index4Length8.wav": "81464c9774dad9c6a3580647ed9d3666",
"assets/assets/sounds/Index5Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index5Length1.wav": "806ab8337ca57173787a4e702a1344ef",
"assets/assets/sounds/Index5Length12.wav": "c1fe4a3b8bc188a2815d18aabb8cef2d",
"assets/assets/sounds/Index5Length2.wav": "87db89ddecceb04ed380c9c0d4 | "assets/assets/sounds/Index13Length2.wav": "746f509a12e5f7f2ece7209e7722f3f9",
"assets/assets/sounds/Index13Length3.wav": "90665e5c989f7e7379e2a7e4d54f3aac", | random_line_split |
|
flutter_service_worker.js | 1713a82800e28d25e4165",
"assets/assets/sounds/Index9Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index9Length1.wav": "ef3438e19d91637ec197ce7a0b5bfd0d",
"assets/assets/sounds/Index9Length2.wav": "0b870a0d1df07e11fc413c41e70b5d53",
"assets/assets/sounds/Index9Length3.wav": "f2cdebe3e1ce45cd4263368a22fe94ec",
"assets/assets/sounds/Index9Length4.wav": "dc5120192375369d8e2e447894b356b1",
"assets/assets/sounds/Index9Length6.wav": "590571d21627f180ab3f970abf145585",
"assets/assets/sounds/Index9Length8.wav": "1d3b950fd37d932669e0dac2054aefc1",
"assets/assets/sounds/metronome.wav": "fe5ef28c9c447aef8116393c4921a937",
"assets/FontManifest.json": "a5aee76623c8e3df7689a29833438d1a",
"assets/fonts/MaterialIcons-Regular.otf": "1288c9e28052e028aba623321f7826ac",
"assets/fonts/Musisync-KVLZ.ttf": "21c3d12f3e3ccbae6bd5441f77538930",
"assets/fonts/Musisync-qYy6.ttf": "7663fd6b156c6882e4b66bd72556e0ee",
"assets/NOTICES": "2d7d1132aa7574db1cc58b232ff796a0",
"assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "115e937bb829a890521f72d2e664b632",
"favicon.png": "5dcef449791fa27946b3d35ad8803796",
"icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
"icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
"index.html": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"/": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"main.dart.js": "0f32e2c67bc6400a2dd8d2bd7cadd89b",
"manifest.json": "0f93fc4cca58b0f1e83b1b53b384cc49",
"version.json": "dfe1ec7b18cea6bb77c40e3c6a6958b3"
};
// The application shell files that are downloaded before a service worker can
// start.
const CORE = [
"/",
"main.dart.js",
"index.html",
"assets/NOTICES",
"assets/AssetManifest.json",
"assets/FontManifest.json"];
// During install, the TEMP cache is populated with the application shell files.
self.addEventListener("install", (event) => {
self.skipWaiting();
return event.waitUntil(
caches.open(TEMP).then((cache) => {
return cache.addAll(
CORE.map((value) => new Request(value + '?revision=' + RESOURCES[value], {'cache': 'reload'})));
})
);
});
// During activate, the cache is populated with the temp files downloaded in
// install. If this service worker is upgrading from one with a saved
// MANIFEST, then use this to retain unchanged resource files.
self.addEventListener("activate", function(event) {
return event.waitUntil(async function() {
try {
var contentCache = await caches.open(CACHE_NAME);
var tempCache = await caches.open(TEMP);
var manifestCache = await caches.open(MANIFEST);
var manifest = await manifestCache.match('manifest');
// When there is no prior manifest, clear the entire cache.
if (!manifest) {
await caches.delete(CACHE_NAME);
contentCache = await caches.open(CACHE_NAME);
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
}
var oldManifest = await manifest.json();
var origin = self.location.origin;
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
// If a resource from the old manifest is not in the new cache, or if
// the MD5 sum has changed, delete it. Otherwise the resource is left
// in the cache and can be reused by the new service worker.
if (!RESOURCES[key] || RESOURCES[key] != oldManifest[key]) {
await contentCache.delete(request);
}
}
// Populate the cache with the app shell TEMP files, potentially overwriting
// cache files preserved above.
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
} catch (err) {
// On an unhandled exception the state of the cache cannot be guaranteed.
console.error('Failed to upgrade service worker: ' + err);
await caches.delete(CACHE_NAME);
await caches.delete(TEMP);
await caches.delete(MANIFEST);
}
}());
});
// The fetch handler redirects requests for RESOURCE files to the service
// worker cache.
self.addEventListener("fetch", (event) => {
if (event.request.method !== 'GET') {
return;
}
var origin = self.location.origin;
var key = event.request.url.substring(origin.length + 1);
// Redirect URLs to the index.html
if (key.indexOf('?v=') != -1) {
key = key.split('?v=')[0];
}
if (event.request.url == origin || event.request.url.startsWith(origin + '/#') || key == '') {
key = '/';
}
// If the URL is not the RESOURCE list then return to signal that the
// browser should take over.
if (!RESOURCES[key]) {
return;
}
// If the URL is the index.html, perform an online-first request.
if (key == '/') {
return onlineFirst(event);
}
event.respondWith(caches.open(CACHE_NAME)
.then((cache) => {
return cache.match(event.request).then((response) => {
// Either respond with the cached resource, or perform a fetch and
// lazily populate the cache.
return response || fetch(event.request).then((response) => {
cache.put(event.request, response.clone());
return response;
});
})
})
);
});
self.addEventListener('message', (event) => {
// SkipWaiting can be used to immediately activate a waiting service worker.
// This will also require a page refresh triggered by the main worker.
if (event.data === 'skipWaiting') {
self.skipWaiting();
return;
}
if (event.data === 'downloadOffline') {
downloadOffline();
return;
}
});
// Download offline will check the RESOURCES for all files not in the cache
// and populate them.
async function downloadOffline() | {
var resources = [];
var contentCache = await caches.open(CACHE_NAME);
var currentContent = {};
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
currentContent[key] = true;
}
for (var resourceKey of Object.keys(RESOURCES)) {
if (!currentContent[resourceKey]) {
resources.push(resourceKey);
}
}
return contentCache.addAll(resources);
} | identifier_body |
|
flutter_service_worker.js | 5",
"assets/assets/sounds/Index9Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index9Length1.wav": "ef3438e19d91637ec197ce7a0b5bfd0d",
"assets/assets/sounds/Index9Length2.wav": "0b870a0d1df07e11fc413c41e70b5d53",
"assets/assets/sounds/Index9Length3.wav": "f2cdebe3e1ce45cd4263368a22fe94ec",
"assets/assets/sounds/Index9Length4.wav": "dc5120192375369d8e2e447894b356b1",
"assets/assets/sounds/Index9Length6.wav": "590571d21627f180ab3f970abf145585",
"assets/assets/sounds/Index9Length8.wav": "1d3b950fd37d932669e0dac2054aefc1",
"assets/assets/sounds/metronome.wav": "fe5ef28c9c447aef8116393c4921a937",
"assets/FontManifest.json": "a5aee76623c8e3df7689a29833438d1a",
"assets/fonts/MaterialIcons-Regular.otf": "1288c9e28052e028aba623321f7826ac",
"assets/fonts/Musisync-KVLZ.ttf": "21c3d12f3e3ccbae6bd5441f77538930",
"assets/fonts/Musisync-qYy6.ttf": "7663fd6b156c6882e4b66bd72556e0ee",
"assets/NOTICES": "2d7d1132aa7574db1cc58b232ff796a0",
"assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "115e937bb829a890521f72d2e664b632",
"favicon.png": "5dcef449791fa27946b3d35ad8803796",
"icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
"icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
"index.html": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"/": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"main.dart.js": "0f32e2c67bc6400a2dd8d2bd7cadd89b",
"manifest.json": "0f93fc4cca58b0f1e83b1b53b384cc49",
"version.json": "dfe1ec7b18cea6bb77c40e3c6a6958b3"
};
// The application shell files that are downloaded before a service worker can
// start.
const CORE = [
"/",
"main.dart.js",
"index.html",
"assets/NOTICES",
"assets/AssetManifest.json",
"assets/FontManifest.json"];
// During install, the TEMP cache is populated with the application shell files.
self.addEventListener("install", (event) => {
self.skipWaiting();
return event.waitUntil(
caches.open(TEMP).then((cache) => {
return cache.addAll(
CORE.map((value) => new Request(value + '?revision=' + RESOURCES[value], {'cache': 'reload'})));
})
);
});
// During activate, the cache is populated with the temp files downloaded in
// install. If this service worker is upgrading from one with a saved
// MANIFEST, then use this to retain unchanged resource files.
self.addEventListener("activate", function(event) {
return event.waitUntil(async function() {
try {
var contentCache = await caches.open(CACHE_NAME);
var tempCache = await caches.open(TEMP);
var manifestCache = await caches.open(MANIFEST);
var manifest = await manifestCache.match('manifest');
// When there is no prior manifest, clear the entire cache.
if (!manifest) {
await caches.delete(CACHE_NAME);
contentCache = await caches.open(CACHE_NAME);
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
}
var oldManifest = await manifest.json();
var origin = self.location.origin;
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
// If a resource from the old manifest is not in the new cache, or if
// the MD5 sum has changed, delete it. Otherwise the resource is left
// in the cache and can be reused by the new service worker.
if (!RESOURCES[key] || RESOURCES[key] != oldManifest[key]) {
await contentCache.delete(request);
}
}
// Populate the cache with the app shell TEMP files, potentially overwriting
// cache files preserved above.
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
} catch (err) {
// On an unhandled exception the state of the cache cannot be guaranteed.
console.error('Failed to upgrade service worker: ' + err);
await caches.delete(CACHE_NAME);
await caches.delete(TEMP);
await caches.delete(MANIFEST);
}
}());
});
// The fetch handler redirects requests for RESOURCE files to the service
// worker cache.
self.addEventListener("fetch", (event) => {
if (event.request.method !== 'GET') {
return;
}
var origin = self.location.origin;
var key = event.request.url.substring(origin.length + 1);
// Redirect URLs to the index.html
if (key.indexOf('?v=') != -1) {
key = key.split('?v=')[0];
}
if (event.request.url == origin || event.request.url.startsWith(origin + '/#') || key == '') {
key = '/';
}
// If the URL is not the RESOURCE list then return to signal that the
// browser should take over.
if (!RESOURCES[key]) {
return;
}
// If the URL is the index.html, perform an online-first request.
if (key == '/') {
return onlineFirst(event);
}
event.respondWith(caches.open(CACHE_NAME)
.then((cache) => {
return cache.match(event.request).then((response) => {
// Either respond with the cached resource, or perform a fetch and
// lazily populate the cache.
return response || fetch(event.request).then((response) => {
cache.put(event.request, response.clone());
return response;
});
})
})
);
});
self.addEventListener('message', (event) => {
// SkipWaiting can be used to immediately activate a waiting service worker.
// This will also require a page refresh triggered by the main worker.
if (event.data === 'skipWaiting') {
self.skipWaiting();
return;
}
if (event.data === 'downloadOffline') {
downloadOffline();
return;
}
});
// Download offline will check the RESOURCES for all files not in the cache
// and populate them.
async function downloadOffline() {
var resources = [];
var contentCache = await caches.open(CACHE_NAME);
var currentContent = {};
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
currentContent[key] = true;
}
for (var resourceKey of Object.keys(RESOURCES)) {
if (!currentContent[resourceKey]) {
resources.push(resourceKey);
}
}
return contentCache.addAll(resources);
}
// Attempt to download the resource online before falling back to
// the offline cache.
function | onlineFirst | identifier_name |
|
flutter_service_worker.js | ": "fe792670c83be5650b0a692e008c68e9",
"assets/assets/sounds/Index8Length4.wav": "c2c338ea17f946e033e63e740e84853e",
"assets/assets/sounds/Index8Length6.wav": "bf359945806a4c1a1af022d3ab7c8388",
"assets/assets/sounds/Index8Length8.wav": "7685dc4811b1713a82800e28d25e4165",
"assets/assets/sounds/Index9Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index9Length1.wav": "ef3438e19d91637ec197ce7a0b5bfd0d",
"assets/assets/sounds/Index9Length2.wav": "0b870a0d1df07e11fc413c41e70b5d53",
"assets/assets/sounds/Index9Length3.wav": "f2cdebe3e1ce45cd4263368a22fe94ec",
"assets/assets/sounds/Index9Length4.wav": "dc5120192375369d8e2e447894b356b1",
"assets/assets/sounds/Index9Length6.wav": "590571d21627f180ab3f970abf145585",
"assets/assets/sounds/Index9Length8.wav": "1d3b950fd37d932669e0dac2054aefc1",
"assets/assets/sounds/metronome.wav": "fe5ef28c9c447aef8116393c4921a937",
"assets/FontManifest.json": "a5aee76623c8e3df7689a29833438d1a",
"assets/fonts/MaterialIcons-Regular.otf": "1288c9e28052e028aba623321f7826ac",
"assets/fonts/Musisync-KVLZ.ttf": "21c3d12f3e3ccbae6bd5441f77538930",
"assets/fonts/Musisync-qYy6.ttf": "7663fd6b156c6882e4b66bd72556e0ee",
"assets/NOTICES": "2d7d1132aa7574db1cc58b232ff796a0",
"assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "115e937bb829a890521f72d2e664b632",
"favicon.png": "5dcef449791fa27946b3d35ad8803796",
"icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
"icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
"index.html": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"/": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"main.dart.js": "0f32e2c67bc6400a2dd8d2bd7cadd89b",
"manifest.json": "0f93fc4cca58b0f1e83b1b53b384cc49",
"version.json": "dfe1ec7b18cea6bb77c40e3c6a6958b3"
};
// The application shell files that are downloaded before a service worker can
// start.
const CORE = [
"/",
"main.dart.js",
"index.html",
"assets/NOTICES",
"assets/AssetManifest.json",
"assets/FontManifest.json"];
// During install, the TEMP cache is populated with the application shell files.
self.addEventListener("install", (event) => {
self.skipWaiting();
return event.waitUntil(
caches.open(TEMP).then((cache) => {
return cache.addAll(
CORE.map((value) => new Request(value + '?revision=' + RESOURCES[value], {'cache': 'reload'})));
})
);
});
// During activate, the cache is populated with the temp files downloaded in
// install. If this service worker is upgrading from one with a saved
// MANIFEST, then use this to retain unchanged resource files.
self.addEventListener("activate", function(event) {
return event.waitUntil(async function() {
try {
var contentCache = await caches.open(CACHE_NAME);
var tempCache = await caches.open(TEMP);
var manifestCache = await caches.open(MANIFEST);
var manifest = await manifestCache.match('manifest');
// When there is no prior manifest, clear the entire cache.
if (!manifest) {
await caches.delete(CACHE_NAME);
contentCache = await caches.open(CACHE_NAME);
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
}
var oldManifest = await manifest.json();
var origin = self.location.origin;
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
// If a resource from the old manifest is not in the new cache, or if
// the MD5 sum has changed, delete it. Otherwise the resource is left
// in the cache and can be reused by the new service worker.
if (!RESOURCES[key] || RESOURCES[key] != oldManifest[key]) {
await contentCache.delete(request);
}
}
// Populate the cache with the app shell TEMP files, potentially overwriting
// cache files preserved above.
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
} catch (err) {
// On an unhandled exception the state of the cache cannot be guaranteed.
console.error('Failed to upgrade service worker: ' + err);
await caches.delete(CACHE_NAME);
await caches.delete(TEMP);
await caches.delete(MANIFEST);
}
}());
});
// The fetch handler redirects requests for RESOURCE files to the service
// worker cache.
self.addEventListener("fetch", (event) => {
if (event.request.method !== 'GET') {
return;
}
var origin = self.location.origin;
var key = event.request.url.substring(origin.length + 1);
// Redirect URLs to the index.html
if (key.indexOf('?v=') != -1) {
key = key.split('?v=')[0];
}
if (event.request.url == origin || event.request.url.startsWith(origin + '/#') || key == '') {
key = '/';
}
// If the URL is not the RESOURCE list then return to signal that the
// browser should take over.
if (!RESOURCES[key]) {
return;
}
// If the URL is the index.html, perform an online-first request.
if (key == '/') {
return onlineFirst(event);
}
event.respondWith(caches.open(CACHE_NAME)
.then((cache) => {
return cache.match(event.request).then((response) => {
// Either respond with the cached resource, or perform a fetch and
// lazily populate the cache.
return response || fetch(event.request).then((response) => {
cache.put(event.request, response.clone());
return response;
});
})
})
);
});
self.addEventListener('message', (event) => {
// SkipWaiting can be used to immediately activate a waiting service worker.
// This will also require a page refresh triggered by the main worker.
if (event.data === 'skipWaiting') {
self.skipWaiting();
return;
}
if (event.data === 'downloadOffline') | {
downloadOffline();
return;
} | conditional_block |
|
facade.rs | // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
}
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) }
}
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => |
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
}
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn serde_repo_configuration() {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add | {
let return_value = to_value(&repos)?;
return Ok(return_value);
} | conditional_block |
facade.rs | // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
} | }
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
let return_value = to_value(&repos)?;
return Ok(return_value);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
}
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn serde_repo_configuration() {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to |
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) } | random_line_split |
facade.rs | // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
}
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) }
}
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
let return_value = to_value(&repos)?;
return Ok(return_value);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
}
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn | () {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to | serde_repo_configuration | identifier_name |
facade.rs | // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
}
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) }
}
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
let return_value = to_value(&repos)?;
return Ok(return_value);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> |
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn serde_repo_configuration() {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add | {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
} | identifier_body |
gulpfile.js | Depends on: watch
*/
gulp.task('live-reload', ['watch'], function() {
var livereload = require('gulp-livereload');
settings.liveReload = true;
// first, delete the index.html from the dist folder as we will copy it later
del([settings.dist + 'index.html']);
// add livereload script to the index.html
gulp.src([settings.src + 'index.html'])
.pipe(gulpif(argv.dev, replace(/app.min.js/g, 'app.js')))
.pipe(gulpif(argv.nohuna, replace('<script src=\'js/huna.min.js\'></script>', '')))
.pipe(replace(/(\<\/body\>)/g, "<script>document.write('<script src=\"http://' + (location.host || 'localhost').split(':')[0] + ':35729/livereload.js?snipver=1\"></' + 'script>')</script>$1"))
.pipe(gulp.dest(settings.dist));
// Create LiveReload server
livereload.listen();
// Watch any files in dist/*, reload on change
gulp.watch([settings.dist + '**']).on('change', livereload.changed);
});
/**
* Task to handle and deploy all javascript, application & vendor
*
* Depends on: scripts-app, scripts-vendor
*/
gulp.task('scripts', ['scripts-app','scripts-vendor']);
/**
* Removes the node_modules
*/
gulp.task('remove',['clean'], function(cb){
del('node_modules', cb);
});
/**
* Minifies all javascript found in the `src/js/**` folder. All files will be concatenated into `app.js`. Minified and non-minified versions are copied to the dist folder.
* This will also generete sourcemaps for the minified version.
*
* Depends on: docs
*/
gulp.task('scripts-app', ['docs-js'], function() {
var jshint = require('gulp-jshint'),
ngannotate = require('gulp-ng-annotate'),
stripDebug = require('gulp-strip-debug'),
stylish = require('jshint-stylish'),
sourcemaps = require('gulp-sourcemaps'),
uglify = require('gulp-uglify');
// gulpify the huna library
gulp.src([settings.src + 'js/app/huna.js'])
.pipe(plumber(settings.plumberConfig()))
.pipe(ngannotate({gulpWarnings: false}))
.pipe(jshint())
.pipe(jshint.reporter(stylish))
.pipe(gulp.dest(settings.dist + 'js'))
// make minified
.pipe(rename({suffix: '.min'}))
.pipe(gulpif(!argv.dev, stripDebug()))
.pipe(sourcemaps.init())
.pipe(gulpif(!argv.dev, uglify()))
.pipe(sourcemaps.write())
.pipe(gulp.dest(settings.dist + 'js'));
return gulp.src(['!'+settings.src + 'js/app/huna.js', settings.src + 'js/app/**/*.js'])
.pipe(plumber(settings.plumberConfig()))
.pipe(ngannotate({gulpWarnings: false}))
.pipe(jshint())
.pipe(jshint.reporter(stylish))
.pipe(concat('app.js'))
.pipe(gulp.dest(settings.dist + 'js'))
// make minified
.pipe(rename({suffix: '.min'}))
.pipe(gulpif(!argv.dev, stripDebug()))
.pipe(sourcemaps.init())
.pipe(gulpif(!argv.dev, uglify()))
.pipe(sourcemaps.write())
.pipe(gulp.dest(settings.dist + 'js'));
});
/**
* Task to handle all vendor specific javasript. All vendor javascript will be copied to the dist directory. Also a concatinated version will be made, available in \dist\js\vendor\vendor.js
*/
gulp.task('scripts-vendor', ['scripts-vendor-maps'], function() {
// script must be included in the right order. First include angular, then angular-route
return gulp.src([settings.src + 'js/vendor/*/**/angular.min.js',settings.src + 'js/vendor/**/*.js'])
.pipe(gulp.dest(settings.dist + 'js/vendor'))
.pipe(concat('vendor.js'))
.pipe(gulp.dest(settings.dist + 'js/vendor'));
});
/**
* Copy all vendor .js.map files to the vendor location
*/
gulp.task('scripts-vendor-maps', function(){
var flatten = require('gulp-flatten');
return gulp.src(settings.src + 'js/vendor/**/*.js.map')
.pipe(flatten())
.pipe(gulp.dest(settings.dist + 'js/vendor'));
});
/**
* Task to start a server on port 4000.
*/
gulp.task('server', function(){
var express = require('express'),
app = express(),
url = require('url'),
port = argv.port||settings.serverport,
proxy = require('proxy-middleware');
app.use(express.static(__dirname + "/dist"));
if (argv.remote) {
app.use('/api', proxy(url.parse('http://huna.tuvok.nl:1337/api')));
} else {
app.use('/api', proxy(url.parse('http://localhost:1337/api')));
}
app.listen(port);
gutil.log('Server started. Port', port,"baseDir",__dirname+"/"+settings.dist);
});
gulp.task('nodemon', function(cb) {
var nodemon = require('gulp-nodemon');
// We use this `called` variable to make sure the callback is only executed once
var called = false;
return nodemon({
script: 'app.js',
watch: ['app.js', 'api/**/*.*', 'config/**/*.*']
})
.on('start', function onStart() {
if (!called) {
cb();
}
called = true;
})
.on('restart', function onRestart() {
// Also reload the browsers after a slight delay
setTimeout(function reload() {
browserSync.reload({
stream: false
});
}, 500);
});
});
/**
* Task to start the backend servers.
* Depends on: backend-mongo, backend-server
*/
gulp.task('backend', ['backend-mongo', 'backend-server'], function () {});
/**
* Task to start the backend mongo server
* should be running before the backend-server
*/
gulp.task('backend-mongo', function () {
var exec = require('child_process').exec;
exec('mongod', function (err, stdout, stderr) {
console.log(stdout);
console.log(stderr);
onError(err);
});
});
/**
* Task to start up the backend server
* run the mongo db first
*/
gulp.task('backend-server', function () {
var exec = require('child_process').exec;
exec('node app.js', function (err, stdout, stderr) {
console.log(stdout);
console.log(stderr);
onError(err);
});
});
/**
* Task to start a server on port 4000 and used the live reload functionality.
* Depends on: server, live-reload
*/
gulp.task('start', ['live-reload', 'server'], function(){});
/**
* Compile Sass into Css and minify it. Minified and non-minified versions are copied to the dist folder.
* This will also auto prefix vendor specific rules.
*/
gulp.task('styles', function() {
var autoprefixer = require('gulp-autoprefixer'),
minifycss = require('gulp-minify-css'),
sass = require('gulp-sass');
return gulp.src([settings.src + 'styles/main.scss', settings.src + '/js/vendor/**/c3.min.css'])
.pipe(plumber(settings.plumberConfig()))
.pipe(sass({ style: 'expanded' }))
// .pipe(autoprefixer('last 2 version', 'safari 5', 'ie 8', 'ie 9', 'opera 12.1', 'ios 6', 'android 4'))
.pipe(gulp.dest(settings.dist + 'css'))
.pipe(rename({suffix: '.min'}))
.pipe(minifycss())
.pipe(gulp.dest(settings.dist + 'css'));
});
/**
* Output TODO's & FIXME's in markdown and json file as well
*/
gulp.task('todo', function() {
var todo = require('gulp-todo');
gulp.src([settings.src + 'js/app/**/*.js',settings.src + 'styles/app/**/*.scss'])
.pipe(plumber(settings.plumberConfig()))
.pipe(todo())
.pipe(gulp.dest(settings.reports)) //output todo.md as markdown
.pipe(todo.reporter('json', {fileName: 'todo.json'}))
.pipe(gulp.dest(settings.reports)) //output todo.json as json
});
/**
* Watches changes to template, Sass, javascript and image files. On change this will run the appropriate task, either: copy styles, scripts or images.
*/
gulp.task('watch', function() {
// watch index.html
gulp.watch(settings.src + 'index.html', ['copy-index']);
// watch html files
gulp.watch(settings.src + '**/*.html', ['copy-template']);
// watch fonts
gulp.watch(settings.src + 'fonts/**', ['copy-fonts']);
// Watch .scss files
gulp.watch(settings.src + 'styles/**/*.scss', ['styles']);
// Watch app .js files
gulp.watch(settings.src + 'js/app/**/*.js', ['scripts-app']);
// Watch vendor .js files
gulp.watch(settings.src + 'js/vendor/**/*.js', ['scripts-vendor']);
// Watch image files
gulp.watch(settings.src + 'img/**/*', ['images']);
});
function | onError | identifier_name |
|
gulpfile.js | });
/**
* Task for copying fonts only
*/
gulp.task('copy-fonts', function() {
var deferred = q.defer();
// copy all fonts
setTimeout(function() {
gulp.src( settings.src + 'fonts/**')
.pipe(gulp.dest(settings.dist + 'fonts'));
deferred.resolve();
}, 1);
return deferred.promise;
});
/**
* task for copying templates only
*/
gulp.task('copy-template', function() {
// copy all html && json
return gulp.src( [settings.src + 'js/app/**/*.html', settings.src + 'js/app/**/*.json'])
.pipe(cache(gulp.dest('dist/js/app')));
});
/**
* Task for copying index page only. Optionally add live reload script to it
*/
gulp.task('copy-index', function() {
// copy the index.html
return gulp.src(settings.src + 'index.html')
.pipe(gulpif(argv.dev, replace(/app.min.js/g, 'app.js')))
.pipe(gulpif(argv.nohuna, replace('<script src=\'js/huna.min.js\'></script>', '')))
.pipe(gulpif(settings.liveReload, replace(/(\<\/body\>)/g, "<script>document.write('<script src=\"http://' + (location.host || 'localhost').split(':')[0] + ':35729/livereload.js?snipver=1\"></' + 'script>')</script>$1")))
.pipe(cache(gulp.dest(settings.dist)));
});
/**
* Default task.
* Depends on: build
*/
gulp.task('default', ['build']);
/**
* Create Javascript documentation
*/
gulp.task('docs-js', ['todo'], function(){
var gulpDoxx = require('gulp-doxx');
gulp.src([settings.src + '/js/**/*.js', 'README.md', settings.reports + '/TODO.md'])
.pipe(gulpDoxx({
title: config.name,
urlPrefix: "file:///"+__dirname+settings.reports
}))
.pipe(gulp.dest(settings.reports));
});
/**
* Task to optimize and deploy all images found in folder `src/img/**`. Result is copied to `dist/img`
*/
gulp.task('images', function() {
var imagemin = require('gulp-imagemin');
var deferred = q.defer();
setTimeout(function() {
gulp.src(settings.src + 'img/**/*')
.pipe(plumber(settings.plumberConfig()))
.pipe(cache(imagemin({ optimizationLevel: 5, progressive: true, interlaced: true })))
.pipe(gulp.dest(settings.dist + 'img'));
deferred.resolve();
}, 1);
return deferred.promise;
});
/**
* log some info
*/
gulp.task('info',function(){
// log project details
gutil.log( gutil.colors.cyan("Running gulp on project "+config.name+" v"+ config.version) );
gutil.log( gutil.colors.cyan("Author: " + config.author[0].name) );
gutil.log( gutil.colors.cyan("Email : " + config.author[0].email) );
gutil.log( gutil.colors.cyan("Site : " + config.author[0].url) );
gutil.log( gutil.colors.cyan("Author: " + config.author[1].name) );
gutil.log( gutil.colors.cyan("Email : " + config.author[1].email) );
gutil.log( gutil.colors.cyan("Site : " + config.author[1].url) );
// log info
gutil.log("If you have an enhancement or encounter a bug, please report them on", gutil.colors.magenta(config.bugs.url));
});
/**
* Start the live reload server. Live reload will be triggered when a file in the `dist` folder changes. This will add a live-reload script to the index.html page, which makes it all happen.
* Depends on: watch
*/
gulp.task('live-reload', ['watch'], function() {
var livereload = require('gulp-livereload');
settings.liveReload = true;
// first, delete the index.html from the dist folder as we will copy it later
del([settings.dist + 'index.html']);
// add livereload script to the index.html
gulp.src([settings.src + 'index.html'])
.pipe(gulpif(argv.dev, replace(/app.min.js/g, 'app.js')))
.pipe(gulpif(argv.nohuna, replace('<script src=\'js/huna.min.js\'></script>', '')))
.pipe(replace(/(\<\/body\>)/g, "<script>document.write('<script src=\"http://' + (location.host || 'localhost').split(':')[0] + ':35729/livereload.js?snipver=1\"></' + 'script>')</script>$1"))
.pipe(gulp.dest(settings.dist));
// Create LiveReload server
livereload.listen();
// Watch any files in dist/*, reload on change
gulp.watch([settings.dist + '**']).on('change', livereload.changed);
});
/**
* Task to handle and deploy all javascript, application & vendor
*
* Depends on: scripts-app, scripts-vendor
*/
gulp.task('scripts', ['scripts-app','scripts-vendor']);
/**
* Removes the node_modules
*/
gulp.task('remove',['clean'], function(cb){
del('node_modules', cb);
});
/**
* Minifies all javascript found in the `src/js/**` folder. All files will be concatenated into `app.js`. Minified and non-minified versions are copied to the dist folder.
* This will also generete sourcemaps for the minified version.
*
* Depends on: docs
*/
gulp.task('scripts-app', ['docs-js'], function() {
var jshint = require('gulp-jshint'),
ngannotate = require('gulp-ng-annotate'),
stripDebug = require('gulp-strip-debug'),
stylish = require('jshint-stylish'),
sourcemaps = require('gulp-sourcemaps'),
uglify = require('gulp-uglify');
// gulpify the huna library
gulp.src([settings.src + 'js/app/huna.js'])
.pipe(plumber(settings.plumberConfig()))
.pipe(ngannotate({gulpWarnings: false}))
.pipe(jshint())
.pipe(jshint.reporter(stylish))
.pipe(gulp.dest(settings.dist + 'js'))
// make minified
.pipe(rename({suffix: '.min'}))
.pipe(gulpif(!argv.dev, stripDebug()))
.pipe(sourcemaps.init())
.pipe(gulpif(!argv.dev, uglify()))
.pipe(sourcemaps.write())
.pipe(gulp.dest(settings.dist + 'js'));
return gulp.src(['!'+settings.src + 'js/app/huna.js', settings.src + 'js/app/**/*.js'])
.pipe(plumber(settings.plumberConfig()))
.pipe(ngannotate({gulpWarnings: false}))
.pipe(jshint())
.pipe(jshint.reporter(stylish))
.pipe(concat('app.js'))
.pipe(gulp.dest(settings.dist + 'js'))
// make minified
.pipe(rename({suffix: '.min'}))
.pipe(gulpif(!argv.dev, stripDebug()))
.pipe(sourcemaps.init())
.pipe(gulpif(!argv.dev, uglify()))
.pipe(sourcemaps.write())
.pipe(gulp.dest(settings.dist + 'js'));
});
/**
* Task to handle all vendor specific javasript. All vendor javascript will be copied to the dist directory. Also a concatinated version will be made, available in \dist\js\vendor\vendor.js
*/
gulp.task('scripts-vendor', ['scripts-vendor-maps'], function() {
// script must be included in the right order. First include angular, then angular-route
return gulp.src([settings.src + 'js/vendor/*/**/angular.min.js',settings.src + 'js/vendor/**/*.js'])
.pipe(gulp.dest(settings.dist + 'js/vendor'))
.pipe(concat('vendor.js'))
.pipe(gulp.dest(settings.dist + 'js/vendor'));
});
/**
* Copy all vendor .js.map files to the vendor location
*/
gulp.task('scripts-vendor-maps', function(){
var flatten = require('gulp-flatten');
return gulp.src(settings.src + 'js/vendor/**/*.js.map')
.pipe(flatten())
.pipe(gulp.dest(settings.dist + 'js/vendor'));
});
/**
* Task to start a server on port 4000.
*/
gulp.task('server', function(){
var express = require('express'),
app = express(),
url = require('url'),
port = argv.port||settings.serverport,
proxy = require('proxy-middleware');
app.use(express.static(__dirname + "/dist"));
if (argv.remote) {
app.use('/api', proxy(url.parse('http://huna.tuvok.nl:1337/api')));
} else {
app.use('/api', proxy(url.parse('http://localhost:1337/api')));
}
app.listen(port);
gutil.log('Server started. Port', port,"baseDir",__dirname+"/"+settings.dist);
});
gulp.task('nodemon', function(cb) {
var nodemon = require('gulp-nodemon');
// We use this `called` variable to make sure the callback is only executed once
var called = false;
return nodemon({
script: 'app.js',
watch: ['app.js', 'api/**/*.*', 'config/**/*.*']
})
.on('start', function onStart() {
if (!called) | {
cb();
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.