| prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k) |
|---|---|
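Each row below pairs a FIM (fill-in-the-middle) prompt with its target completion: the prompt wraps a source file in `<|file_name|>` and `<|fim▁begin|>` sentinels, marks the masked span with `<|fim_middle|>` (or, in some rows, `<|fim▁hole|>` placed directly at the cut point), and the completion column holds the text that was cut out. Below is a minimal sketch of how such a pair could be assembled; the helper name `make_fim_pair`, its signature, and the hand-picked span indices are illustrative assumptions, not part of the dataset — only the sentinel spellings are copied from the rows themselves.

```python
def make_fim_pair(file_name: str, source: str, start: int, end: int):
    """Cut source[start:end] out as the completion; the rest forms the prompt.

    Assumption: the rows in this table were produced by a scheme like this.
    The sentinel token spellings are reproduced from the rows as shown.
    """
    prefix, middle, suffix = source[:start], source[start:end], source[end:]
    prompt = (
        f"<|file_name|>{file_name}<|end_file_name|>"
        f"<|fim▁begin|>{prefix}<|fim_middle|>{suffix}<|fim▁end|>"
    )
    return prompt, middle

# Illustrative usage: mask the body of rewind() from the ex20.py rows
# (indices chosen by hand for this two-line snippet).
src = "def rewind(f):\n    f.seek(0)\n"
prompt, completion = make_fim_pair("ex20.py", src, 19, 28)
print(completion)  # -> f.seek(0)
```

The rows that use `<|fim▁hole|>` follow the same idea, except the sentinel sits at the cut point itself and no `<|fim_middle|>` placeholder appears.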
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='', top_path=None):
import warnings
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
config = Configuration('odr', parent_package, top_path)
libodr_files = ['d_odr.f',
'd_mprec.f',
'dlunoc.f']
blas_info = get_info('blas_opt')
if blas_info:
libodr_files.append('d_lpk.f')
else:
<|fim_middle|>
odrpack_src = [join('odrpack', x) for x in libodr_files]
config.add_library('odrpack', sources=odrpack_src)
sources = ['__odrpack.c']
libraries = ['odrpack'] + blas_info.pop('libraries', [])
include_dirs = ['.'] + blas_info.pop('include_dirs', [])
config.add_extension('__odrpack',
sources=sources,
libraries=libraries,
include_dirs=include_dirs,
depends=(['odrpack.h'] + odrpack_src),
**blas_info
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
<|fim▁end|> | warnings.warn(BlasNotFoundError.__doc__)
libodr_files.append('d_lpkbls.f') |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='', top_path=None):
import warnings
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
config = Configuration('odr', parent_package, top_path)
libodr_files = ['d_odr.f',
'd_mprec.f',
'dlunoc.f']
blas_info = get_info('blas_opt')
if blas_info:
libodr_files.append('d_lpk.f')
else:
warnings.warn(BlasNotFoundError.__doc__)
libodr_files.append('d_lpkbls.f')
odrpack_src = [join('odrpack', x) for x in libodr_files]
config.add_library('odrpack', sources=odrpack_src)
sources = ['__odrpack.c']
libraries = ['odrpack'] + blas_info.pop('libraries', [])
include_dirs = ['.'] + blas_info.pop('include_dirs', [])
config.add_extension('__odrpack',
sources=sources,
libraries=libraries,
include_dirs=include_dirs,
depends=(['odrpack.h'] + odrpack_src),
**blas_info
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | from numpy.distutils.core import setup
setup(**configuration(top_path='').todict()) |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def <|fim_middle|>(parent_package='', top_path=None):
import warnings
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
config = Configuration('odr', parent_package, top_path)
libodr_files = ['d_odr.f',
'd_mprec.f',
'dlunoc.f']
blas_info = get_info('blas_opt')
if blas_info:
libodr_files.append('d_lpk.f')
else:
warnings.warn(BlasNotFoundError.__doc__)
libodr_files.append('d_lpkbls.f')
odrpack_src = [join('odrpack', x) for x in libodr_files]
config.add_library('odrpack', sources=odrpack_src)
sources = ['__odrpack.c']
libraries = ['odrpack'] + blas_info.pop('libraries', [])
include_dirs = ['.'] + blas_info.pop('include_dirs', [])
config.add_extension('__odrpack',
sources=sources,
libraries=libraries,
include_dirs=include_dirs,
depends=(['odrpack.h'] + odrpack_src),
**blas_info
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
<|fim▁end|> | configuration |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
<|fim▁hole|> company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)<|fim▁end|> | class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType) |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
<|fim_middle|>
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
<|fim_middle|>
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | return self.type_desc |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
<|fim_middle|>
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
<|fim_middle|>
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | return self.street_address + ',' + self.city |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
<|fim_middle|>
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
<|fim_middle|>
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | abstract = True |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
<|fim_middle|>
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
<|fim_middle|>
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | return self.title |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
<|fim_middle|>
<|fim▁end|> | title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user) |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
<|fim_middle|>
<|fim▁end|> | return unicode(self.user) |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def <|fim_middle|>(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | __unicode__ |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def <|fim_middle|>(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | __unicode__ |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def <|fim_middle|>(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def __unicode__(self, arg):
return unicode(self.user)
<|fim▁end|> | __unicode__ |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class OrganisationType(models.Model):
type_desc = models.CharField(max_length=200)
def __unicode__(self):
return self.type_desc
class Address(models.Model):
street_address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pin = models.CharField(max_length=10)
province = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
def __unicode__(self):
return self.street_address + ',' + self.city
class HattiUser(models.Model):
user = models.OneToOneField(User)
address = models.ForeignKey(Address)
telephone = models.CharField(max_length=500)
date_joined = models.DateTimeField(auto_now_add=True)
fax = models.CharField(max_length=100)
avatar = models.CharField(max_length=100, null=True, blank=True)
tagline = models.CharField(max_length=140)
class Meta:
abstract = True
class AdminOrganisations(HattiUser):
title = models.CharField(max_length=200)
organisation_type = models.ForeignKey(OrganisationType)
def __unicode__(self):
return self.title
class Customer(HattiUser):
title = models.CharField(max_length=200, blank=True, null=True)
is_org = models.BooleanField();
org_type = models.ForeignKey(OrganisationType)
company = models.CharField(max_length = 200)
def <|fim_middle|>(self, arg):
return unicode(self.user)
<|fim▁end|> | __unicode__ |
<|file_name|>ex20.py<|end_file_name|><|fim▁begin|>from sys import argv
script, input_file = argv
<|fim▁hole|> f.seek(0)
def print_a_line(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)<|fim▁end|> | def print_all(f):
print f.read()
def rewind(f): |
<|file_name|>ex20.py<|end_file_name|><|fim▁begin|>from sys import argv
script, input_file = argv
def print_all(f):
<|fim_middle|>
def rewind(f):
f.seek(0)
def print_a_line(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
<|fim▁end|> | print f.read() |
<|file_name|>ex20.py<|end_file_name|><|fim▁begin|>from sys import argv
script, input_file = argv
def print_all(f):
print f.read()
def rewind(f):
<|fim_middle|>
def print_a_line(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
<|fim▁end|> | f.seek(0) |
<|file_name|>ex20.py<|end_file_name|><|fim▁begin|>from sys import argv
script, input_file = argv
def print_all(f):
print f.read()
def rewind(f):
f.seek(0)
def print_a_line(line_count, f):
<|fim_middle|>
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
<|fim▁end|> | print line_count, f.readline() |
<|file_name|>ex20.py<|end_file_name|><|fim▁begin|>from sys import argv
script, input_file = argv
def <|fim_middle|>(f):
print f.read()
def rewind(f):
f.seek(0)
def print_a_line(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
<|fim▁end|> | print_all |
<|file_name|>ex20.py<|end_file_name|><|fim▁begin|>from sys import argv
script, input_file = argv
def print_all(f):
print f.read()
def <|fim_middle|>(f):
f.seek(0)
def print_a_line(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
<|fim▁end|> | rewind |
<|file_name|>ex20.py<|end_file_name|><|fim▁begin|>from sys import argv
script, input_file = argv
def print_all(f):
print f.read()
def rewind(f):
f.seek(0)
def <|fim_middle|>(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
<|fim▁end|> | print_a_line |
<|file_name|>variables.py<|end_file_name|><|fim▁begin|>__author__ = 'sekely'
'''
we are using variables almost everywhere in the code.<|fim▁hole|>
this of it as the famous "x" from high school
x = 5, right?
the only thing is, that in Python "x" can store anything
'''
# try this code:
x = 5
y = x + 3
print(y)
# what about this? will it work?
x = 'hello'
y = ' '
z = 'world!'
w = x + y + z
print(w)<|fim▁end|> | variables are used to store results, calculations and many more. |
<|file_name|>ViewGeneratorService.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import Config
import Database
import atexit, os, time
from flask import Flask
from concurrent.futures import ThreadPoolExecutor
from classes import CRONTask
# Generate a thread pool
executor = ThreadPoolExecutor(5)
app = Flask( __name__ )
@app.route( "/" )
def index( ) :
return "View Generator Service is Active!"
@app.route( "/View" )
def view( ) :
executor.submit(runTask)
return ""
def runTask( ) :<|fim▁hole|> cron = CRONTask.CRONTask( )
cron.run( )
cron.killPID( )
sys.exit(0)
if __name__ == '__main__' :
app.run( debug=True, port=Config.PORT, host=Config.HOST )<|fim▁end|> | |
<|file_name|>ViewGeneratorService.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import Config
import Database
import atexit, os, time
from flask import Flask
from concurrent.futures import ThreadPoolExecutor
from classes import CRONTask
# Generate a thread pool
executor = ThreadPoolExecutor(5)
app = Flask( __name__ )
@app.route( "/" )
def index( ) :
<|fim_middle|>
@app.route( "/View" )
def view( ) :
executor.submit(runTask)
return ""
def runTask( ) :
cron = CRONTask.CRONTask( )
cron.run( )
cron.killPID( )
sys.exit(0)
if __name__ == '__main__' :
app.run( debug=True, port=Config.PORT, host=Config.HOST )<|fim▁end|> | return "View Generator Service is Active!" |
<|file_name|>ViewGeneratorService.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import Config
import Database
import atexit, os, time
from flask import Flask
from concurrent.futures import ThreadPoolExecutor
from classes import CRONTask
# Generate a thread pool
executor = ThreadPoolExecutor(5)
app = Flask( __name__ )
@app.route( "/" )
def index( ) :
return "View Generator Service is Active!"
@app.route( "/View" )
def view( ) :
<|fim_middle|>
def runTask( ) :
cron = CRONTask.CRONTask( )
cron.run( )
cron.killPID( )
sys.exit(0)
if __name__ == '__main__' :
app.run( debug=True, port=Config.PORT, host=Config.HOST )<|fim▁end|> | executor.submit(runTask)
return "" |
<|file_name|>ViewGeneratorService.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import Config
import Database
import atexit, os, time
from flask import Flask
from concurrent.futures import ThreadPoolExecutor
from classes import CRONTask
# Generate a thread pool
executor = ThreadPoolExecutor(5)
app = Flask( __name__ )
@app.route( "/" )
def index( ) :
return "View Generator Service is Active!"
@app.route( "/View" )
def view( ) :
executor.submit(runTask)
return ""
def runTask( ) :
<|fim_middle|>
if __name__ == '__main__' :
app.run( debug=True, port=Config.PORT, host=Config.HOST )<|fim▁end|> | cron = CRONTask.CRONTask( )
cron.run( )
cron.killPID( )
sys.exit(0) |
<|file_name|>ViewGeneratorService.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import Config
import Database
import atexit, os, time
from flask import Flask
from concurrent.futures import ThreadPoolExecutor
from classes import CRONTask
# Generate a thread pool
executor = ThreadPoolExecutor(5)
app = Flask( __name__ )
@app.route( "/" )
def index( ) :
return "View Generator Service is Active!"
@app.route( "/View" )
def view( ) :
executor.submit(runTask)
return ""
def runTask( ) :
cron = CRONTask.CRONTask( )
cron.run( )
cron.killPID( )
sys.exit(0)
if __name__ == '__main__' :
<|fim_middle|>
<|fim▁end|> | app.run( debug=True, port=Config.PORT, host=Config.HOST ) |
<|file_name|>ViewGeneratorService.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import Config
import Database
import atexit, os, time
from flask import Flask
from concurrent.futures import ThreadPoolExecutor
from classes import CRONTask
# Generate a thread pool
executor = ThreadPoolExecutor(5)
app = Flask( __name__ )
@app.route( "/" )
def <|fim_middle|>( ) :
return "View Generator Service is Active!"
@app.route( "/View" )
def view( ) :
executor.submit(runTask)
return ""
def runTask( ) :
cron = CRONTask.CRONTask( )
cron.run( )
cron.killPID( )
sys.exit(0)
if __name__ == '__main__' :
app.run( debug=True, port=Config.PORT, host=Config.HOST )<|fim▁end|> | index |
<|file_name|>ViewGeneratorService.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import Config
import Database
import atexit, os, time
from flask import Flask
from concurrent.futures import ThreadPoolExecutor
from classes import CRONTask
# Generate a thread pool
executor = ThreadPoolExecutor(5)
app = Flask( __name__ )
@app.route( "/" )
def index( ) :
return "View Generator Service is Active!"
@app.route( "/View" )
def <|fim_middle|>( ) :
executor.submit(runTask)
return ""
def runTask( ) :
cron = CRONTask.CRONTask( )
cron.run( )
cron.killPID( )
sys.exit(0)
if __name__ == '__main__' :
app.run( debug=True, port=Config.PORT, host=Config.HOST )<|fim▁end|> | view |
<|file_name|>ViewGeneratorService.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import Config
import Database
import atexit, os, time
from flask import Flask
from concurrent.futures import ThreadPoolExecutor
from classes import CRONTask
# Generate a thread pool
executor = ThreadPoolExecutor(5)
app = Flask( __name__ )
@app.route( "/" )
def index( ) :
return "View Generator Service is Active!"
@app.route( "/View" )
def view( ) :
executor.submit(runTask)
return ""
def <|fim_middle|>( ) :
cron = CRONTask.CRONTask( )
cron.run( )
cron.killPID( )
sys.exit(0)
if __name__ == '__main__' :
app.run( debug=True, port=Config.PORT, host=Config.HOST )<|fim▁end|> | runTask |
<|file_name|>returnTypeInNewNumpyDocString_after.py<|end_file_name|><|fim▁begin|>def f(x):
"""
Returns
-------
object<|fim▁hole|> return 42<|fim▁end|> | """ |
<|file_name|>returnTypeInNewNumpyDocString_after.py<|end_file_name|><|fim▁begin|>def f(x):
<|fim_middle|>
<|fim▁end|> | """
Returns
-------
object
"""
return 42 |
<|file_name|>returnTypeInNewNumpyDocString_after.py<|end_file_name|><|fim▁begin|>def <|fim_middle|>(x):
"""
Returns
-------
object
"""
return 42<|fim▁end|> | f |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from django.contrib.admin import TabularInline
from .models import GalleryPhoto
class PhotoInline(TabularInline):
"""
Tabular inline that will be displayed in the gallery form during frontend
editing or in the admin site.<|fim▁hole|><|fim▁end|> | """
model = GalleryPhoto
fk_name = "gallery" |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from django.contrib.admin import TabularInline
from .models import GalleryPhoto
class PhotoInline(TabularInline):
<|fim_middle|>
<|fim▁end|> | """
Tabular inline that will be displayed in the gallery form during frontend
editing or in the admin site.
"""
model = GalleryPhoto
fk_name = "gallery" |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
This class instantiates the modules, takes care of the module's versions
and gets the module's select queries.
"""
# dictonary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""<|fim▁hole|>
# finds all modules to import
for module_name in modules.__all__:
# imports an instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
'Modul is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
# adds the module to the list of instantieated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
# appends the module with it's version to the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
# updates the request entry
self.server.db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
raise VersionError('Old module version detected!' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
Returns the module's search queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""<|fim▁end|> | |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
<|fim_middle|>
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | """
This class instantiates the modules, takes care of the module's versions
and gets the module's select queries.
"""
# dictonary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
# imports an instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
'Modul is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
# adds the module to the list of instantieated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
# appends the module with it's version to the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
# updates the request entry
self.server.db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
raise VersionError('Old module version detected!' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
Returns the module's search queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
This class instantiates the modules, takes care of the module's versions
and gets the module's select queries.
"""
# dictonary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
<|fim_middle|>
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
# imports an instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
'Modul is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
# adds the module to the list of instantieated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
# appends the module with it's version to the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
# updates the request entry
self.server.db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
raise VersionError('Old module version detected!' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
Returns the module's search queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions() |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
This class instantiates the modules, takes care of the module's versions
and gets the module's select queries.
"""
# dictonary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
<|fim_middle|>
############################################################################
def _check_module_versions(self):
"""
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
# appends the module with it's version to the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
# updates the request entry
self.server.db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
raise VersionError('Old module version detected!' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
Returns the module's search queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | """
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
# imports an instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
'Modul is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
# adds the module to the list of instantieated modules
self._instantiated_modules[module.__class__.__name__] = module |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
This class instantiates the modules, takes care of the module's versions
and gets the module's select queries.
"""
# dictonary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
# imports an instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
'Modul is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
# adds the module to the list of instantieated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
<|fim_middle|>
############################################################################
def get_module_select_queries(self):
"""
Returns the module's search queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | """
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
# appends the module with it's version to the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
# updates the request entry
self.server.db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
raise VersionError('Old module version detected!' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version)) |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
This class instantiates the modules, takes care of the module's versions
and gets the module's select queries.
"""
# dictonary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
# imports an instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
'Modul is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
# adds the module to the list of instantieated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
# appends the module with it's version to the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
# updates the request entry
self.server.db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
raise VersionError('Old module version detected!' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
<|fim_middle|>
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | """
Returns the module's search queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
This class instantiates the modules, takes care of the module's versions
and gets the module's select queries.
"""
# dictonary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
# imports an instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
'Modul is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
# adds the module to the list of instantieated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the version entry in the database
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
<|fim_middle|>
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | """
Exception for module subclass errors.
""" |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules' versions
    and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
                    .format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
        Method to check the modules' versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the version entry in the database
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
<|fim_middle|>
<|fim▁end|> | """
Exception for module version errors.
""" |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules' versions
    and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
<|fim_middle|>
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
        Method to check the modules' versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the version entry in the database
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__)) |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules' versions
    and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
                    .format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
        Method to check the modules' versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
<|fim_middle|>
elif result[0][0] < module_version:
                # updates the version entry in the database
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version)) |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules' versions
    and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
                    .format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
        Method to check the modules' versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the version entry in the database
<|fim_middle|>
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,)) |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules' versions
    and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
                    .format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
        Method to check the modules' versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the version entry in the database
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
<|fim_middle|>
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version)) |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules' versions
    and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def <|fim_middle|>(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
                    .format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
        Method to check the modules' versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the version entry in the database
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | __init__ |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules' versions
    and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def <|fim_middle|>(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
                    .format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
        Method to check the modules' versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the version entry in the database
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | _instantiate_modules |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules' versions
    and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
                    .format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def <|fim_middle|>(self):
"""
        Method to check the modules' versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the version entry in the database
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | _check_module_versions |
<|file_name|>Scheduler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules' versions
    and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
                    .format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
        Method to check the modules' versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # inserts the module with its version into the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the version entry in the database
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def <|fim_middle|>(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
<|fim▁end|> | get_module_select_queries |
<|file_name|>student_cnn_xe.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Student CNN encoder for XE training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.encoders.core.cnn_util import conv_layer, max_pool, batch_normalization
############################################################
# Architecture: (feature map, kernel(f*t), stride(f,t))
# CNN1: (128, 9*9, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (3,1)
# CNN2: (256, 3*4, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (1,1)
# fc: 2048 (ReLU) * 4 layers
############################################################
class StudentCNNXEEncoder(object):
"""Student CNN encoder for XE training.
Args:
input_size (int): the dimensions of input vectors.
This is expected to be num_channels * 3 (static + Δ + ΔΔ)
splice (int): frames to splice
num_stack (int): the number of frames to stack
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
name (string, optional): the name of encoder
"""
def __init__(self,
input_size,
splice,
num_stack,
parameter_init,
name='cnn_student_xe_encoder'):
assert input_size % 3 == 0
self.num_channels = (input_size // 3) // num_stack // splice
self.splice = splice
self.num_stack = num_stack
self.parameter_init = parameter_init
self.name = name
def __call__(self, inputs, keep_prob, is_training):
"""Construct model graph.
Args:
inputs (placeholder): A tensor of size
`[B, input_size (num_channels * splice * num_stack * 3)]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):<|fim▁hole|> otherwise, `[B, output_dim]`
"""
# inputs: 2D tensor `[B, input_dim]`
batch_size = tf.shape(inputs)[0]
input_dim = inputs.shape.as_list()[-1]
# NOTE: input_dim: num_channels * splice * num_stack * 3
# for debug
# print(input_dim) # 1200
# print(self.num_channels) # 40
# print(self.splice) # 5
# print(self.num_stack) # 2
assert input_dim == self.num_channels * self.splice * self.num_stack * 3
# Reshape to 4D tensor `[B, num_channels, splice * num_stack, 3]`
inputs = tf.reshape(
inputs,
shape=[batch_size, self.num_channels, self.splice * self.num_stack, 3])
# NOTE: filter_size: `[H, W, C_in, C_out]`
with tf.variable_scope('CNN1'):
inputs = conv_layer(inputs,
filter_size=[9, 9, 3, 128],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[3, 1],
stride=[3, 1],
name='max_pool')
with tf.variable_scope('CNN2'):
inputs = conv_layer(inputs,
filter_size=[3, 4, 128, 256],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
# Reshape to 2D tensor `[B, new_h * new_w * C_out]`
outputs = tf.reshape(
inputs, shape=[batch_size, np.prod(inputs.shape.as_list()[-3:])])
for i in range(1, 5, 1):
with tf.variable_scope('fc%d' % (i)) as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=2048,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
return outputs<|fim▁end|> | Returns:
outputs: Encoder states.
if time_major is True, a tensor of size `[T, B, output_dim]` |
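A minimal usage sketch for the StudentCNNXEEncoder rows above and below (TF1 graph mode, matching the tf.variable_scope / tf.contrib API the encoder uses). The sizes mirror the debug comments in the rows (40 channels, splice 5, stack 2, so input_size = 40 * 5 * 2 * 3 = 1200); parameter_init=0.1 is an arbitrary assumed value, and conv_layer/max_pool/batch_normalization are assumed importable from the project's cnn_util module.

# Illustrative only: builds the graph once and inspects the output shape.
import tensorflow as tf

encoder = StudentCNNXEEncoder(input_size=1200, splice=5, num_stack=2,
                              parameter_init=0.1)
inputs = tf.placeholder(tf.float32, shape=[None, 1200], name='inputs')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
outputs = encoder(inputs, keep_prob, is_training=True)
print(outputs.get_shape())  # (?, 2048) after the four fc layers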
<|file_name|>student_cnn_xe.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Student CNN encoder for XE training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.encoders.core.cnn_util import conv_layer, max_pool, batch_normalization
############################################################
# Architecture: (feature map, kernel(f*t), stride(f,t))
# CNN1: (128, 9*9, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (3,1)
# CNN2: (256, 3*4, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (1,1)
# fc: 2048 (ReLU) * 4 layers
############################################################
class StudentCNNXEEncoder(object):
<|fim_middle|>
<|fim▁end|> | """Student CNN encoder for XE training.
Args:
input_size (int): the dimensions of input vectors.
This is expected to be num_channels * 3 (static + Δ + ΔΔ)
splice (int): frames to splice
num_stack (int): the number of frames to stack
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
name (string, optional): the name of encoder
"""
def __init__(self,
input_size,
splice,
num_stack,
parameter_init,
name='cnn_student_xe_encoder'):
assert input_size % 3 == 0
self.num_channels = (input_size // 3) // num_stack // splice
self.splice = splice
self.num_stack = num_stack
self.parameter_init = parameter_init
self.name = name
def __call__(self, inputs, keep_prob, is_training):
"""Construct model graph.
Args:
inputs (placeholder): A tensor of size
`[B, input_size (num_channels * splice * num_stack * 3)]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: Encoder states.
if time_major is True, a tensor of size `[T, B, output_dim]`
otherwise, `[B, output_dim]`
"""
# inputs: 2D tensor `[B, input_dim]`
batch_size = tf.shape(inputs)[0]
input_dim = inputs.shape.as_list()[-1]
# NOTE: input_dim: num_channels * splice * num_stack * 3
# for debug
# print(input_dim) # 1200
# print(self.num_channels) # 40
# print(self.splice) # 5
# print(self.num_stack) # 2
assert input_dim == self.num_channels * self.splice * self.num_stack * 3
# Reshape to 4D tensor `[B, num_channels, splice * num_stack, 3]`
inputs = tf.reshape(
inputs,
shape=[batch_size, self.num_channels, self.splice * self.num_stack, 3])
# NOTE: filter_size: `[H, W, C_in, C_out]`
with tf.variable_scope('CNN1'):
inputs = conv_layer(inputs,
filter_size=[9, 9, 3, 128],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[3, 1],
stride=[3, 1],
name='max_pool')
with tf.variable_scope('CNN2'):
inputs = conv_layer(inputs,
filter_size=[3, 4, 128, 256],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
# Reshape to 2D tensor `[B, new_h * new_w * C_out]`
outputs = tf.reshape(
inputs, shape=[batch_size, np.prod(inputs.shape.as_list()[-3:])])
for i in range(1, 5, 1):
with tf.variable_scope('fc%d' % (i)) as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=2048,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
return outputs
|
<|file_name|>student_cnn_xe.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Student CNN encoder for XE training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.encoders.core.cnn_util import conv_layer, max_pool, batch_normalization
############################################################
# Architecture: (feature map, kernel(f*t), stride(f,t))
# CNN1: (128, 9*9, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (3,1)
# CNN2: (256, 3*4, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (1,1)
# fc: 2048 (ReLU) * 4 layers
############################################################
class StudentCNNXEEncoder(object):
"""Student CNN encoder for XE training.
Args:
input_size (int): the dimensions of input vectors.
This is expected to be num_channels * 3 (static + Δ + ΔΔ)
splice (int): frames to splice
num_stack (int): the number of frames to stack
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
name (string, optional): the name of encoder
"""
def __init__(self,
input_size,
splice,
num_stack,
parameter_init,
name='cnn_student_xe_encoder'):
ass<|fim_middle|>
def __call__(self, inputs, keep_prob, is_training):
"""Construct model graph.
Args:
inputs (placeholder): A tensor of size
`[B, input_size (num_channels * splice * num_stack * 3)]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: Encoder states.
if time_major is True, a tensor of size `[T, B, output_dim]`
otherwise, `[B, output_dim]`
"""
# inputs: 2D tensor `[B, input_dim]`
batch_size = tf.shape(inputs)[0]
input_dim = inputs.shape.as_list()[-1]
# NOTE: input_dim: num_channels * splice * num_stack * 3
# for debug
# print(input_dim) # 1200
# print(self.num_channels) # 40
# print(self.splice) # 5
# print(self.num_stack) # 2
assert input_dim == self.num_channels * self.splice * self.num_stack * 3
# Reshape to 4D tensor `[B, num_channels, splice * num_stack, 3]`
inputs = tf.reshape(
inputs,
shape=[batch_size, self.num_channels, self.splice * self.num_stack, 3])
# NOTE: filter_size: `[H, W, C_in, C_out]`
with tf.variable_scope('CNN1'):
inputs = conv_layer(inputs,
filter_size=[9, 9, 3, 128],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[3, 1],
stride=[3, 1],
name='max_pool')
with tf.variable_scope('CNN2'):
inputs = conv_layer(inputs,
filter_size=[3, 4, 128, 256],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
# Reshape to 2D tensor `[B, new_h * new_w * C_out]`
outputs = tf.reshape(
inputs, shape=[batch_size, np.prod(inputs.shape.as_list()[-3:])])
for i in range(1, 5, 1):
with tf.variable_scope('fc%d' % (i)) as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=2048,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
return outputs
<|fim▁end|> | ert input_size % 3 == 0
self.num_channels = (input_size // 3) // num_stack // splice
self.splice = splice
self.num_stack = num_stack
self.parameter_init = parameter_init
self.name = name
|
<|file_name|>student_cnn_xe.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Student CNN encoder for XE training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.encoders.core.cnn_util import conv_layer, max_pool, batch_normalization
############################################################
# Architecture: (feature map, kernel(f*t), stride(f,t))
# CNN1: (128, 9*9, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (3,1)
# CNN2: (256, 3*4, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (1,1)
# fc: 2048 (ReLU) * 4 layers
############################################################
class StudentCNNXEEncoder(object):
"""Student CNN encoder for XE training.
Args:
input_size (int): the dimensions of input vectors.
This is expected to be num_channels * 3 (static + Δ + ΔΔ)
splice (int): frames to splice
num_stack (int): the number of frames to stack
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
name (string, optional): the name of encoder
"""
def __init__(self,
input_size,
splice,
num_stack,
parameter_init,
name='cnn_student_xe_encoder'):
assert input_size % 3 == 0
self.num_channels = (input_size // 3) // num_stack // splice
self.splice = splice
self.num_stack = num_stack
self.parameter_init = parameter_init
self.name = name
def __call__(self, inputs, keep_prob, is_training):
"""<|fim_middle|>
<|fim▁end|> | Construct model graph.
Args:
inputs (placeholder): A tensor of size
`[B, input_size (num_channels * splice * num_stack * 3)]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: Encoder states.
if time_major is True, a tensor of size `[T, B, output_dim]`
otherwise, `[B, output_dim]`
"""
# inputs: 2D tensor `[B, input_dim]`
batch_size = tf.shape(inputs)[0]
input_dim = inputs.shape.as_list()[-1]
# NOTE: input_dim: num_channels * splice * num_stack * 3
# for debug
# print(input_dim) # 1200
# print(self.num_channels) # 40
# print(self.splice) # 5
# print(self.num_stack) # 2
assert input_dim == self.num_channels * self.splice * self.num_stack * 3
# Reshape to 4D tensor `[B, num_channels, splice * num_stack, 3]`
inputs = tf.reshape(
inputs,
shape=[batch_size, self.num_channels, self.splice * self.num_stack, 3])
# NOTE: filter_size: `[H, W, C_in, C_out]`
with tf.variable_scope('CNN1'):
inputs = conv_layer(inputs,
filter_size=[9, 9, 3, 128],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[3, 1],
stride=[3, 1],
name='max_pool')
with tf.variable_scope('CNN2'):
inputs = conv_layer(inputs,
filter_size=[3, 4, 128, 256],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
# Reshape to 2D tensor `[B, new_h * new_w * C_out]`
outputs = tf.reshape(
inputs, shape=[batch_size, np.prod(inputs.shape.as_list()[-3:])])
for i in range(1, 5, 1):
with tf.variable_scope('fc%d' % (i)) as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=2048,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
return outputs
|
<|file_name|>student_cnn_xe.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Student CNN encoder for XE training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.encoders.core.cnn_util import conv_layer, max_pool, batch_normalization
############################################################
# Architecture: (feature map, kernel(f*t), stride(f,t))
# CNN1: (128, 9*9, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (3,1)
# CNN2: (256, 3*4, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (1,1)
# fc: 2048 (ReLU) * 4 layers
############################################################
class StudentCNNXEEncoder(object):
"""Student CNN encoder for XE training.
Args:
input_size (int): the dimensions of input vectors.
This is expected to be num_channels * 3 (static + Δ + ΔΔ)
splice (int): frames to splice
num_stack (int): the number of frames to stack
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
name (string, optional): the name of encoder
"""
def __i<|fim_middle|>lf,
input_size,
splice,
num_stack,
parameter_init,
name='cnn_student_xe_encoder'):
assert input_size % 3 == 0
self.num_channels = (input_size // 3) // num_stack // splice
self.splice = splice
self.num_stack = num_stack
self.parameter_init = parameter_init
self.name = name
def __call__(self, inputs, keep_prob, is_training):
"""Construct model graph.
Args:
inputs (placeholder): A tensor of size
`[B, input_size (num_channels * splice * num_stack * 3)]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: Encoder states.
if time_major is True, a tensor of size `[T, B, output_dim]`
otherwise, `[B, output_dim]`
"""
# inputs: 2D tensor `[B, input_dim]`
batch_size = tf.shape(inputs)[0]
input_dim = inputs.shape.as_list()[-1]
# NOTE: input_dim: num_channels * splice * num_stack * 3
# for debug
# print(input_dim) # 1200
# print(self.num_channels) # 40
# print(self.splice) # 5
# print(self.num_stack) # 2
assert input_dim == self.num_channels * self.splice * self.num_stack * 3
# Reshape to 4D tensor `[B, num_channels, splice * num_stack, 3]`
inputs = tf.reshape(
inputs,
shape=[batch_size, self.num_channels, self.splice * self.num_stack, 3])
# NOTE: filter_size: `[H, W, C_in, C_out]`
with tf.variable_scope('CNN1'):
inputs = conv_layer(inputs,
filter_size=[9, 9, 3, 128],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[3, 1],
stride=[3, 1],
name='max_pool')
with tf.variable_scope('CNN2'):
inputs = conv_layer(inputs,
filter_size=[3, 4, 128, 256],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
# Reshape to 2D tensor `[B, new_h * new_w * C_out]`
outputs = tf.reshape(
inputs, shape=[batch_size, np.prod(inputs.shape.as_list()[-3:])])
for i in range(1, 5, 1):
with tf.variable_scope('fc%d' % (i)) as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=2048,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
return outputs
<|fim▁end|> | nit__(se |
<|file_name|>student_cnn_xe.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Student CNN encoder for XE training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.encoders.core.cnn_util import conv_layer, max_pool, batch_normalization
############################################################
# Architecture: (feature map, kernel(f*t), stride(f,t))
# CNN1: (128, 9*9, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (3,1)
# CNN2: (256, 3*4, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (1,1)
# fc: 2048 (ReLU) * 4 layers
############################################################
class StudentCNNXEEncoder(object):
"""Student CNN encoder for XE training.
Args:
input_size (int): the dimensions of input vectors.
This is expected to be num_channels * 3 (static + Δ + ΔΔ)
splice (int): frames to splice
num_stack (int): the number of frames to stack
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
name (string, optional): the name of encoder
"""
def __init__(self,
input_size,
splice,
num_stack,
parameter_init,
name='cnn_student_xe_encoder'):
assert input_size % 3 == 0
self.num_channels = (input_size // 3) // num_stack // splice
self.splice = splice
self.num_stack = num_stack
self.parameter_init = parameter_init
self.name = name
def __c<|fim_middle|>lf, inputs, keep_prob, is_training):
"""Construct model graph.
Args:
inputs (placeholder): A tensor of size
`[B, input_size (num_channels * splice * num_stack * 3)]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: Encoder states.
if time_major is True, a tensor of size `[T, B, output_dim]`
otherwise, `[B, output_dim]`
"""
# inputs: 2D tensor `[B, input_dim]`
batch_size = tf.shape(inputs)[0]
input_dim = inputs.shape.as_list()[-1]
# NOTE: input_dim: num_channels * splice * num_stack * 3
# for debug
# print(input_dim) # 1200
# print(self.num_channels) # 40
# print(self.splice) # 5
# print(self.num_stack) # 2
assert input_dim == self.num_channels * self.splice * self.num_stack * 3
# Reshape to 4D tensor `[B, num_channels, splice * num_stack, 3]`
inputs = tf.reshape(
inputs,
shape=[batch_size, self.num_channels, self.splice * self.num_stack, 3])
# NOTE: filter_size: `[H, W, C_in, C_out]`
with tf.variable_scope('CNN1'):
inputs = conv_layer(inputs,
filter_size=[9, 9, 3, 128],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[3, 1],
stride=[3, 1],
name='max_pool')
with tf.variable_scope('CNN2'):
inputs = conv_layer(inputs,
filter_size=[3, 4, 128, 256],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
# Reshape to 2D tensor `[B, new_h * new_w * C_out]`
outputs = tf.reshape(
inputs, shape=[batch_size, np.prod(inputs.shape.as_list()[-3:])])
for i in range(1, 5, 1):
with tf.variable_scope('fc%d' % (i)) as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=2048,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
return outputs
<|fim▁end|> | all__(se |
<|file_name|>models.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext as _
class Application(models.Model):
client_id = models.CharField(_('Client ID'), max_length=40, blank=False, primary_key=True)
client_secret = models.CharField(_('Client secret'), max_length=40, blank=False)
name = models.CharField(_('Application Name'), max_length=40, blank=False)
home_url = models.CharField(_('URL'), max_length=255, blank=False)
redirect_uri = models.CharField(_('Redirect URI'), max_length=255, blank=True)
def __unicode__(self):
return unicode(self.client_id)
class Code(models.Model):
<|fim▁hole|> creation_timestamp = models.CharField(_('Creation timestamp'), max_length=40, blank=False)
expires_in = models.CharField(_('Expires in'), max_length=40, blank=True)
class Meta:
unique_together = ('client', 'code')
def __unicode__(self):
return unicode(self.code)
class Token(models.Model):
token = models.CharField(_('Token'), max_length=40, blank=False, primary_key=True)
client = models.ForeignKey(Application)
user = models.ForeignKey(User)
scope = models.CharField(_('Scope'), max_length=255, blank=True)
token_type = models.CharField(_('Token type'), max_length=10, blank=False)
refresh_token = models.CharField(_('Refresh token'), max_length=40, blank=True)
creation_timestamp = models.CharField(_('Creation timestamp'), max_length=40, blank=False)
expires_in = models.CharField(_('Expires in'), max_length=40, blank=True)
def __unicode__(self):
return unicode(self.token)
@receiver(post_save, sender=Application)
def invalidate_tokens_on_change(sender, instance, created, raw, **kwargs):
if created is False:
instance.token_set.all().update(creation_timestamp='0')<|fim▁end|> | client = models.ForeignKey(Application)
user = models.ForeignKey(User)
scope = models.CharField(_('Scope'), max_length=255, blank=True)
code = models.CharField(_('Code'), max_length=255, blank=False) |
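The post_save receiver in the models.py rows invalidates tokens whenever an existing Application is edited: with created=False it zeroes the creation_timestamp of every related Token. An illustrative ORM sketch (the client_id value is hypothetical and a configured Django environment is assumed):

app = Application.objects.get(client_id='example-client-id')  # hypothetical id
app.name = 'Renamed application'
app.save()  # fires post_save with created=False
# invalidate_tokens_on_change has now set creation_timestamp='0' on all of
# app's tokens, so expiry checks against the timestamp will treat them as stale.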
<|file_name|>models.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext as _
class Application(models.Model):
c<|fim_middle|>
class Code(models.Model):
client = models.ForeignKey(Application)
user = models.ForeignKey(User)
scope = models.CharField(_('Scope'), max_length=255, blank=True)
code = models.CharField(_('Code'), max_length=255, blank=False)
creation_timestamp = models.CharField(_('Creation timestamp'), max_length=40, blank=False)
expires_in = models.CharField(_('Expires in'), max_length=40, blank=True)
class Meta:
unique_together = ('client', 'code')
def __unicode__(self):
return unicode(self.code)
class Token(models.Model):
token = models.CharField(_('Token'), max_length=40, blank=False, primary_key=True)
client = models.ForeignKey(Application)
user = models.ForeignKey(User)
scope = models.CharField(_('Scope'), max_length=255, blank=True)
token_type = models.CharField(_('Token type'), max_length=10, blank=False)
refresh_token = models.CharField(_('Refresh token'), max_length=40, blank=True)
creation_timestamp = models.CharField(_('Creation timestamp'), max_length=40, blank=False)
expires_in = models.CharField(_('Expires in'), max_length=40, blank=True)
def __unicode__(self):
return unicode(self.token)
@receiver(post_save, sender=Application)
def invalidate_tokens_on_change(sender, instance, created, raw, **kwargs):
if created is False:
instance.token_set.all().update(creation_timestamp='0')
<|fim▁end|> | lient_id = models.CharField(_('Client ID'), max_length=40, blank=False, primary_key=True)
client_secret = models.CharField(_('Client secret'), max_length=40, blank=False)
name = models.CharField(_('Application Name'), max_length=40, blank=False)
home_url = models.CharField(_('URL'), max_length=255, blank=False)
redirect_uri = models.CharField(_('Redirect URI'), max_length=255, blank=True)
def __unicode__(self):
return unicode(self.client_id)
# file: fit.py

"""
Poisson time series penalised likelihood regression
via the Berman-Turner device.
"""
from math import ceil
from importlib import reload

import numpy as np

from . import weighted_linear_model
from . import design_nonlattice as design

design = reload(design)  # pick up edits to the design module during development
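Background note (the standard Berman-Turner construction, stated here for
orientation rather than taken from this file): the inhomogeneous-Poisson
log-likelihood

    log L = \sum_i \log \lambda(t_i) - \int \lambda(t) \, dt

is approximated by a quadrature rule with nodes u_j (observed points plus
dummy points) and weights w_j, giving

    log L \approx \sum_j w_j (y_j \log \lambda(u_j) - \lambda(u_j)),
    where y_j = z_j / w_j and z_j = 1 iff u_j is an observed point.

Term for term this is a weighted Poisson-regression log-likelihood, so the
model can be fitted by any weighted (penalised) linear model — here the
`self.wlm` object — with the quadrature weights as sample weights, which is
what the sample_weight='bermanturner' option below refers to.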
class NonLatticeOneShot:
    """
    The simplest device.
    Uses a stepwise-constant quadrature rule and non-adaptive smoothing.
    """
    def __init__(
            self,
            positive=True,
            normalize=False,
            wlm=None,
            wlm_factory='WeightedLassoLarsCV',
            cum_interp='linear',
            smoothing=1.0,      # only for spline smoother
            step_size=0.25,     # only for dirac interpolant
            strategy='random',  # only for dirac interpolant
            *args, **kwargs):
        if wlm is None:
            # Allow reference by class name for easy serialization
            if isinstance(wlm_factory, str):
                wlm_factory = getattr(weighted_linear_model, wlm_factory)
            self.wlm = wlm_factory(
                positive=positive,
                normalize=normalize,
                *args, **kwargs
            )
        else:
            self.wlm = wlm
        self.big_n_hat_ = None
        self.cum_interp = cum_interp
        self.smoothing = smoothing
        self.strategy = strategy
        self.step_size = step_size

    def fit(
            self,
            obs_t,
            cum_obs,
            basis_lag=1.0,
            penalty_weight='adaptive',
            sample_weight='bermanturner',
            max_basis_span=float('inf'),
            big_n_hat=None,
            *args, **kwargs):
        self.obs_t_ = obs_t
        self.cum_obs_ = cum_obs
        if np.isscalar(basis_lag):
            # a scalar basis_lag is interpreted as a bin width
            basis_span = min(
                (np.amax(obs_t) - np.amin(obs_t)) / 2.0,
                max_basis_span
            )
            n_bins = ceil(basis_span / basis_lag)
            self.basis_lag_ = np.arange(n_bins + 1) * basis_lag
        else:
            self.basis_lag_ = basis_lag
        if big_n_hat is None:
            self.big_n_hat_ = self.predict_big_n()
        (
            self.inc_predictors_,
            self.inc_response_,
            self.inc_sample_weight_,
        ) = design.design_stepwise(
            obs_t=self.obs_t_,
            cum_obs=self.cum_obs_,
            basis_lag=self.basis_lag_,
            big_n_hat=self.big_n_hat_,
            sample_weight=sample_weight
        )
        self.wlm.fit(
            X=self.inc_predictors_,
            y=self.inc_response_,
            sample_weight=self.inc_sample_weight_,
            penalty_weight=penalty_weight,
            *args, **kwargs
        )

    def predict_intensity(self, obs_t=None):
        """
        Return forward-predicted intensity based on the fitted histogram,
        up to the last observations before the given times.
        NB: the body is currently identical to ``predict``.
        """
        return design.predict_increment(
            big_n=self.big_n_hat_,
            obs_t=obs_t if obs_t is not None else self.obs_t_,
            mu=self.intercept_,
            basis_lag=self.basis_lag_,
            coef=self.coef_)

    def predict(self, obs_t=None):
        """
        Return predicted increments based on the fitted histogram,
        up to the last observations before the given times.
        """
        return design.predict_increment(
            big_n=self.big_n_hat_,
            obs_t=obs_t if obs_t is not None else self.obs_t_,
            mu=self.intercept_,
            basis_lag=self.basis_lag_,
            coef=self.coef_)

    def predict_big_n(self, obs_t=None):
        """
        Return the predicted cumulative-count interpolant based on the
        observations, used as the quadrature backbone.
        """
        return design.interpolate(
            obs_t=self.obs_t_,
            cum_obs=self.cum_obs_,
            cum_interp=self.cum_interp,
            smoothing=self.smoothing,
            step_size=self.step_size,
            strategy=self.strategy,
        )

    @property
    def coef_(self):
        return self.wlm.coef_

    @property
    def eta_(self):
        # total mass of the fitted lag kernel
        return np.sum(self.coef_)

    @property
    def intercept_(self):
        return self.wlm.intercept_

    @property
    def alpha_(self):
        return self.wlm.alpha_

    @property
    def n_iter_(self):
        return self.wlm.n_iter_
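A minimal end-to-end sketch of the one-shot fitter (assumes this package's
`weighted_linear_model` and `design_nonlattice` modules are importable; the
event data below are synthetic):

    import numpy as np

    rng = np.random.default_rng(42)
    obs_t = np.sort(rng.uniform(0.0, 100.0, size=50))  # observation times
    cum_obs = np.cumsum(rng.poisson(2.0, size=50))     # cumulative event counts

    model = NonLatticeOneShot(cum_interp='linear')
    model.fit(obs_t, cum_obs, basis_lag=1.0)  # scalar lag => 1-unit histogram bins
    inc_hat = model.predict(obs_t)            # predicted increments at obs_t
    print(model.eta_)                         # total mass of the fitted kernel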
class NonLatticeIterative(NonLatticeOneShot):
    """
    Repeatedly forward-smooth to find an optimal interpolant.
    TODO: this doesn't do backwards losses.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(
            cum_interp='dirac',
            strategy='random',
            *args, **kwargs)

    def fit(
            self,
            obs_t,
            cum_obs,
            basis_lag=1.0,
            penalty_weight='adaptive',
            sample_weight='bermanturner',
            max_basis_span=float('inf'),
            big_n_hat=None,
            max_iter=3,
            *args, **kwargs):
        inner_model = NonLatticeOneShot(
            wlm=self.wlm,
            cum_interp='linear',
        )
        self.inner_model = inner_model
        self.obs_t_ = obs_t
        self.cum_obs_ = cum_obs
        if np.isscalar(basis_lag):
            # a scalar basis_lag is interpreted as a bin width
            basis_span = min(
                (np.amax(obs_t) - np.amin(obs_t)) / 2.0,
                max_basis_span
            )
            n_bins = ceil(basis_span / basis_lag)
            self.basis_lag_ = np.arange(n_bins + 1) * basis_lag
        else:
            self.basis_lag_ = basis_lag
        if big_n_hat is None:
            self.big_n_hat_ = self.predict_big_n()
        for i in range(max_iter):
            print('iteration', i + 1, 'of', max_iter)  # progress trace
            inner_model.fit(
                obs_t=self.big_n_hat_.spike_lattice,
                cum_obs=self.big_n_hat_.spike_cum_weight,
                *args,
                **kwargs)
            n_hat_arr = inner_model.predict(
                obs_t=self.big_n_hat_.spike_lattice,
            )
            self.big_n_hat_ = design.reweight_dirac_interpolant(
                self.big_n_hat_,
                n_hat_arr
            )
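The iterative variant exposes the same interface; a sketch under the same
synthetic-data assumptions as above:

    model = NonLatticeIterative()
    model.fit(obs_t, cum_obs, basis_lag=1.0, max_iter=3)

Each iteration refits the inner one-shot model on the current Dirac-comb
interpolant and then reweights that interpolant with the new predictions, so
the quadrature backbone and the regression fit are refined together.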
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
<|fim_middle|>
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | """
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
<|fim_middle|>
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
<|fim_middle|>
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
) |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
<|fim_middle|>
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | """
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_) |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
            # a scalar is interpreted as a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
        if big_n_hat is None:
            big_n_hat = self.predict_big_n()
        self.big_n_hat_ = big_n_hat
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
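        # The design step above casts the data as an increment regression:
        # lagged-increment basis values as predictors, observed increments
        # as the response, and Berman-Turner quadrature weights as
        # `sample_weight`, so the penalised weighted linear fit stands in
        # for Poisson maximum likelihood.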
def predict_intensity(self, obs_t=None):
"""
        Predict the forward intensity from the fitted model, up to the
        last observation before each of the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
<|fim_middle|>
def predict_big_n(self, obs_t=None):
"""
        Interpolate the cumulative observations to estimate the big-N
        path. ``obs_t`` is accepted for interface symmetry, but the
        interpolant is currently built from the training observations only.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
    Repeatedly forward-smooth to find an optimal interpolant.
    TODO: this does not yet handle backward losses.
"""
def __init__(
self,
*args, **kwargs):
        super().__init__(
            *args,
            cum_interp='dirac',
            strategy='random',
            **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
            # a scalar is interpreted as a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
        if big_n_hat is None:
            big_n_hat = self.predict_big_n()
        self.big_n_hat_ = big_n_hat
for i in range(max_iter):
            # progress trace over the smoothing iterations
            print('iteration', i + 1, 'of', max_iter)
            inner_model.fit(
                obs_t=self.big_n_hat_.spike_lattice,
                cum_obs=self.big_n_hat_.spike_cum_weight,
                basis_lag=self.basis_lag_,
                penalty_weight=penalty_weight,
                sample_weight=sample_weight,
                *args,
                **kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | """
        Predict increments from the fitted model, up to the last
        observation before each of the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_) |
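# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): fitting the one-shot model on synthetic
# data. The bare import path, the data-generating choices, and the bin width
# are assumptions, not part of the module above.
#
#     import numpy as np
#     from fit import NonLatticeOneShot
#
#     rng = np.random.RandomState(42)
#     obs_t = np.sort(rng.uniform(0.0, 100.0, size=200))  # irregular times
#     cum_obs = np.cumsum(rng.poisson(2.0, size=200))     # cumulative counts
#
#     model = NonLatticeOneShot(cum_interp='linear')
#     model.fit(obs_t, cum_obs, basis_lag=1.0)
#     print(model.eta_)                      # total basis-coefficient mass
#     print(model.predict_intensity(obs_t))  # intensity at observation times
# ---------------------------------------------------------------------------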
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
<|fim_middle|>
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | """
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
) |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
<|fim_middle|>
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | return self.wlm.coef_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
<|fim_middle|>
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | return np.sum(self.coef_) |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
<|fim_middle|>
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | return self.wlm.intercept_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
<|fim_middle|>
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | return self.wlm.alpha_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
<|fim_middle|>
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | return self.wlm.n_iter_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
<|fim_middle|>
<|fim▁end|> | """
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
) |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
<|fim_middle|>
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs) |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
<|fim_middle|>
<|fim▁end|> | inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
) |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
<|fim_middle|>
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
) |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
<|fim_middle|>
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | wlm_factory = getattr(weighted_linear_model, wlm_factory) |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
<|fim_middle|>
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | self.wlm = wlm |
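Each row above pairs a fill-in-the-middle prompt — the source file with one span replaced by <|fim_middle|> — with that masked span as the completion after the | separator. A minimal sketch of how a row can be spliced back into the original file follows; the marker strings are copied from the rows themselves, while the helper is hypothetical and not part of the dataset:

# Hypothetical helper, assuming the marker spellings used in these rows.
BEGIN, MIDDLE, END = '<|fim▁begin|>', '<|fim_middle|>', '<|fim▁end|>'

def splice_fim(prompt: str, completion: str) -> str:
    """Substitute the completion back into the masked span of a FIM prompt."""
    prefix = prompt.split(BEGIN, 1)[1]      # drop the <|file_name|> header
    prefix, rest = prefix.split(MIDDLE, 1)
    suffix = rest.split(END, 1)[0]
    return prefix + completion + suffix

# splice_fim(row['prompt'], row['completion']) should reproduce fit.py exactly.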
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
<|fim_middle|>
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag |
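The span masked in this row is the bin-edge construction shared by both estimator classes: a scalar basis_lag is read as a bin width and expanded into lag-bin edges capped at half the observation span. A standalone sketch with illustrative values (the data below are assumptions, not taken from the dataset):

from math import ceil
import numpy as np

obs_t = np.array([0.0, 1.3, 2.7, 6.1, 9.8])      # illustrative event times
basis_lag, max_basis_span = 1.0, float('inf')

basis_span = min((np.amax(obs_t) - np.amin(obs_t)) / 2.0, max_basis_span)
n_bins = ceil(basis_span / basis_lag)             # ceil(4.9) == 5
basis_lag_ = np.arange(n_bins + 1) * basis_lag    # array([0., 1., 2., 3., 4., 5.])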
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
<|fim_middle|>
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | self.basis_lag_ = basis_lag |
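The module docstring and the sample_weight='bermanturner' default refer to the Berman-Turner device: the point-process log-likelihood is approximated by a weighted Poisson regression over the data points plus dummy quadrature points. design_nonlattice is not shown in this dump, so what follows is only a generic sketch of that construction under its textbook definition, not the package's actual implementation:

import numpy as np

def berman_turner(data_t, dummy_t):
    """Quadrature points, responses and weights for the Berman-Turner device.
    Assumes all times are distinct, so every point gets positive weight."""
    t = np.concatenate([data_t, dummy_t])
    is_data = np.concatenate([np.ones(len(data_t)), np.zeros(len(dummy_t))])
    order = np.argsort(t)
    t, is_data = t[order], is_data[order]
    # each quadrature point owns half the gap to each of its neighbours
    gaps = np.diff(t, prepend=t[0], append=t[-1])
    w = (gaps[:-1] + gaps[1:]) / 2.0
    y = is_data / w      # regress y on the lag basis with sample weights w
    return t, y, w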
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
<|fim_middle|>
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | self.big_n_hat_ = self.predict_big_n() |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
<|fim_middle|>
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag |
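What distinguishes NonLatticeIterative from the one-shot class is the refinement loop at the end of fit: it alternates between refitting the inner model on the current Dirac interpolant's spike lattice and reweighting that interpolant with the model's own predictions. Factored out as a sketch — the argument names mirror the loop in the rows above, and reweight stands in for design.reweight_dirac_interpolant:

def refine_interpolant(inner_model, big_n_hat, reweight, max_iter=3):
    """Alternate fit-on-spikes and reweight-spikes for max_iter passes."""
    for _ in range(max_iter):
        inner_model.fit(obs_t=big_n_hat.spike_lattice,
                        cum_obs=big_n_hat.spike_cum_weight)
        n_hat_arr = inner_model.predict(obs_t=big_n_hat.spike_lattice)
        big_n_hat = reweight(big_n_hat, n_hat_arr)   # new spike weights
    return big_n_hat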
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
<|fim_middle|>
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | self.basis_lag_ = basis_lag |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
<|fim_middle|>
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | self.big_n_hat_ = self.predict_big_n() |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def <|fim_middle|>(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | __init__ |
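Finally, a hedged end-to-end usage sketch of the one-shot estimator. The import path and the synthetic data are assumptions — fit.py uses relative imports, so it is only importable inside its parent package:

import numpy as np
from somepackage.fit import NonLatticeOneShot    # hypothetical package path

rng = np.random.default_rng(0)
obs_t = np.sort(rng.uniform(0.0, 100.0, size=200))    # synthetic event times
cum_obs = np.arange(1, obs_t.size + 1, dtype=float)   # counting process N(t)

model = NonLatticeOneShot(cum_interp='linear')
model.fit(obs_t, cum_obs, basis_lag=1.0)
lam = model.predict_intensity()        # intensity at the observation times
print(model.eta_, model.intercept_)    # sum of kernel weights, baseline rate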