<|file_name|>constants.py<|end_file_name|><|fim▁begin|>"""Let's Encrypt constants.""" import logging from acme import challenges SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins" """Setuptools entry point group name for plugins.""" CLI_DEFAULTS = dict( config_files=["/etc/letsencrypt/cli.ini"], verbose_count=-(logging.WARNING / 10), server="https://www.letsencrypt-demo.org/acme/new-reg", rsa_key_size=2048, rollback_checkpoints=0, config_dir="/etc/letsencrypt", work_dir="/var/lib/letsencrypt", backup_dir="/var/lib/letsencrypt/backups", key_dir="/etc/letsencrypt/keys", certs_dir="/etc/letsencrypt/certs", cert_path="/etc/letsencrypt/certs/cert-letsencrypt.pem", chain_path="/etc/letsencrypt/certs/chain-letsencrypt.pem", renewer_config_file="/etc/letsencrypt/renewer.conf", no_verify_ssl=False, dvsni_port=challenges.DVSNI.PORT, ) """Defaults for CLI flags and `.IConfig` attributes.""" RENEWER_DEFAULTS = dict( renewer_config_file="/etc/letsencrypt/renewer.conf", renewal_configs_dir="/etc/letsencrypt/configs", archive_dir="/etc/letsencrypt/archive", live_dir="/etc/letsencrypt/live", renewer_enabled="yes", renew_before_expiry="30 days", deploy_before_expiry="20 days",<|fim▁hole|>) """Defaults for renewer script.""" EXCLUSIVE_CHALLENGES = frozenset([frozenset([ challenges.DVSNI, challenges.SimpleHTTP])]) """Mutually exclusive challenges.""" ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"] """List of possible :class:`letsencrypt.interfaces.IInstaller` enhancements. List of expected options parameters: - redirect: None - http-header: TODO - ocsp-stapling: TODO - spdy: TODO """ CONFIG_DIRS_MODE = 0o755 """Directory mode for ``.IConfig.config_dir`` et al.""" TEMP_CHECKPOINT_DIR = "temp_checkpoint" """Temporary checkpoint directory (relative to IConfig.work_dir).""" IN_PROGRESS_DIR = "IN_PROGRESS" """Directory used before a permanent checkpoint is finalized (relative to IConfig.work_dir).""" CERT_KEY_BACKUP_DIR = "keys-certs" """Directory where all certificates and keys are stored (relative to IConfig.work_dir. Used for easy revocation.""" ACCOUNTS_DIR = "accounts" """Directory where all accounts are saved.""" ACCOUNT_KEYS_DIR = "keys" """Directory where account keys are saved. Relative to ACCOUNTS_DIR.""" REC_TOKEN_DIR = "recovery_tokens" """Directory where all recovery tokens are saved (relative to IConfig.work_dir)."""<|fim▁end|>
<|file_name|>model_control_one_enabled_Difference_LinearTrend_Seasonal_Hour_NoAR.py<|end_file_name|><|fim▁begin|>import tests.model_control.test_ozone_custom_models_enabled as testmod<|fim▁hole|>testmod.build_model( ['Difference'] , ['LinearTrend'] , ['Seasonal_Hour'] , ['NoAR'] );<|fim▁end|>
<|file_name|>test_points.py<|end_file_name|><|fim▁begin|># (C) British Crown Copyright 2014 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the `iris.quickplot.points` function.""" from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests from iris.tests.unit.plot import TestGraphicStringCoord if tests.MPL_AVAILABLE: import iris.quickplot as qplt @tests.skip_plot<|fim▁hole|> self.assertBoundsTickLabels('yaxis') def test_xaxis_labels(self): qplt.points(self.cube, coords=('str_coord', 'bar')) self.assertBoundsTickLabels('xaxis') if __name__ == "__main__": tests.main()<|fim▁end|>
class TestStringCoordPlot(TestGraphicStringCoord): def test_yaxis_labels(self): qplt.points(self.cube, coords=('bar', 'str_coord'))
<|file_name|>test_points.py<|end_file_name|><|fim▁begin|># (C) British Crown Copyright 2014 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the `iris.quickplot.points` function.""" from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests from iris.tests.unit.plot import TestGraphicStringCoord if tests.MPL_AVAILABLE: import iris.quickplot as qplt @tests.skip_plot class TestStringCoordPlot(TestGraphicStringCoord): <|fim_middle|> if __name__ == "__main__": tests.main() <|fim▁end|>
def test_yaxis_labels(self): qplt.points(self.cube, coords=('bar', 'str_coord')) self.assertBoundsTickLabels('yaxis') def test_xaxis_labels(self): qplt.points(self.cube, coords=('str_coord', 'bar')) self.assertBoundsTickLabels('xaxis')
<|file_name|>test_points.py<|end_file_name|><|fim▁begin|># (C) British Crown Copyright 2014 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the `iris.quickplot.points` function.""" from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests from iris.tests.unit.plot import TestGraphicStringCoord if tests.MPL_AVAILABLE: import iris.quickplot as qplt @tests.skip_plot class TestStringCoordPlot(TestGraphicStringCoord): def test_yaxis_labels(self): <|fim_middle|> def test_xaxis_labels(self): qplt.points(self.cube, coords=('str_coord', 'bar')) self.assertBoundsTickLabels('xaxis') if __name__ == "__main__": tests.main() <|fim▁end|>
qplt.points(self.cube, coords=('bar', 'str_coord')) self.assertBoundsTickLabels('yaxis')
<|file_name|>test_points.py<|end_file_name|><|fim▁begin|># (C) British Crown Copyright 2014 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the `iris.quickplot.points` function.""" from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests from iris.tests.unit.plot import TestGraphicStringCoord if tests.MPL_AVAILABLE: import iris.quickplot as qplt @tests.skip_plot class TestStringCoordPlot(TestGraphicStringCoord): def test_yaxis_labels(self): qplt.points(self.cube, coords=('bar', 'str_coord')) self.assertBoundsTickLabels('yaxis') def test_xaxis_labels(self): <|fim_middle|> if __name__ == "__main__": tests.main() <|fim▁end|>
qplt.points(self.cube, coords=('str_coord', 'bar')) self.assertBoundsTickLabels('xaxis')
<|file_name|>test_points.py<|end_file_name|><|fim▁begin|># (C) British Crown Copyright 2014 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the `iris.quickplot.points` function.""" from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests from iris.tests.unit.plot import TestGraphicStringCoord if tests.MPL_AVAILABLE: <|fim_middle|> @tests.skip_plot class TestStringCoordPlot(TestGraphicStringCoord): def test_yaxis_labels(self): qplt.points(self.cube, coords=('bar', 'str_coord')) self.assertBoundsTickLabels('yaxis') def test_xaxis_labels(self): qplt.points(self.cube, coords=('str_coord', 'bar')) self.assertBoundsTickLabels('xaxis') if __name__ == "__main__": tests.main() <|fim▁end|>
import iris.quickplot as qplt
<|file_name|>test_points.py<|end_file_name|><|fim▁begin|># (C) British Crown Copyright 2014 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the `iris.quickplot.points` function.""" from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests from iris.tests.unit.plot import TestGraphicStringCoord if tests.MPL_AVAILABLE: import iris.quickplot as qplt @tests.skip_plot class TestStringCoordPlot(TestGraphicStringCoord): def test_yaxis_labels(self): qplt.points(self.cube, coords=('bar', 'str_coord')) self.assertBoundsTickLabels('yaxis') def test_xaxis_labels(self): qplt.points(self.cube, coords=('str_coord', 'bar')) self.assertBoundsTickLabels('xaxis') if __name__ == "__main__": <|fim_middle|> <|fim▁end|>
tests.main()
<|file_name|>test_points.py<|end_file_name|><|fim▁begin|># (C) British Crown Copyright 2014 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the `iris.quickplot.points` function.""" from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests from iris.tests.unit.plot import TestGraphicStringCoord if tests.MPL_AVAILABLE: import iris.quickplot as qplt @tests.skip_plot class TestStringCoordPlot(TestGraphicStringCoord): def <|fim_middle|>(self): qplt.points(self.cube, coords=('bar', 'str_coord')) self.assertBoundsTickLabels('yaxis') def test_xaxis_labels(self): qplt.points(self.cube, coords=('str_coord', 'bar')) self.assertBoundsTickLabels('xaxis') if __name__ == "__main__": tests.main() <|fim▁end|>
test_yaxis_labels
<|file_name|>test_points.py<|end_file_name|><|fim▁begin|># (C) British Crown Copyright 2014 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the `iris.quickplot.points` function.""" from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests from iris.tests.unit.plot import TestGraphicStringCoord if tests.MPL_AVAILABLE: import iris.quickplot as qplt @tests.skip_plot class TestStringCoordPlot(TestGraphicStringCoord): def test_yaxis_labels(self): qplt.points(self.cube, coords=('bar', 'str_coord')) self.assertBoundsTickLabels('yaxis') def <|fim_middle|>(self): qplt.points(self.cube, coords=('str_coord', 'bar')) self.assertBoundsTickLabels('xaxis') if __name__ == "__main__": tests.main() <|fim▁end|>
test_xaxis_labels
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf)<|fim▁hole|> pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj<|fim▁end|>
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version <|fim_middle|> <|fim▁end|>
VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF <|fim_middle|> def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj<|fim▁end|>
if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context)
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): <|fim_middle|> def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj<|fim▁end|>
"""In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context)
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): <|fim_middle|> def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj<|fim▁end|>
"""add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy)
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): <|fim_middle|> def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj<|fim▁end|>
"""remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!")
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): <|fim_middle|> @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj<|fim▁end|>
"""Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context)
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): <|fim_middle|> @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj<|fim▁end|>
"""Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): <|fim_middle|> @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj<|fim▁end|>
obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): <|fim_middle|> <|fim▁end|>
"""Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': <|fim_middle|> super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
raise exception.InvalidDeployType()
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': <|fim_middle|> for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
raise exception.InvalidDeployType()
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': <|fim_middle|> for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
raise exception.InvalidDeployType()
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): <|fim_middle|> vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
LOG.warning("The vf already exists") return None
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): <|fim_middle|> LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
                removed_vf = self.virtual_function_list.pop(idx)
                removed_vf.destroy(context)
                return
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: <|fim_middle|> return obj <|fim▁end|>
obj.virtual_function_list = []
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def <|fim_middle|>(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
create
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def <|fim_middle|>(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
save
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def <|fim_middle|>(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
add_vf
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def <|fim_middle|>(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
delete_vf
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def <|fim_middle|>(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
destroy
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def <|fim_middle|>(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
get
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also finds all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def <|fim_middle|>(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def _from_db_object(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
get_by_filter
<|file_name|>physical_function.py<|end_file_name|><|fim▁begin|># Copyright 2018 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging from oslo_versionedobjects import base as object_base from cyborg.common import exception from cyborg.db import api as dbapi from cyborg.objects import base from cyborg.objects import fields as object_fields from cyborg.objects.deployable import Deployable from cyborg.objects.virtual_function import VirtualFunction LOG = logging.getLogger(__name__) @base.CyborgObjectRegistry.register class PhysicalFunction(Deployable): # Version 1.0: Initial version VERSION = '1.0' virtual_function_list = [] def create(self, context): # To ensure the creating type is PF if self.type != 'pf': raise exception.InvalidDeployType() super(PhysicalFunction, self).create(context) def save(self, context): """In addition to save the pf, it should also save the vfs associated with this pf """ # To ensure the saving type is PF if self.type != 'pf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: exist_vf.save(context) super(PhysicalFunction, self).save(context) def add_vf(self, vf): """add a vf object to the virtual_function_list. If the vf already exists, it will ignore, otherwise, the vf will be appended to the list """ if not isinstance(vf, VirtualFunction) or vf.type != 'vf': raise exception.InvalidDeployType() for exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): LOG.warning("The vf already exists") return None vf.parent_uuid = self.uuid vf.root_uuid = self.root_uuid vf_copy = copy.deepcopy(vf) self.virtual_function_list.append(vf_copy) def delete_vf(self, context, vf): """remove a vf from the virtual_function_list if the vf does not exist, ignore it """ for idx, exist_vf in self.virtual_function_list: if base.obj_equal_prims(vf, exist_vf): removed_vf = self.virtual_function_list.pop(idx) removed_vf.destroy(context) return LOG.warning("The removing vf does not exist!") def destroy(self, context): """Delete a the pf from the DB.""" del self.virtual_function_list[:] super(PhysicalFunction, self).destroy(context) @classmethod def get(cls, context, uuid): """Find a DB Physical Function and return an Obj Physical Function. 
In addition, it will also find all the Virtual Functions associated with this Physical Function and place them in virtual_function_list """ db_pf = cls.dbapi.deployable_get(context, uuid) obj_pf = cls._from_db_object(cls(context), db_pf) pf_uuid = obj_pf.uuid query = {"parent_uuid": pf_uuid, "type": "vf"} db_vf_list = cls.dbapi.deployable_get_by_filters(context, query) for db_vf in db_vf_list: obj_vf = VirtualFunction.get(context, db_vf.uuid) obj_pf.virtual_function_list.append(obj_vf) return obj_pf @classmethod def get_by_filter(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, join=None): obj_dpl_list = [] filters['type'] = 'pf' db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, join_columns=join) for db_dpl in db_dpl_list: obj_dpl = cls._from_db_object(cls(context), db_dpl) query = {"parent_uuid": obj_dpl.uuid} vf_get_list = VirtualFunction.get_by_filter(context, query) obj_dpl.virtual_function_list = vf_get_list obj_dpl_list.append(obj_dpl) return obj_dpl_list @classmethod def <|fim_middle|>(cls, obj, db_obj): """Converts a physical function to a formal object. :param obj: An object of the class. :param db_obj: A DB model of the object :return: The object of the class with the database entity added """ obj = Deployable._from_db_object(obj, db_obj) if cls is PhysicalFunction: obj.virtual_function_list = [] return obj <|fim▁end|>
_from_db_object
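
A minimal usage sketch of the PhysicalFunction API captured in the rows above, assuming an OpenStack-style request context; the uuid and filter values are hypothetical placeholders, not values from the source:

# Hedged sketch; `ctx` and the uuid/filter values below are placeholders.
from cyborg.objects.physical_function import PhysicalFunction

def list_pf_and_children(ctx):
    pf = PhysicalFunction.get(ctx, 'some-pf-uuid')  # loads the PF plus its VFs
    matching = PhysicalFunction.get_by_filter(ctx, {'host': 'node-1'})
    return pf.virtual_function_list, matching
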
<|file_name|>scd.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
# extension imports from _NetworKit import PageRankNibble, GCE
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum():<|fim▁hole|> errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors<|fim▁end|>
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): <|fim_middle|> # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): <|fim_middle|> def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): <|fim_middle|> <|fim▁end|>
errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: <|fim_middle|> return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: <|fim_middle|> return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
unbalanced[reaction] = balance
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: <|fim_middle|> if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id)
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): <|fim_middle|> elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
errors.append("Reaction '%s' has infinite lower_bound" % reaction.id)
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): <|fim_middle|> if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id)
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): <|fim_middle|> elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
errors.append("Reaction '%s' has infinite upper_bound" % reaction.id)
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): <|fim_middle|> return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id)
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: <|fim_middle|> return errors <|fim▁end|>
if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula))
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): <|fim_middle|> return errors <|fim▁end|>
errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula))
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def <|fim_middle|>(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
check_mass_balance
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def <|fim_middle|>(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def check_metabolite_compartment_formula(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
check_reaction_bounds
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import from math import isinf, isnan from warnings import warn NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE "SBO:0000628", # DEMAND "SBO:0000629", # BIOMASS "SBO:0000631", # PSEUDOREACTION "SBO:0000632", # SINK } def check_mass_balance(model): unbalanced = {} for reaction in model.reactions: if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: balance = reaction.check_mass_balance() if balance: unbalanced[reaction] = balance return unbalanced # no longer strictly necessary, done by optlang solver interfaces def check_reaction_bounds(model): warn("no longer necessary, done by optlang solver interfaces", DeprecationWarning) errors = [] for reaction in model.reactions: if reaction.lower_bound > reaction.upper_bound: errors.append("Reaction '%s' has lower bound > upper bound" % reaction.id) if isinf(reaction.lower_bound): errors.append("Reaction '%s' has infinite lower_bound" % reaction.id) elif isnan(reaction.lower_bound): errors.append("Reaction '%s' has NaN for lower_bound" % reaction.id) if isinf(reaction.upper_bound): errors.append("Reaction '%s' has infinite upper_bound" % reaction.id) elif isnan(reaction.upper_bound): errors.append("Reaction '%s' has NaN for upper_bound" % reaction.id) return errors def <|fim_middle|>(model): errors = [] for met in model.metabolites: if met.formula is not None and len(met.formula) > 0: if not met.formula.isalnum(): errors.append("Metabolite '%s' formula '%s' not alphanumeric" % (met.id, met.formula)) return errors <|fim▁end|>
check_metabolite_compartment_formula
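
A short, hedged usage sketch for the three validators above, assuming a cobrapy-era Model object; the import path and the test-model loader are assumptions about the surrounding library, not part of the source:

# Hedged sketch; module path is an assumption, adjust to wherever validate.py lives.
from validate import check_mass_balance, check_metabolite_compartment_formula
import cobra.test  # assumption: old-style cobrapy with bundled test models

model = cobra.test.create_test_model("textbook")  # placeholder model
unbalanced = check_mass_balance(model)            # {reaction: {element: imbalance}}
formula_errors = check_metabolite_compartment_formula(model)
print(len(unbalanced), formula_errors[:3])
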
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import local def html():<|fim▁hole|> local('hovercraft -t ./sixfeetup_hovercraft formation_flask.rst ./build/')<|fim▁end|>
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import local def html(): <|fim_middle|> <|fim▁end|>
local('hovercraft -t ./sixfeetup_hovercraft formation_flask.rst ./build/')
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric.api import local def <|fim_middle|>(): local('hovercraft -t ./sixfeetup_hovercraft formation_flask.rst ./build/') <|fim▁end|>
html
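
For completeness, a hedged sketch of driving the task above without the fab CLI; under Fabric 1.x a task is a plain function, so this is equivalent to running fab html in the project directory:

# Hedged sketch, assuming Fabric 1.x semantics (tasks are plain functions).
from fabfile import html

html()  # runs hovercraft via local(), same effect as `fab html`
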
<|file_name|>0020_auto__add_field_question_order.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Question.order' db.add_column(u'survey_question', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def backwards(self, orm): # Deleting field 'Question.order' db.delete_column(u'survey_question', 'order') models = { u'survey.option': { 'Meta': {'object_name': 'Option'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}) }, u'survey.page': { 'Meta': {'ordering': "['order']", 'unique_together': "(('survey', 'order'),)", 'object_name': 'Page'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}) }, u'survey.question': { 'Meta': {'object_name': 'Question'}, 'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}), 'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'title': ('django.db.models.fields.TextField', [], {}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, u'survey.respondant': { 'Meta': {'object_name': 'Respondant'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}), 'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'bc967489-023c-46ce-b396-d209c8323fac'", 'max_length': '36', 'primary_key': 'True'}) },<|fim▁hole|> u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}), 'ts': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}) }, u'survey.survey': { 'Meta': {'object_name': 'Survey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}) } } complete_apps = ['survey']<|fim▁end|>
u'survey.response': { 'Meta': {'object_name': 'Response'}, 'answer': ('django.db.models.fields.TextField', [], {}),
<|file_name|>0020_auto__add_field_question_order.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): <|fim_middle|> <|fim▁end|>
def forwards(self, orm): # Adding field 'Question.order' db.add_column(u'survey_question', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def backwards(self, orm): # Deleting field 'Question.order' db.delete_column(u'survey_question', 'order') models = { u'survey.option': { 'Meta': {'object_name': 'Option'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}) }, u'survey.page': { 'Meta': {'ordering': "['order']", 'unique_together': "(('survey', 'order'),)", 'object_name': 'Page'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}) }, u'survey.question': { 'Meta': {'object_name': 'Question'}, 'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}), 'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'title': ('django.db.models.fields.TextField', [], {}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, u'survey.respondant': { 'Meta': {'object_name': 'Respondant'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}), 'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'bc967489-023c-46ce-b396-d209c8323fac'", 'max_length': '36', 'primary_key': 'True'}) }, u'survey.response': { 'Meta': {'object_name': 'Response'}, 'answer': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}) }, u'survey.survey': { 'Meta': {'object_name': 'Survey'}, u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}) } } complete_apps = ['survey']
<|file_name|>0020_auto__add_field_question_order.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Question.order' <|fim_middle|> def backwards(self, orm): # Deleting field 'Question.order' db.delete_column(u'survey_question', 'order') models = { u'survey.option': { 'Meta': {'object_name': 'Option'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}) }, u'survey.page': { 'Meta': {'ordering': "['order']", 'unique_together': "(('survey', 'order'),)", 'object_name': 'Page'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}) }, u'survey.question': { 'Meta': {'object_name': 'Question'}, 'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}), 'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'title': ('django.db.models.fields.TextField', [], {}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, u'survey.respondant': { 'Meta': {'object_name': 'Respondant'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}), 'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'bc967489-023c-46ce-b396-d209c8323fac'", 'max_length': '36', 'primary_key': 'True'}) }, u'survey.response': { 'Meta': {'object_name': 'Response'}, 'answer': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}), 'ts': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}) }, u'survey.survey': { 'Meta': {'object_name': 'Survey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}) } } complete_apps = ['survey']<|fim▁end|>
db.add_column(u'survey_question', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
<|file_name|>0020_auto__add_field_question_order.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Question.order' db.add_column(u'survey_question', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def backwards(self, orm): # Deleting field 'Question.order' <|fim_middle|> models = { u'survey.option': { 'Meta': {'object_name': 'Option'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}) }, u'survey.page': { 'Meta': {'ordering': "['order']", 'unique_together': "(('survey', 'order'),)", 'object_name': 'Page'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}) }, u'survey.question': { 'Meta': {'object_name': 'Question'}, 'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}), 'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'title': ('django.db.models.fields.TextField', [], {}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, u'survey.respondant': { 'Meta': {'object_name': 'Respondant'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}), 'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'bc967489-023c-46ce-b396-d209c8323fac'", 'max_length': '36', 'primary_key': 'True'}) }, u'survey.response': { 'Meta': {'object_name': 'Response'}, 'answer': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'respondant': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}) }, u'survey.survey': { 'Meta': {'object_name': 'Survey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}) } } complete_apps = ['survey']<|fim▁end|>
db.delete_column(u'survey_question', 'order')
<|file_name|>0020_auto__add_field_question_order.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def <|fim_middle|>(self, orm): # Adding field 'Question.order' db.add_column(u'survey_question', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def backwards(self, orm): # Deleting field 'Question.order' db.delete_column(u'survey_question', 'order') models = { u'survey.option': { 'Meta': {'object_name': 'Option'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}) }, u'survey.page': { 'Meta': {'ordering': "['order']", 'unique_together': "(('survey', 'order'),)", 'object_name': 'Page'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}) }, u'survey.question': { 'Meta': {'object_name': 'Question'}, 'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}), 'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'title': ('django.db.models.fields.TextField', [], {}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, u'survey.respondant': { 'Meta': {'object_name': 'Respondant'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}), 'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'bc967489-023c-46ce-b396-d209c8323fac'", 'max_length': '36', 'primary_key': 'True'}) }, u'survey.response': { 'Meta': {'object_name': 'Response'}, 'answer': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'respondant': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}) }, u'survey.survey': { 'Meta': {'object_name': 'Survey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}) } } complete_apps = ['survey']<|fim▁end|>
forwards
<|file_name|>0020_auto__add_field_question_order.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Question.order' db.add_column(u'survey_question', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def <|fim_middle|>(self, orm): # Deleting field 'Question.order' db.delete_column(u'survey_question', 'order') models = { u'survey.option': { 'Meta': {'object_name': 'Option'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}) }, u'survey.page': { 'Meta': {'ordering': "['order']", 'unique_together': "(('survey', 'order'),)", 'object_name': 'Page'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}) }, u'survey.question': { 'Meta': {'object_name': 'Question'}, 'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}), 'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'title': ('django.db.models.fields.TextField', [], {}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, u'survey.respondant': { 'Meta': {'object_name': 'Respondant'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}), 'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'bc967489-023c-46ce-b396-d209c8323fac'", 'max_length': '36', 'primary_key': 'True'}) }, u'survey.response': { 'Meta': {'object_name': 'Response'}, 'answer': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'respondant': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}) }, u'survey.survey': { 'Meta': {'object_name': 'Survey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}) } } complete_apps = ['survey']<|fim▁end|>
backwards
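
As a usage note for the migration above, a hedged sketch of applying and reverting it through Django's management API; this assumes a South-era project where `migrate` accepts an app label and a target migration number:

# Hedged sketch, assuming Django 1.x with South installed and configured.
from django.core.management import call_command

call_command('migrate', 'survey', '0020')  # forwards: adds Question.order
call_command('migrate', 'survey', '0019')  # backwards: drops Question.order
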
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, include, url from django.contrib import admin from api import views admin.autodiscover() from rest_framework.routers import DefaultRouter router = DefaultRouter() router.register(r'headings', views.HeadingViewSet) router.register(r'users', views.UserViewSet) urlpatterns = patterns('', url(r'^', include(router.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))<|fim▁hole|><|fim▁end|>
)
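
A small, hedged sketch of inspecting the routes the DefaultRouter above generates; the module path `api.urls` is an assumption about where this urlconf lives:

# Hedged sketch; prints the DRF-generated URL names (list/detail routes).
from api.urls import router  # module path is an assumption

for pattern in router.urls:
    print(pattern.name)
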
<|file_name|>package.py<|end_file_name|><|fim▁begin|># Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyIlmbase(AutotoolsPackage): """The PyIlmBase libraries provide Python bindings for the IlmBase libraries.""" homepage = "https://github.com/AcademySoftwareFoundation/openexr/tree/v2.3.0/PyIlmBase" url = "https://github.com/AcademySoftwareFoundation/openexr/releases/download/v2.3.0/pyilmbase-2.3.0.tar.gz" version('2.3.0', sha256='9c898bb16e7bc916c82bebdf32c343c0f2878fc3eacbafa49937e78f2079a425') depends_on('ilmbase') depends_on('boost+python') # https://github.com/AcademySoftwareFoundation/openexr/issues/336 parallel = False def configure_args(self): spec = self.spec args = [ '--with-boost-python-libname=boost_python{0}'.format( spec['python'].version.up_to(2).joined) ]<|fim▁hole|><|fim▁end|>
return args
<|file_name|>package.py<|end_file_name|><|fim▁begin|># Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyIlmbase(AutotoolsPackage): <|fim_middle|> <|fim▁end|>
"""The PyIlmBase libraries provides python bindings for the IlmBase libraries.""" homepage = "https://github.com/AcademySoftwareFoundation/openexr/tree/v2.3.0/PyIlmBase" url = "https://github.com/AcademySoftwareFoundation/openexr/releases/download/v2.3.0/pyilmbase-2.3.0.tar.gz" version('2.3.0', sha256='9c898bb16e7bc916c82bebdf32c343c0f2878fc3eacbafa49937e78f2079a425') depends_on('ilmbase') depends_on('boost+python') # https://github.com/AcademySoftwareFoundation/openexr/issues/336 parallel = False def configure_args(self): spec = self.spec args = [ '--with-boost-python-libname=boost_python{0}'.format( spec['python'].version.up_to(2).joined) ] return args
<|file_name|>package.py<|end_file_name|><|fim▁begin|># Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyIlmbase(AutotoolsPackage): """The PyIlmBase libraries provide Python bindings for the IlmBase libraries.""" homepage = "https://github.com/AcademySoftwareFoundation/openexr/tree/v2.3.0/PyIlmBase" url = "https://github.com/AcademySoftwareFoundation/openexr/releases/download/v2.3.0/pyilmbase-2.3.0.tar.gz" version('2.3.0', sha256='9c898bb16e7bc916c82bebdf32c343c0f2878fc3eacbafa49937e78f2079a425') depends_on('ilmbase') depends_on('boost+python') # https://github.com/AcademySoftwareFoundation/openexr/issues/336 parallel = False def configure_args(self): <|fim_middle|> <|fim▁end|>
spec = self.spec args = [ '--with-boost-python-libname=boost_python{0}'.format( spec['python'].version.up_to(2).joined) ] return args
<|file_name|>package.py<|end_file_name|><|fim▁begin|># Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyIlmbase(AutotoolsPackage): """The PyIlmBase libraries provide Python bindings for the IlmBase libraries.""" homepage = "https://github.com/AcademySoftwareFoundation/openexr/tree/v2.3.0/PyIlmBase" url = "https://github.com/AcademySoftwareFoundation/openexr/releases/download/v2.3.0/pyilmbase-2.3.0.tar.gz" version('2.3.0', sha256='9c898bb16e7bc916c82bebdf32c343c0f2878fc3eacbafa49937e78f2079a425') depends_on('ilmbase') depends_on('boost+python') # https://github.com/AcademySoftwareFoundation/openexr/issues/336 parallel = False def <|fim_middle|>(self): spec = self.spec args = [ '--with-boost-python-libname=boost_python{0}'.format( spec['python'].version.up_to(2).joined) ] return args <|fim▁end|>
configure_args
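The configure_args completions above build the Boost.Python library name from the Python version object. A minimal sketch of what that format call produces, outside Spack, with a plain string standing in for spec['python'].version.up_to(2).joined (the "27" result is an assumption inferred from the call shape, not taken from these rows):

    # Stand-in for spack's spec['python'].version.up_to(2).joined (assumed to yield e.g. "27")
    python_version = "2.7"
    joined = python_version.replace(".", "")
    arg = '--with-boost-python-libname=boost_python{0}'.format(joined)
    print(arg)  # --with-boost-python-libname=boost_python27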
<|file_name|>network_info.py<|end_file_name|><|fim▁begin|>from JumpScale import j descr = """ This jumpscript returns network info """ category = "monitoring" organization = "jumpscale" author = "[email protected]" license = "bsd" version = "1.0"<|fim▁hole|>def action(): return j.sal.nettools.getNetworkInfo() if __name__ == "__main__": print(action())<|fim▁end|>
roles = []
<|file_name|>network_info.py<|end_file_name|><|fim▁begin|>from JumpScale import j descr = """ This jumpscript returns network info """ category = "monitoring" organization = "jumpscale" author = "[email protected]" license = "bsd" version = "1.0" roles = [] def action(): <|fim_middle|> if __name__ == "__main__": print(action()) <|fim▁end|>
return j.sal.nettools.getNetworkInfo()
<|file_name|>network_info.py<|end_file_name|><|fim▁begin|>from JumpScale import j descr = """ This jumpscript returns network info """ category = "monitoring" organization = "jumpscale" author = "[email protected]" license = "bsd" version = "1.0" roles = [] def action(): return j.sal.nettools.getNetworkInfo() if __name__ == "__main__": <|fim_middle|> <|fim▁end|>
print(action())
<|file_name|>network_info.py<|end_file_name|><|fim▁begin|>from JumpScale import j descr = """ This jumpscript returns network info """ category = "monitoring" organization = "jumpscale" author = "[email protected]" license = "bsd" version = "1.0" roles = [] def <|fim_middle|>(): return j.sal.nettools.getNetworkInfo() if __name__ == "__main__": print(action()) <|fim▁end|>
action
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample):<|fim▁hole|> if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0)<|fim▁end|>
"""Return the probability the module assignes each label"""
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): <|fim_middle|> <|fim▁end|>
"""A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen charcters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Trainiere (inkrementell) mit Sample. Evtl zusätzlich mit best. Menge alter Daten, damit overfitten auf neue Daten verhindert wird.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Trainiere mit Liste von Daten. Evtl weitere Paramter nötig (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Gibt zurück, wie der Klassifikator ein gegebenes Sample klassifizieren würde""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assignes each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0)
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): <|fim_middle|> def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): <|fim_middle|> def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
"""Reset classification module to status before training""" resetWeights(self.model)
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): <|fim_middle|> def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
"""Trainiere (inkrementell) mit Sample. Evtl zusätzlich mit best. Menge alter Daten, damit overfitten auf neue Daten verhindert wird.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose)
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): "<|fim_middle|> def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
""Trainiere mit Liste von Daten. Evtl weitere Paramter nötig (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): ""<|fim_middle|> def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
"Gibt zurück, wie der Klassifikator ein gegebenes Sample klassifizieren würde""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample))
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """R<|fim_middle|> def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
eturn the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """E<|fim_middle|> <|fim▁end|>
xtract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0)
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: retu <|fim_middle|> sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
rn 0
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: retu <|fim_middle|> sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
rn [0, 0, 0, 0, 0, 0, 0, 0]
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def <|fim_middle|>(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
__init__
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def <|fim_middle|>(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
resetAllTraining
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def <|fim_middle|>(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
trainOnSample
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def t<|fim_middle|>self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
rain(
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def pr<|fim_middle|>elf, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def predictLabelAndProbability(self, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
edictLabel(s
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from Models.FeatureProcessing import * from keras.models import Sequential from keras.layers import Activation, Dense, LSTM from keras.optimizers import Adam, SGD import numpy as np import abc from ClassificationModule import ClassificationModule class descriptionreponamelstm(ClassificationModule): """A basic lstm neural network""" def __init__(self, num_hidden_layers=3): ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character") hidden_size = 300 self.maxlen = 300 # Set output_size self.output_size = 7 # Hardcoded for 7 classes model = Sequential() # Maximum of self.maxlen characters allowed, each in one-hot-encoded array model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength()))) for _ in range(num_hidden_layers): model.add(Dense(hidden_size)) model.add(Dense(self.output_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy']) self.model = model print "\t-", self.name def resetAllTraining(self): """Reset classification module to status before training""" resetWeights(self.model) def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True): """Train (incrementally) on a sample. Possibly together with a certain amount of old data, so that overfitting on the new data is prevented.""" readme_vec = self.formatInputData(sample) label_index = getLabelIndex(sample) label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose) def train(self, samples, nb_epoch=200, shuffle=True, verbose=True): """Train on a list of samples. Possibly needs further parameters (nb_epoch, learning_rate, ...)""" train_samples = [] train_lables = [] for sample in samples: formatted_sample = self.formatInputData(sample)[0].tolist() train_samples.append(formatted_sample) train_lables.append(oneHot(getLabelIndex(sample))) train_lables = np.asarray(train_lables) train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights()) self.isTrained = True return train_result def predictLabel(self, sample): """Returns how the classifier would classify a given sample""" if not self.isTrained: return 0 sample = self.formatInputData(sample) return np.argmax(self.model.predict(sample)) def pred<|fim_middle|>f, sample): """Return the probability the module assigns each label""" if not self.isTrained: return [0, 0, 0, 0, 0, 0, 0, 0] sample = self.formatInputData(sample) prediction = self.model.predict(sample)[0] return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned def formatInputData(self, sample): """Extract description and transform to vector""" sd = getDescription(sample) sd += getName(sample) # Returns numpy array which contains 1 array with features return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0) <|fim▁end|>
ictLabelAndProbability(sel
<|file_name|>descriptionreponamelstm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-

from Models.FeatureProcessing import *
from keras.models import Sequential
from keras.layers import Activation, Dense, LSTM
from keras.optimizers import Adam, SGD
import numpy as np
import abc
from ClassificationModule import ClassificationModule


class descriptionreponamelstm(ClassificationModule):
    """A basic LSTM neural network"""

    def __init__(self, num_hidden_layers=3):
        ClassificationModule.__init__(self, "Description and reponame LSTM", "An LSTM reading the description and reponame character by character")

        hidden_size = 300
        self.maxlen = 300

        # Set output_size
        self.output_size = 7 # Hardcoded for 7 classes

        model = Sequential()

        # Maximum of self.maxlen characters allowed, each in a one-hot-encoded array
        model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))

        for _ in range(num_hidden_layers):
            model.add(Dense(hidden_size))

        model.add(Dense(self.output_size))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])
        self.model = model

        print "\t-", self.name

    def resetAllTraining(self):
        """Reset classification module to status before training"""
        resetWeights(self.model)

    def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True):
        """Train (incrementally) on a sample, optionally together with a certain amount of older data so that overfitting on the new data is avoided."""
        readme_vec = self.formatInputData(sample)
        label_index = getLabelIndex(sample)
        label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras
        self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose)

    def train(self, samples, nb_epoch=200, shuffle=True, verbose=True):
        """Train on a list of samples. Further parameters may be needed (nb_epoch, learning_rate, ...)"""
        train_samples = []
        train_lables = []
        for sample in samples:
            formatted_sample = self.formatInputData(sample)[0].tolist()
            train_samples.append(formatted_sample)
            train_lables.append(oneHot(getLabelIndex(sample)))
        train_lables = np.asarray(train_lables)
        train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights())
        self.isTrained = True
        return train_result

    def predictLabel(self, sample):
        """Return how the classifier would classify a given sample"""
        if not self.isTrained:
            return 0

        sample = self.formatInputData(sample)
        return np.argmax(self.model.predict(sample))

    def predictLabelAndProbability(self, sample):
        """Return the probability the module assigns to each label"""
        if not self.isTrained:
            return [0, 0, 0, 0, 0, 0, 0, 0]

        sample = self.formatInputData(sample)
        prediction = self.model.predict(sample)[0]
        return [np.argmax(prediction)] + list(prediction) # [0] So a 1-D array is returned

    def form<|fim_middle|>f, sample):
        """Extract description and reponame and transform them to a vector"""
        sd = getDescription(sample)
        sd += getName(sample)
        # Returns numpy array which contains 1 array with features
        return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0)
<|fim▁end|>
atInputData(sel
<|file_name|>instance.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|># distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a VM with the provided name, metadata, and auth scopes.""" COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/' def GlobalComputeUrl(project, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/global/', collection, '/', name]) def ZonalComputeUrl(project, zone, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/zones/', zone, '/', collection, '/', name]) def GenerateConfig(context): """Generate configuration.""" base_name = context.properties['instanceName'] items = [] for key, value in context.properties['metadata'].iteritems(): items.append({ 'key': key, 'value': value }) metadata = {'items': items} # Properties for the container-based instance. instance = { 'zone': context.properties['zone'], 'machineType': ZonalComputeUrl( context.env['project'], context.properties['zone'], 'machineTypes', 'f1-micro'), 'metadata': metadata, 'serviceAccounts': [{ 'email': 'default', 'scopes': context.properties['scopes'] }], 'disks': [{ 'deviceName': 'boot', 'type': 'PERSISTENT', 'autoDelete': True, 'boot': True, 'initializeParams': { 'diskName': base_name + '-disk', 'sourceImage': GlobalComputeUrl( 'debian-cloud', 'images', ''.join(['backports-debian', '-7-wheezy-v20151104'])) }, }], 'networkInterfaces': [{ 'accessConfigs': [{ 'name': 'external-nat', 'type': 'ONE_TO_ONE_NAT' }], 'network': GlobalComputeUrl( context.env['project'], 'networks', 'default') }] } # Resources and output to return. return { 'resources': [{ 'name': base_name, 'type': 'compute.v1.instance', 'properties': instance }] }<|fim▁end|>
# # Unless required by applicable law or agreed to in writing, software
<|file_name|>instance.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a VM with the provided name, metadata, and auth scopes.""" COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/' def GlobalComputeUrl(project, collection, name): <|fim_middle|> def ZonalComputeUrl(project, zone, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/zones/', zone, '/', collection, '/', name]) def GenerateConfig(context): """Generate configuration.""" base_name = context.properties['instanceName'] items = [] for key, value in context.properties['metadata'].iteritems(): items.append({ 'key': key, 'value': value }) metadata = {'items': items} # Properties for the container-based instance. instance = { 'zone': context.properties['zone'], 'machineType': ZonalComputeUrl( context.env['project'], context.properties['zone'], 'machineTypes', 'f1-micro'), 'metadata': metadata, 'serviceAccounts': [{ 'email': 'default', 'scopes': context.properties['scopes'] }], 'disks': [{ 'deviceName': 'boot', 'type': 'PERSISTENT', 'autoDelete': True, 'boot': True, 'initializeParams': { 'diskName': base_name + '-disk', 'sourceImage': GlobalComputeUrl( 'debian-cloud', 'images', ''.join(['backports-debian', '-7-wheezy-v20151104'])) }, }], 'networkInterfaces': [{ 'accessConfigs': [{ 'name': 'external-nat', 'type': 'ONE_TO_ONE_NAT' }], 'network': GlobalComputeUrl( context.env['project'], 'networks', 'default') }] } # Resources and output to return. return { 'resources': [{ 'name': base_name, 'type': 'compute.v1.instance', 'properties': instance }] } <|fim▁end|>
return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/global/', collection, '/', name])
<|file_name|>instance.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a VM with the provided name, metadata, and auth scopes.""" COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/' def GlobalComputeUrl(project, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/global/', collection, '/', name]) def ZonalComputeUrl(project, zone, collection, name): <|fim_middle|> def GenerateConfig(context): """Generate configuration.""" base_name = context.properties['instanceName'] items = [] for key, value in context.properties['metadata'].iteritems(): items.append({ 'key': key, 'value': value }) metadata = {'items': items} # Properties for the container-based instance. instance = { 'zone': context.properties['zone'], 'machineType': ZonalComputeUrl( context.env['project'], context.properties['zone'], 'machineTypes', 'f1-micro'), 'metadata': metadata, 'serviceAccounts': [{ 'email': 'default', 'scopes': context.properties['scopes'] }], 'disks': [{ 'deviceName': 'boot', 'type': 'PERSISTENT', 'autoDelete': True, 'boot': True, 'initializeParams': { 'diskName': base_name + '-disk', 'sourceImage': GlobalComputeUrl( 'debian-cloud', 'images', ''.join(['backports-debian', '-7-wheezy-v20151104'])) }, }], 'networkInterfaces': [{ 'accessConfigs': [{ 'name': 'external-nat', 'type': 'ONE_TO_ONE_NAT' }], 'network': GlobalComputeUrl( context.env['project'], 'networks', 'default') }] } # Resources and output to return. return { 'resources': [{ 'name': base_name, 'type': 'compute.v1.instance', 'properties': instance }] } <|fim▁end|>
return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/zones/', zone, '/', collection, '/', name])
<|file_name|>instance.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a VM with the provided name, metadata, and auth scopes.""" COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/' def GlobalComputeUrl(project, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/global/', collection, '/', name]) def ZonalComputeUrl(project, zone, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/zones/', zone, '/', collection, '/', name]) def GenerateConfig(context): <|fim_middle|> <|fim▁end|>
"""Generate configuration.""" base_name = context.properties['instanceName'] items = [] for key, value in context.properties['metadata'].iteritems(): items.append({ 'key': key, 'value': value }) metadata = {'items': items} # Properties for the container-based instance. instance = { 'zone': context.properties['zone'], 'machineType': ZonalComputeUrl( context.env['project'], context.properties['zone'], 'machineTypes', 'f1-micro'), 'metadata': metadata, 'serviceAccounts': [{ 'email': 'default', 'scopes': context.properties['scopes'] }], 'disks': [{ 'deviceName': 'boot', 'type': 'PERSISTENT', 'autoDelete': True, 'boot': True, 'initializeParams': { 'diskName': base_name + '-disk', 'sourceImage': GlobalComputeUrl( 'debian-cloud', 'images', ''.join(['backports-debian', '-7-wheezy-v20151104'])) }, }], 'networkInterfaces': [{ 'accessConfigs': [{ 'name': 'external-nat', 'type': 'ONE_TO_ONE_NAT' }], 'network': GlobalComputeUrl( context.env['project'], 'networks', 'default') }] } # Resources and output to return. return { 'resources': [{ 'name': base_name, 'type': 'compute.v1.instance', 'properties': instance }] }
<|file_name|>instance.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a VM with the provided name, metadata, and auth scopes.""" COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/' def <|fim_middle|>(project, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/global/', collection, '/', name]) def ZonalComputeUrl(project, zone, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/zones/', zone, '/', collection, '/', name]) def GenerateConfig(context): """Generate configuration.""" base_name = context.properties['instanceName'] items = [] for key, value in context.properties['metadata'].iteritems(): items.append({ 'key': key, 'value': value }) metadata = {'items': items} # Properties for the container-based instance. instance = { 'zone': context.properties['zone'], 'machineType': ZonalComputeUrl( context.env['project'], context.properties['zone'], 'machineTypes', 'f1-micro'), 'metadata': metadata, 'serviceAccounts': [{ 'email': 'default', 'scopes': context.properties['scopes'] }], 'disks': [{ 'deviceName': 'boot', 'type': 'PERSISTENT', 'autoDelete': True, 'boot': True, 'initializeParams': { 'diskName': base_name + '-disk', 'sourceImage': GlobalComputeUrl( 'debian-cloud', 'images', ''.join(['backports-debian', '-7-wheezy-v20151104'])) }, }], 'networkInterfaces': [{ 'accessConfigs': [{ 'name': 'external-nat', 'type': 'ONE_TO_ONE_NAT' }], 'network': GlobalComputeUrl( context.env['project'], 'networks', 'default') }] } # Resources and output to return. return { 'resources': [{ 'name': base_name, 'type': 'compute.v1.instance', 'properties': instance }] } <|fim▁end|>
GlobalComputeUrl
<|file_name|>instance.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a VM with the provided name, metadata, and auth scopes.""" COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/' def GlobalComputeUrl(project, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/global/', collection, '/', name]) def <|fim_middle|>(project, zone, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/zones/', zone, '/', collection, '/', name]) def GenerateConfig(context): """Generate configuration.""" base_name = context.properties['instanceName'] items = [] for key, value in context.properties['metadata'].iteritems(): items.append({ 'key': key, 'value': value }) metadata = {'items': items} # Properties for the container-based instance. instance = { 'zone': context.properties['zone'], 'machineType': ZonalComputeUrl( context.env['project'], context.properties['zone'], 'machineTypes', 'f1-micro'), 'metadata': metadata, 'serviceAccounts': [{ 'email': 'default', 'scopes': context.properties['scopes'] }], 'disks': [{ 'deviceName': 'boot', 'type': 'PERSISTENT', 'autoDelete': True, 'boot': True, 'initializeParams': { 'diskName': base_name + '-disk', 'sourceImage': GlobalComputeUrl( 'debian-cloud', 'images', ''.join(['backports-debian', '-7-wheezy-v20151104'])) }, }], 'networkInterfaces': [{ 'accessConfigs': [{ 'name': 'external-nat', 'type': 'ONE_TO_ONE_NAT' }], 'network': GlobalComputeUrl( context.env['project'], 'networks', 'default') }] } # Resources and output to return. return { 'resources': [{ 'name': base_name, 'type': 'compute.v1.instance', 'properties': instance }] } <|fim▁end|>
ZonalComputeUrl
<|file_name|>instance.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a VM with the provided name, metadata, and auth scopes.""" COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/' def GlobalComputeUrl(project, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/global/', collection, '/', name]) def ZonalComputeUrl(project, zone, collection, name): return ''.join([COMPUTE_URL_BASE, 'projects/', project, '/zones/', zone, '/', collection, '/', name]) def <|fim_middle|>(context): """Generate configuration.""" base_name = context.properties['instanceName'] items = [] for key, value in context.properties['metadata'].iteritems(): items.append({ 'key': key, 'value': value }) metadata = {'items': items} # Properties for the container-based instance. instance = { 'zone': context.properties['zone'], 'machineType': ZonalComputeUrl( context.env['project'], context.properties['zone'], 'machineTypes', 'f1-micro'), 'metadata': metadata, 'serviceAccounts': [{ 'email': 'default', 'scopes': context.properties['scopes'] }], 'disks': [{ 'deviceName': 'boot', 'type': 'PERSISTENT', 'autoDelete': True, 'boot': True, 'initializeParams': { 'diskName': base_name + '-disk', 'sourceImage': GlobalComputeUrl( 'debian-cloud', 'images', ''.join(['backports-debian', '-7-wheezy-v20151104'])) }, }], 'networkInterfaces': [{ 'accessConfigs': [{ 'name': 'external-nat', 'type': 'ONE_TO_ONE_NAT' }], 'network': GlobalComputeUrl( context.env['project'], 'networks', 'default') }] } # Resources and output to return. return { 'resources': [{ 'name': base_name, 'type': 'compute.v1.instance', 'properties': instance }] } <|fim▁end|>
GenerateConfig
<|file_name|>configtest.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # # Config file test app (together with test.cfg file) # import os, sys sys.path.append("..") import configfile cfg = configfile.ConfigFile("test.cfg") cfg.setCfgValue("name1", "value1") cfg.setCfgValue("name2", "value2") cfg.selectSection("user") cfg.setCfgValue("username", "janis") cfg.setCfgValue("acceptable_names", ["john", "janis"]) cfg.load() print cfg.cfg.options("main") print cfg.cfg.options("user") print cfg.getCfgValue("username") print type(cfg.getCfgValue("username")) print cfg.getCfgValueAsList("acceptable_names") print cfg.getCfgValueAsList("list_in_list") cfg.selectSection("main") print cfg.getCfgValueAsInt("a_number") print type(cfg.getCfgValueAsInt("a_number")) print cfg.getCfgValueAsBool("a_bool") print type(cfg.getCfgValueAsBool("a_bool")) cfg.filename = "test-mod.cfg"<|fim▁hole|>cfg.selectSection("main") cfg.setCfgValue("name1", "value1mod2") cfg.setCfgValue("a_number", 14) cfg.selectSection("user") cfg.setCfgValue("acceptable_names", ["john", "janis", "ivan"]) cfg.setCfgValue("list_in_list2", ["[baz]", "[foo, bar]"]) cfg.setCfgValue("list_in_list3", ["first", "[second-one, second-third]"]) cfg.save()<|fim▁end|>
<|file_name|>mainHandler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # @author victor li [email protected] # @date 2015/10/07<|fim▁hole|>import baseHandler class MainHandler(baseHandler.RequestHandler): def get(self): self.redirect('/posts/last')<|fim▁end|>
<|file_name|>mainHandler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # @author victor li [email protected] # @date 2015/10/07 import baseHandler class MainHandler(baseHandler.RequestHandler): <|fim_middle|> <|fim▁end|>
def get(self): self.redirect('/posts/last')
<|file_name|>mainHandler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # @author victor li [email protected] # @date 2015/10/07 import baseHandler class MainHandler(baseHandler.RequestHandler): def get(self): <|fim_middle|> <|fim▁end|>
self.redirect('/posts/last')
<|file_name|>mainHandler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # @author victor li [email protected] # @date 2015/10/07 import baseHandler class MainHandler(baseHandler.RequestHandler): def <|fim_middle|>(self): self.redirect('/posts/last') <|fim▁end|>
get
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json

f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')

start_obj = json.load(f)
end_obj = {'data': []}

characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
    obj = start_obj['data'][i]
    if obj['type'] == 'entrance':<|fim▁hole|>
        # Membership must be checked per character: obj['characters'] is a
        # list, and a whole list is never an element of characters_on_stage.
        if any(char in characters_on_stage for char in obj['characters']):
            raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
        characters_on_stage = characters_on_stage + obj['characters']
    elif obj['type'] == 'exeunt':
        characters_on_stage = []
    elif obj['type'] == 'exit':
        characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
    elif obj['type'] == 'speaker tag':
        if obj['speaker'] not in characters_on_stage:
            raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
        currently_speaking = obj['speaker']
    elif obj['type'] == 'line':
        if currently_speaking is None:
            raise Exception('A line did not have an associated speaker at object ' + str(i))
        identifier_info = obj['identifier'].split('.')
        scene = identifier_info[0] + '.' + identifier_info[1]
        #if scene != last_scene:
        #    if len(characters_on_stage) != 0:
        #        print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
        last_scene = scene
        end_obj['data'].append({
            'type': 'line',
            'identifier': obj['identifier'],
            'text': obj['text'].strip(),
            'speaker': currently_speaking,
            'characters': characters_on_stage
        })
    if len(characters_on_stage) == 0:
        currently_speaking = None

json.dump(end_obj, out)<|fim▁end|>