Columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list), possible_versions (list)
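For orientation, here is a minimal illustration of how one record lines up under this schema; every value below is invented, not taken from the rows that follow. The per-file columns (hexsha, file_path, code, apis, possible_versions) are parallel lists.

import json

# Hypothetical record shape for the schema above; all values are invented.
# The per-file columns are parallel lists indexed by file.
record = {
    "repo_name": "user/repo",  # string, 6-130 chars
    "hexsha": ["0000000000000000000000000000000000000000"],
    "file_path": ["pkg/module.py"],
    "code": ["from numpy import array\n"],
    "apis": [["numpy.array"]],
    "possible_versions": [{"numpy": [], "pandas": []}],
}
print(json.dumps(record, indent=2))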
drixselecta/homebytwo
[ "29d26ce9f5586943e3b64c95aa4ce9ea7263bd10" ]
[ "homebytwo/routes/models/activity.py" ]
[ "from abc import abstractmethod\nfrom typing import List, Optional\n\nfrom django.contrib.gis.db import models\nfrom django.contrib.gis.measure import D\nfrom django.core.exceptions import FieldError\nfrom django.db.models import Count\n\nfrom numpy import array\nfrom pandas import DataFrame\nfrom stravalib import unithelper\nfrom stravalib.exc import ObjectNotFound\n\nfrom ...core.models import TimeStampedModel\nfrom ..fields import DataFrameField, NumpyArrayField\nfrom ..prediction_model import PredictionModel\n\nSTREAM_TYPES = [\"time\", \"altitude\", \"distance\", \"moving\"]\nSTRAVA_ACTIVITY_URL = \"https://www.strava.com/activities/{}\"\n\n\ndef athlete_streams_directory_path(instance, filename):\n # streams will upload to MEDIA_ROOT/athlete_<id>/<filename>\n return f\"athlete_{instance.athlete.id}/streams/{filename}\"\n\n\ndef get_default_array():\n \"\"\"\n default array (mutable) for the `regression_coefficients` NumpyArrayField.\n \"\"\"\n return array([0.0, 0.0, 0.0, 0.075, 0.0004, 0.0001, 0.0001]).copy()\n\n\ndef get_default_category():\n \"\"\"\n default list (mutable) for the categories saved by the one-hot encoder ArrayField.\n \"\"\"\n return array([\"None\"]).copy()\n\n\ndef update_user_activities_from_strava(athlete, after=None, before=None, limit=1000):\n \"\"\"\n fetches an athlete's activities from Strava and saves them to the Database.\n It erases the ones that are no more available because they have been deleted\n or set to private and returns all of the athlete's current activities.\n\n Parameters:\n 'after': start date after specified value (UTC). datetime.datetime, str or None.\n 'before': start date before specified value (UTC). datetime.datetime or str or None\n 'limit': maximum activities retrieved. Integer\n\n See https://pythonhosted.org/stravalib/usage/activities.html#list-of-activities and\n https://developers.strava.com/playground/#/Activities/getLoggedInAthleteActivities\n \"\"\"\n\n # retrieve the athlete's activities on Strava\n strava_activities = athlete.strava_client.get_activities(\n before=before, after=after, limit=limit\n )\n\n current_activities = []\n for strava_activity in strava_activities:\n if is_activity_supported(strava_activity):\n activity = Activity.get_or_stub(strava_activity.id, athlete)\n activity.update_with_strava_data(strava_activity)\n current_activities.append(activity)\n\n # delete existing activities that are not in the Strava result\n existing_activities = Activity.objects.filter(athlete=athlete)\n existing_activities.exclude(\n id__in=[activity.id for activity in current_activities]\n ).delete()\n\n return current_activities\n\n\ndef is_activity_supported(strava_activity):\n \"\"\"\n check that the activity was not manually uploaded by the athlete\n and if the activity type is supported by homebytwo\n \"\"\"\n if strava_activity.manual:\n return False\n if strava_activity.type not in ActivityType.SUPPORTED_ACTIVITY_TYPES:\n return False\n return True\n\n\ndef are_streams_valid(strava_streams):\n \"\"\"\n check if all required stream types are present and\n if they all contain values.\n \"\"\"\n if not all(stream_type in strava_streams for stream_type in STREAM_TYPES):\n return False\n if not all(raw_stream.original_size > 0 for raw_stream in strava_streams.values()):\n return False\n return True\n\n\nclass ActivityQuerySet(models.QuerySet):\n def for_user(self, user):\n \"\"\"\n return all routes of a given user.\n this is convenient with the 'request.user' object in views.\n \"\"\"\n return 
self.filter(athlete=user.athlete)\n\n\nclass ActivityManager(models.Manager):\n def get_queryset(self):\n return ActivityQuerySet(self.model, using=self._db)\n\n def for_user(self, user):\n return self.get_queryset().for_user(user)\n\n\nclass Activity(TimeStampedModel):\n \"\"\"\n An athlete's Strava activity used to train his prediction models\n \"\"\"\n\n NONE = None\n DEFAULT_RUN = 0\n RACE_RUN = 1\n LONG_RUN = 2\n WORKOUT_RUN = 3\n DEFAULT_RIDE = 10\n RACE_RIDE = 11\n WORKOUT_RIDE = 12\n\n WORKOUT_TYPE_CHOICES = [\n (NONE, \"None\"),\n (DEFAULT_RUN, \"default run\"),\n (RACE_RUN, \"race run\"),\n (LONG_RUN, \"long run\"),\n (WORKOUT_RUN, \"workout run\"),\n (DEFAULT_RIDE, \"default ride\"),\n (RACE_RIDE, \"race ride\"),\n (WORKOUT_RIDE, \"workout ride\"),\n ]\n\n # name of the activity as imported from Strava\n name = models.CharField(max_length=255)\n\n # description of the activity as imported from Strava\n description = models.TextField(blank=True)\n\n # Activity ID on Strava\n strava_id = models.BigIntegerField(unique=True)\n\n # Starting date and time of the activity in UTC\n start_date = models.DateTimeField()\n\n # Athlete whose activities have been imported from Strava\n athlete = models.ForeignKey(\n \"Athlete\", on_delete=models.CASCADE, related_name=\"activities\"\n )\n\n # Athlete whose activities have been imported from Strava\n activity_type = models.ForeignKey(\n \"ActivityType\", on_delete=models.PROTECT, related_name=\"activities\"\n )\n\n # Total activity distance\n distance = models.FloatField(\"Activity distance in m\", blank=True, null=True)\n\n # elevation gain in m\n total_elevation_gain = models.FloatField(\n \"Total elevation gain in m\", blank=True, null=True\n )\n\n # total duration of the activity in seconds as opposed to moving time\n elapsed_time = models.DurationField(\n \"Total activity time as timedelta\", blank=True, null=True\n )\n\n # time in movement during the activity\n moving_time = models.DurationField(\n \"Movement time as timedelta\", blank=True, null=True\n )\n\n # streams retrieved from the Strava API\n streams = DataFrameField(\n null=True, upload_to=athlete_streams_directory_path, unique_fields=[\"strava_id\"]\n )\n\n # skip trying to import streams from Strava\n skip_streams_import = models.BooleanField(default=False)\n\n # Workout Type as defined in Strava\n workout_type = models.SmallIntegerField(\n choices=WORKOUT_TYPE_CHOICES, blank=True, null=True\n )\n\n # is the activity flagged as a commute?\n commute = models.BooleanField(default=False)\n\n # Gear used if any\n gear = models.ForeignKey(\n \"Gear\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name=\"activities\",\n )\n\n class Meta:\n ordering = [\"-start_date\"]\n verbose_name_plural = \"activities\"\n\n # Custom manager\n objects = ActivityManager()\n\n def __str__(self):\n return \"{0}: {1} - {2}\".format(self.activity_type, self.name, self.athlete)\n\n def get_strava_url(self):\n # return the absolute URL to the activity on Strava\n return STRAVA_ACTIVITY_URL.format(self.strava_id)\n\n def get_distance(self):\n # return the activity distance as a Distance object\n return D(m=self.distance)\n\n def get_total_elevation_gain(self):\n # return the activity distance as a Distance object\n return D(m=self.total_elevation_gain)\n\n @classmethod\n def get_or_stub(cls, strava_id, athlete):\n \"\"\"\n use Strava id to return an activity from the database or an activity stub\n \"\"\"\n try:\n activity = cls.objects.get(strava_id=strava_id)\n except 
cls.DoesNotExist:\n activity = cls(strava_id=strava_id, athlete=athlete)\n\n return activity\n\n def get_activity_from_strava(self):\n \"\"\"\n retrieve single activity information from Strava.\n \"\"\"\n try:\n strava_activity = self.athlete.strava_client.get_activity(self.strava_id)\n\n # Activity was was deleted or made private on Strava\n except ObjectNotFound:\n if self.id:\n self.delete()\n\n # strava activity was found on Strava\n else:\n return strava_activity\n\n def update_with_strava_data(self, strava_activity, commit=True):\n \"\"\"\n update an activity based on information received from Strava.\n\n :param strava_activity: the activity object returned by the Strava API client.\n :param commit: save Strava activity to the database\n \"\"\"\n\n # fields from the Strava API object mapped to the Activity Model\n fields_map = {\n \"name\": strava_activity.name,\n \"activity_type\": strava_activity.type,\n \"start_date\": strava_activity.start_date,\n \"elapsed_time\": strava_activity.elapsed_time,\n \"moving_time\": strava_activity.moving_time,\n \"description\": strava_activity.description,\n \"workout_type\": strava_activity.workout_type,\n \"distance\": unithelper.meters(strava_activity.distance),\n \"total_elevation_gain\": unithelper.meters(\n strava_activity.total_elevation_gain\n ),\n \"gear\": strava_activity.gear_id,\n \"commute\": strava_activity.commute,\n }\n\n # find or create the activity type\n fields_map[\"activity_type\"], created = ActivityType.objects.get_or_create(\n name=strava_activity.type\n )\n\n if strava_activity.gear_id:\n # resolve foreign key relationship for gear and get gear info if new\n fields_map[\"gear\"], created = Gear.objects.get_or_create(\n strava_id=strava_activity.gear_id, athlete=self.athlete\n )\n if created:\n fields_map[\"gear\"].update_from_strava()\n\n # transform description text to empty if None\n if strava_activity.description is None:\n fields_map[\"description\"] = \"\"\n\n # update activity information\n for key, value in fields_map.items():\n setattr(self, key, value)\n\n if commit:\n self.save()\n\n def update_activity_streams_from_strava(self):\n \"\"\"\n save activity streams from Strava in a pandas DataFrame.\n returns True if streams could be imported.\n \"\"\"\n strava_streams = self.get_streams_from_strava()\n\n if strava_streams and are_streams_valid(strava_streams):\n self.streams = DataFrame(\n {key: stream.data for key, stream in strava_streams.items()}\n )\n self.save(update_fields=[\"streams\"])\n return True\n\n # otherwise, skip trying to get the streams next time\n self.skip_streams_import = True\n self.save(update_fields=[\"skip_streams_import\"])\n return False\n\n def get_streams_from_strava(self, resolution=\"low\"):\n \"\"\"\n Return activity streams from Strava: Time, Altitude, Distance and Moving.\n\n Only activities with all four required types of stream present will be returned.\n Setting a 'low' resolution provides free downsampling of the data\n for better accuracy in the prediction.\n \"\"\"\n\n strava_client = self.athlete.strava_client\n return strava_client.get_activity_streams(\n self.strava_id, types=STREAM_TYPES, resolution=resolution\n )\n\n def get_training_data(self):\n \"\"\"\n return activity data for training the linear regression model.\n \"\"\"\n\n # load activity streams as a DataFrame\n activity_data = self.streams\n\n # calculate gradient in percents, pace in minutes/kilometer and\n # cumulative elevation gain\n activity_data[\"step_distance\"] = activity_data.distance.diff()\n 
activity_data[\"gradient\"] = (\n activity_data.altitude.diff() / activity_data.step_distance * 100\n )\n activity_data[\"pace\"] = activity_data.time.diff() / activity_data.step_distance\n activity_data[\"cumulative_elevation_gain\"] = activity_data.altitude.diff()[\n activity_data.altitude.diff() >= 0\n ].cumsum()\n activity_data[\n \"cumulative_elevation_gain\"\n ] = activity_data.cumulative_elevation_gain.fillna(method=\"ffill\").fillna(\n value=0\n )\n\n # remove rows with empty gradient or empty pace\n columns = [\"gradient\", \"pace\"]\n activity_data = activity_data[activity_data[columns].notnull().all(1)].copy()\n\n # add activity information to every row\n activity_properties = {\n \"strava_id\": self.strava_id,\n \"start_date\": self.start_date,\n \"total_elevation_gain\": self.total_elevation_gain,\n \"total_distance\": self.distance,\n \"gear\": self.gear.strava_id if self.gear else \"None\",\n \"workout_type\": self.get_workout_type_display()\n if self.workout_type or self.workout_type == 0\n else \"None\",\n \"commute\": self.commute,\n }\n\n return activity_data.assign(\n **{key: value for key, value in activity_properties.items()}\n )\n\n\nclass PredictedModel(models.Model):\n \"\"\"\n base Model for training and persisting schedule prediction models\n\n Subclassed by ActivityType and ActivityPerformance.\n \"\"\"\n\n # list of regression coefficients as trained by the regression model\n regression_coefficients = NumpyArrayField(\n models.FloatField(), default=get_default_array\n )\n\n # flat pace in seconds per meter: the intercept of the regression\n flat_parameter = models.FloatField(default=0.36) # 6:00/km or 10km/h\n\n # workout_type categories found by the prediction model\n workout_type_categories = NumpyArrayField(\n models.CharField(max_length=50),\n default=get_default_category,\n )\n\n # reliability and cross_validation scores of the prediction model\n # between 0.0 and 1.0\n model_score = models.FloatField(default=0.0)\n cv_scores = NumpyArrayField(models.FloatField(), default=get_default_array)\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n set activity_type and adapt to the number of categorical columns.\n\n self._activity_type is required to remove outliers in the training data based\n on max and min speed and gradient.\n\n categorical columns determines the shape of the regression_coefficients array\n \"\"\"\n super().__init__(*args, **kwargs)\n\n # set _activity_type to self or related Model\n if hasattr(self, \"activity_type\"):\n self._activity_type = self.activity_type\n elif isinstance(self, ActivityType):\n self._activity_type = self\n else:\n raise FieldError(f\"Cannot find activity_type for {self}\")\n\n # set default value for regression_coefficients based on the number of\n # categorical columns present in the Model\n categorical_coefficients = [0.0] * len(self.get_categorical_columns())\n numerical_coefficients = [0.0, 0.075, 0.0004, 0.0001, 0.0001]\n coefficients = self._meta.get_field(\"regression_coefficients\")\n coefficients.default = array(categorical_coefficients + numerical_coefficients)\n\n @abstractmethod\n def get_training_activities(self, max_num_activities: Optional[int]):\n \"\"\"\n retrieve activities to train the prediction model\n\n must be implemented in the subclasses.\n \"\"\"\n raise NotImplementedError\n\n def get_training_data(self, limit_activities: Optional[int] = None) -> DataFrame:\n \"\"\"\n retrieve training data for the prediction model\n\n :param limit_activities: maximum 
number of Strava activities used to feed the\n prediction model, defaults to `None`, i.e. all available activities\n \"\"\"\n target_activities = self.get_training_activities(limit_activities)\n\n # collect activity_data into a pandas DataFrame\n observations = DataFrame()\n for activity in target_activities:\n observations = observations.append(\n activity.get_training_data(), sort=True, ignore_index=True\n )\n\n return observations\n\n def remove_outliers(self, observations):\n \"\"\"\n remove speed and gradient outliers from training data based on ActivityType\n \"\"\"\n return observations[\n (observations.pace > self._activity_type.min_pace)\n & (observations.pace < self._activity_type.max_pace)\n & (observations.gradient > self._activity_type.min_gradient)\n & (observations.gradient < self._activity_type.max_gradient)\n ]\n\n @classmethod\n def get_categorical_columns(cls) -> List[str]:\n \"\"\"\n determine columns to use for categorical data based on available Model fields\n\n ActivityPerformance has two fields: gear_categories, workout_type_categories\n ActivityType has one: workout_type_categories\n \"\"\"\n possible_columns = [\"gear\", \"workout_type\"]\n return list(filter(lambda c: hasattr(cls, f\"{c}_categories\"), possible_columns))\n\n def train_prediction_model(self, limit_activities: Optional[int] = None) -> str:\n \"\"\"\n train prediction model for ActivityType or ActivityPerformance\n\n :param limit_activities: max number of activities considered for training\n :return: description of the training result\n \"\"\"\n\n observations = self.get_training_data(limit_activities=limit_activities)\n if observations.empty:\n return (\n f\"No training data found for activity type: {self._activity_type.name}\"\n )\n\n # remove outliers\n data = self.remove_outliers(observations)\n\n # determine categorical columns for model training\n categorical_columns = self.get_categorical_columns()\n\n # train prediction model\n prediction_model = PredictionModel(categorical_columns=categorical_columns)\n feature_columns = (\n prediction_model.numerical_columns + prediction_model.categorical_columns\n )\n prediction_model.train(\n y=data[\"pace\"],\n x=data[feature_columns].fillna(value=\"None\"),\n )\n\n # save model score\n self.model_score = prediction_model.model_score\n self.cv_scores = prediction_model.cv_scores\n\n # save coefficients and intercept\n regression = prediction_model.pipeline.named_steps[\"linearregression\"]\n self.regression_coefficients = regression.coef_\n self.flat_parameter = regression.intercept_\n\n # save categories from categorical columns\n for index, column in enumerate(prediction_model.categorical_columns):\n setattr(\n self,\n f\"{column}_categories\",\n prediction_model.onehot_encoder_categories[index],\n )\n\n self.save()\n\n message = (\n f\"{self} successfully trained with {data.shape[0]} observations. \"\n f\"Model score: {self.model_score}, \"\n f\"cross-validation score: {self.cv_scores}. 
\"\n )\n return message\n\n def get_prediction_model(self) -> PredictionModel:\n \"\"\"\n restore the Prediction Model from the saved parameters\n \"\"\"\n\n # retrieve categorical columns and values\n categorical_columns = self.get_categorical_columns()\n onehot_encoder_categories = []\n for column in categorical_columns:\n onehot_encoder_categories.append(getattr(self, column + \"_categories\"))\n\n return PredictionModel(\n regression_intercept=self.flat_parameter,\n regression_coefficients=self.regression_coefficients,\n categorical_columns=categorical_columns,\n onehot_encoder_categories=onehot_encoder_categories,\n )\n\n\nclass ActivityTypeQuerySet(models.QuerySet):\n def predicted(self):\n \"\"\"\n retrieve athlete activity_type choices available for schedule prediction\n \"\"\"\n activity_types = self.filter(name__in=ActivityType.SUPPORTED_ACTIVITY_TYPES)\n activity_types = activity_types.exclude(activities=None)\n activity_types = activity_types.annotate(num_activities=Count(\"activities\"))\n return activity_types.order_by(\"-num_activities\")\n\n def for_athlete(self, athlete):\n \"\"\"\n retrieve activity_type choices available for schedule prediction\n \"\"\"\n return self.predicted().filter(activities__athlete=athlete)\n\n\nclass ActivityType(PredictedModel):\n \"\"\"\n ActivityType is used to define default performance values for each type of activity.\n The choice of available activities is limited to the ones available on Strava:\n http://developers.strava.com/docs/reference/#api-models-ActivityType\n \"\"\"\n\n # Strava activity types\n ALPINESKI = \"AlpineSki\"\n BACKCOUNTRYSKI = \"BackcountrySki\"\n CANOEING = \"Canoeing\"\n CROSSFIT = \"Crossfit\"\n EBIKERIDE = \"EBikeRide\"\n ELLIPTICAL = \"Elliptical\"\n GOLF = \"Golf\"\n HANDCYCLE = \"Handcycle\"\n HIKE = \"Hike\"\n ICESKATE = \"IceSkate\"\n INLINESKATE = \"InlineSkate\"\n KAYAKING = \"Kayaking\"\n KITESURF = \"Kitesurf\"\n NORDICSKI = \"NordicSki\"\n RIDE = \"Ride\"\n ROCKCLIMBING = \"RockClimbing\"\n ROLLERSKI = \"RollerSki\"\n ROWING = \"Rowing\"\n RUN = \"Run\"\n SAIL = \"Sail\"\n SKATEBOARD = \"Skateboard\"\n SNOWBOARD = \"Snowboard\"\n SNOWSHOE = \"Snowshoe\"\n SOCCER = \"Soccer\"\n STAIRSTEPPER = \"StairStepper\"\n STANDUPPADDLING = \"StandUpPaddling\"\n SURFING = \"Surfing\"\n SWIM = \"Swim\"\n VELOMOBILE = \"Velomobile\"\n VIRTUALRIDE = \"VirtualRide\"\n VIRTUALRUN = \"VirtualRun\"\n WALK = \"Walk\"\n WEIGHTTRAINING = \"WeightTraining\"\n WHEELCHAIR = \"Wheelchair\"\n WINDSURF = \"Windsurf\"\n WORKOUT = \"Workout\"\n YOGA = \"Yoga\"\n\n ACTIVITY_NAME_CHOICES = [\n (ALPINESKI, \"Alpine Ski\"),\n (BACKCOUNTRYSKI, \"Backcountry Ski\"),\n (CANOEING, \"Canoeing\"),\n (CROSSFIT, \"Crossfit\"),\n (EBIKERIDE, \"E-Bike Ride\"),\n (ELLIPTICAL, \"Elliptical\"),\n (GOLF, \"Golf\"),\n (HANDCYCLE, \"Handcycle\"),\n (HIKE, \"Hike\"),\n (ICESKATE, \"Ice Skate\"),\n (INLINESKATE, \"Inline Skate\"),\n (KAYAKING, \"Kayaking\"),\n (KITESURF, \"Kitesurf\"),\n (NORDICSKI, \"Nordic Ski\"),\n (RIDE, \"Ride\"),\n (ROCKCLIMBING, \"Rock Climbing\"),\n (ROLLERSKI, \"Roller Ski\"),\n (ROWING, \"Rowing\"),\n (RUN, \"Run\"),\n (SAIL, \"Sail\"),\n (SKATEBOARD, \"Skateboard\"),\n (SNOWBOARD, \"Snowboard\"),\n (SNOWSHOE, \"Snowshoe\"),\n (SOCCER, \"Soccer\"),\n (STAIRSTEPPER, \"Stair Stepper\"),\n (STANDUPPADDLING, \"Stand-Up Paddling\"),\n (SURFING, \"Surfing\"),\n (SWIM, \"Swim\"),\n (VELOMOBILE, \"Velomobile\"),\n (VIRTUALRIDE, \"Virtual Ride\"),\n (VIRTUALRUN, \"Virtual Run\"),\n (WALK, \"Walk\"),\n (WEIGHTTRAINING, \"Weight 
Training\"),\n (WHEELCHAIR, \"Wheelchair\"),\n (WINDSURF, \"Windsurf\"),\n (WORKOUT, \"Workout\"),\n (YOGA, \"Yoga\"),\n ]\n\n SUPPORTED_ACTIVITY_TYPES = {\n BACKCOUNTRYSKI,\n EBIKERIDE,\n HANDCYCLE,\n HIKE,\n INLINESKATE,\n NORDICSKI,\n RIDE,\n ROCKCLIMBING,\n ROLLERSKI,\n RUN,\n SNOWSHOE,\n VELOMOBILE,\n VIRTUALRIDE,\n VIRTUALRUN,\n WALK,\n WHEELCHAIR,\n }\n\n name = models.CharField(max_length=24, choices=ACTIVITY_NAME_CHOICES, unique=True)\n\n # min and max plausible gradient and speed to filter outliers in activity data.\n min_pace = models.FloatField(default=0.1) # 1:40/km or 36 km/h\n max_pace = models.FloatField(default=2.4) # 40:00/km or 1.5 km/h\n min_gradient = models.FloatField(default=-100.0) # 100% or -45°\n max_gradient = models.FloatField(default=100.0) # 100% or 45°\n\n objects = ActivityTypeQuerySet.as_manager()\n\n def __str__(self):\n return self.name\n\n def get_training_activities(self, limit=None):\n \"\"\"\n retrieve Strava activities to train the prediction model\n \"\"\"\n return self.activities.filter(streams__isnull=False)[:limit]\n\n\nclass ActivityPerformance(PredictedModel, TimeStampedModel):\n \"\"\"\n Athlete prediction model for an activity type calculated from his Strava history.\n\n Based on the athlete's past activities on strava, we train a linear regression model\n to predict the athlete's pace on a route. The pace of the athlete depends on the\n *slope* of the travelled segment.\n \"\"\"\n\n athlete = models.ForeignKey(\n \"Athlete\", on_delete=models.CASCADE, related_name=\"performances\"\n )\n activity_type = models.ForeignKey(\n \"ActivityType\", on_delete=models.PROTECT, related_name=\"performances\"\n )\n # gear categories returned by the prediction model\n gear_categories = NumpyArrayField(\n models.CharField(max_length=50),\n default=get_default_category,\n )\n\n def __str__(self):\n return \"{} - {} - {:.2%}\".format(\n self.athlete.user.username, self.activity_type.name, self.model_score\n )\n\n def get_training_activities(self, limit: int = None):\n \"\"\"\n return the activities that should feed the prediction model\n\n :param limit: maximum number of activities considered\n \"\"\"\n return self.activity_type.activities.filter(\n athlete=self.athlete, streams__isnull=False\n )[:limit]\n\n\nclass Gear(models.Model):\n \"\"\"\n Small helper model to save gear from Strava.\n \"\"\"\n\n strava_id = models.CharField(max_length=24, unique=True)\n name = models.CharField(max_length=100, blank=True)\n brand_name = models.CharField(max_length=100, blank=True)\n athlete = models.ForeignKey(\n \"Athlete\", on_delete=models.CASCADE, related_name=\"gears\"\n )\n\n def __str__(self):\n return \"{0} - {1}\".format(self.brand_name, self.name)\n\n def update_from_strava(self):\n # retrieve gear info from Strava\n strava_gear = self.athlete.strava_client.get_gear(self.strava_id)\n\n self.name = strava_gear.name\n if strava_gear.brand_name is not None:\n self.brand_name = strava_gear.brand_name\n\n # save\n self.save()\n" ]
[ [ "numpy.array", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
NSF-Swift/Spectrum-Access-System
[ "02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf", "02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf" ]
[ "src/harness/testcases/WINNF_FT_S_QPR_testcase.py", "src/harness/reference_models/propagation/wf_itm.py" ]
[ "# Copyright 2018 SAS Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport logging\nimport os\nimport numpy as np\n\nimport sas\nimport sas_testcase\nfrom util import configurable_testcase, writeConfig, loadConfig, json_load\nfrom reference_models.antenna import antenna\nfrom reference_models.geo import vincenty\nfrom reference_models.geo import zones\nfrom reference_models.propagation import wf_itm\n\n\nclass QuietZoneProtectionTestcase(sas_testcase.SasTestCase):\n\n @classmethod\n def setUpClass(cls):\n super(QuietZoneProtectionTestcase, cls).setUpClass()\n cls.fcc_offices = zones.GetFccOfficeLocations()\n\n def setUp(self):\n self._sas, self._sas_admin = sas.GetTestingSas()\n self._sas_admin.Reset()\n\n def tearDown(self):\n pass\n\n def generate_QPR_2_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.2.\"\"\"\n\n # Load device info\n # CBSD 1: Category A CBSD located within the boundary of the NRAO / NRRO\n # Quiet Zone.\n device_1 = json_load(\n os.path.join('testcases', 'testdata', 'device_a.json'))\n device_1['installationParam']['latitude'] = 39.244586\n device_1['installationParam']['longitude'] = -78.505269\n\n # CBSD 2: Category B CBSD located within the boundary of the NRAO / NRRO\n # Quiet Zone.\n device_2 = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_2['installationParam']['latitude'] = 39.247287\n device_2['installationParam']['longitude'] = -80.489236\n\n # device_1 is Category A.\n self.assertEqual(device_1['cbsdCategory'], 'A')\n\n # device_2 is Category B with conditionals pre-loaded.\n self.assertEqual(device_2['cbsdCategory'], 'B')\n conditionals_2 = {\n 'cbsdCategory': device_2['cbsdCategory'],\n 'fccId': device_2['fccId'],\n 'cbsdSerialNumber': device_2['cbsdSerialNumber'],\n 'airInterface': device_2['airInterface'],\n 'installationParam': device_2['installationParam'],\n 'measCapability': device_2['measCapability']\n }\n conditionals = [conditionals_2]\n del device_2['installationParam']\n del device_2['cbsdCategory']\n del device_2['airInterface']\n del device_2['measCapability']\n\n # Create the actual config.\n devices = [device_1, device_2]\n config = {\n 'registrationRequests': devices,\n 'conditionalRegistrationData': conditionals,\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_2_default_config)\n def test_WINNF_FT_S_QPR_2(self, config_filename):\n \"\"\"[Configurable] Rejecting Registration of CBSD inside the NRAO/NRRO\n Quiet Zone.\n \"\"\"\n config = loadConfig(config_filename)\n # Very light checking of the config file.\n self.assertValidConfig(\n config, {\n 'registrationRequests': list,\n 'conditionalRegistrationData': list\n })\n\n # Whitelist FCC IDs and User IDs.\n for device in config['registrationRequests']:\n self._sas_admin.InjectFccId({'fccId': device['fccId']})\n 
self._sas_admin.InjectUserId({'userId': device['userId']})\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Register CBSDs.\n request = {'registrationRequest': config['registrationRequests']}\n responses = self._sas.Registration(request)['registrationResponse']\n\n # Check registration responses.\n self.assertEqual(len(responses), len(config['registrationRequests']))\n for i, response in enumerate(responses):\n response = responses[i]\n logging.debug('Looking at response number %d', i)\n self.assertNotEqual(response['response']['responseCode'], 0)\n self.assertFalse('cbsdId' in response)\n\n def generate_QPR_5_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.5.\"\"\"\n\n # CBSD 1: Category A CBSD within 3.8 km of the Table Mountain QZ\n device_1 = json_load(\n os.path.join('testcases', 'testdata', 'device_a.json'))\n device_1['installationParam']['latitude'] = 40.121452\n device_1['installationParam']['longitude'] = -105.23381\n\n # CBSD 2: Category B CBSD within 38 km of the Table Mountain QZ\n device_2 = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_2['installationParam']['latitude'] = 40.271668\n device_2['installationParam']['longitude'] = -105.101395\n\n # CBSD 3: Category B CBSD within 54 km of the Table Mountain QZ\n device_3 = json_load(\n os.path.join('testcases', 'testdata', 'device_d.json'))\n device_3['installationParam']['latitude'] = 40.175726\n device_3['installationParam']['longitude'] = -104.654388\n\n # CBSD 4: Category B CBSD within 64 km of the Table Mountain QZ\n device_4 = json_load(\n os.path.join('testcases', 'testdata', 'device_h.json'))\n device_4['installationParam']['latitude'] = 40.074919\n device_4['installationParam']['longitude'] = -104.523926\n\n # CBSD 5: Category B CBSD located within 80 km of the Table Mountain QZ\n device_5 = json_load(\n os.path.join('testcases', 'testdata', 'device_j.json'))\n device_5['installationParam']['latitude'] = 40.525283\n device_5['installationParam']['longitude'] = -104.547272\n\n # device_1 is Category A.\n self.assertEqual(device_1['cbsdCategory'], 'A')\n\n # device_2, device_3, device_4 and device_5 are Category B.\n self.assertEqual(device_2['cbsdCategory'], 'B')\n self.assertEqual(device_3['cbsdCategory'], 'B')\n self.assertEqual(device_4['cbsdCategory'], 'B')\n self.assertEqual(device_5['cbsdCategory'], 'B')\n\n # Conditionals to pre-load\n conditionals_2 = {\n 'cbsdCategory': device_2['cbsdCategory'],\n 'fccId': device_2['fccId'],\n 'cbsdSerialNumber': device_2['cbsdSerialNumber'],\n 'airInterface': device_2['airInterface'],\n 'installationParam': device_2['installationParam'],\n 'measCapability': device_2['measCapability']\n }\n conditionals_3 = {\n 'cbsdCategory': device_3['cbsdCategory'],\n 'fccId': device_3['fccId'],\n 'cbsdSerialNumber': device_3['cbsdSerialNumber'],\n 'airInterface': device_3['airInterface'],\n 'installationParam': device_3['installationParam'],\n 'measCapability': device_3['measCapability']\n }\n conditionals_4 = {\n 'cbsdCategory': device_4['cbsdCategory'],\n 'fccId': device_4['fccId'],\n 'cbsdSerialNumber': device_4['cbsdSerialNumber'],\n 'airInterface': device_4['airInterface'],\n 'installationParam': device_4['installationParam'],\n 'measCapability': device_4['measCapability']\n }\n conditionals_5 = {\n 'cbsdCategory': device_5['cbsdCategory'],\n 'fccId': 
device_5['fccId'],\n 'cbsdSerialNumber': device_5['cbsdSerialNumber'],\n 'airInterface': device_5['airInterface'],\n 'installationParam': device_5['installationParam'],\n 'measCapability': device_5['measCapability']\n }\n\n # Remove conditionals from registration.\n for device in [device_2, device_3, device_4, device_5]:\n del device['cbsdCategory']\n del device['airInterface']\n del device['installationParam']\n del device['measCapability']\n\n # Grant Requests - N1\n grant1N1 = {\n 'operationParam': {\n 'maxEirp': 19,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant2N1 = {\n 'operationParam': {\n 'maxEirp': 37,\n 'operationFrequencyRange': {\n 'lowFrequency': 3655000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant3N1 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant4N1 = {\n 'operationParam': {\n 'maxEirp': 33,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant5N1 = {\n 'operationParam': {\n 'maxEirp': 30,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3670000000\n }\n }\n }\n # Grant Requests - N2\n grant1N2 = {\n 'operationParam': {\n 'maxEirp': 19,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3670000000\n }\n }\n }\n grant2N2 = {\n 'operationParam': {\n 'maxEirp': 37,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3665000000\n }\n }\n }\n grant3N2 = {\n 'operationParam': {\n 'maxEirp': 36,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3670000000\n }\n }\n }\n grant4N2 = {\n 'operationParam': {\n 'maxEirp': 37,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3680000000\n }\n }\n }\n grant5N2 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3670000000,\n 'highFrequency': 3690000000\n }\n }\n }\n # Create the actual config.\n config = {\n 'registrationRequests': [\n device_1, device_2, device_3, device_4, device_5\n ],\n 'conditionalRegistrationData': [\n conditionals_2, conditionals_3, conditionals_4, conditionals_5\n ],\n 'grantRequestsN1': [grant1N1, grant2N1, grant3N1, grant4N1, grant5N1],\n 'grantRequestsN2': [grant1N2, grant2N2, grant3N2, grant4N2, grant5N2]\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_5_default_config)\n def test_WINNF_FT_S_QPR_5(self, config_filename):\n \"\"\"[Configurable] Unsuccessful Grant Request from CBSDs within Coordination\n Area around Table Mountain Quiet Zone (QZ) with Multiple Grants.\n \"\"\"\n config = loadConfig(config_filename)\n self.assertValidConfig(\n config, {\n 'registrationRequests': list,\n 'conditionalRegistrationData': list,\n 'grantRequestsN1': list,\n 'grantRequestsN2': list\n })\n self.assertEqual(\n len(config['registrationRequests']), len(config['grantRequestsN1']))\n self.assertEqual(\n len(config['grantRequestsN1']), len(config['grantRequestsN2']))\n\n # Whitelist FCC ID and User ID.\n for device in config['registrationRequests']:\n self._sas_admin.InjectFccId({'fccId': device['fccId']})\n self._sas_admin.InjectUserId({'userId': device['userId']})\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Step 1: Register 
CBSDs.\n request = {'registrationRequest': config['registrationRequests']}\n responses = self._sas.Registration(request)['registrationResponse']\n\n # Check registration response.\n self.assertEqual(len(responses), len(config['registrationRequests']))\n grant_request_n1 = []\n grant_request_n2 = []\n successful_reg_requests = []\n for i, response in enumerate(responses):\n if response['response']['responseCode'] == 0:\n self.assertTrue('cbsdId' in response)\n successful_reg_requests.append(config['registrationRequests'][i])\n config['grantRequestsN1'][i]['cbsdId'] = response['cbsdId']\n grant_request_n1.append(config['grantRequestsN1'][i])\n config['grantRequestsN2'][i]['cbsdId'] = response['cbsdId']\n grant_request_n2.append(config['grantRequestsN2'][i])\n del request, responses\n\n if not successful_reg_requests:\n return # SAS passes immediately, since none of the registration requests\n # succeeded in this case.\n\n # Step 2: For CBSDs successfully registered in Step 1, Send grant request 1\n request1 = {'grantRequest': grant_request_n1}\n grant_responses1 = self._sas.Grant(request1)['grantResponse']\n # Check grant response 1\n self.assertEqual(len(grant_responses1), len(grant_request_n1))\n\n # Step 3: For CBSDs successfully registered in Step 1, Send grant request 2\n request2 = {'grantRequest': grant_request_n2}\n grant_responses2 = self._sas.Grant(request2)['grantResponse']\n # Check grant response 2\n self.assertEqual(len(grant_responses2), len(grant_request_n2))\n\n for i, device in enumerate(successful_reg_requests):\n if 'installationParam' in device:\n cbsd_information = device\n else:\n for conditional in config['conditionalRegistrationData']:\n if (device['fccId'] == conditional['fccId']) and (\n device['cbsdSerialNumber'] == conditional['cbsdSerialNumber']):\n cbsd_information = conditional\n logging.info(\n 'Looking at device with FccID: %s / CbsdSerialNumber: %s',\n cbsd_information['fccId'], cbsd_information['cbsdSerialNumber'])\n grant_response1 = grant_responses1[i]\n grant_response2 = grant_responses2[i]\n if not (grant_response1['response']['responseCode'] == 0 or\n grant_response2['response']['responseCode'] == 0):\n logging.info('Both grant requests were rejected for this device.')\n continue # Skip further calculation for this device.\n\n # Calculate PL\n logging.info(\n 'Calculating PL for device with FccID: %s / CbsdSerialNumber: %s',\n cbsd_information['fccId'], cbsd_information['cbsdSerialNumber'])\n cbsd_lat = cbsd_information['installationParam']['latitude']\n cbsd_lon = cbsd_information['installationParam']['longitude']\n cbsd_ant_azi = cbsd_information['installationParam']['antennaAzimuth']\n cbsd_ant_beamwidth = cbsd_information['installationParam'][\n 'antennaBeamwidth']\n cbsd_height = cbsd_information['installationParam']['height']\n cbsd_height_type = cbsd_information['installationParam']['heightType']\n is_cbsd_indoor = cbsd_information['installationParam']['indoorDeployment']\n freq_mhz = 3625. 
# Always in all SAS\n table_mountain_quiet_zone_lat = 40.130660\n table_mountain_quiet_zone_long = -105.244596\n table_mountain_quiet_zone_height = 9 # 9 m: According to the spec\n propagation = wf_itm.CalcItmPropagationLoss(\n cbsd_lat,\n cbsd_lon,\n cbsd_height,\n table_mountain_quiet_zone_lat,\n table_mountain_quiet_zone_long,\n table_mountain_quiet_zone_height,\n cbsd_indoor=is_cbsd_indoor,\n reliability=0.5,\n freq_mhz=freq_mhz,\n is_height_cbsd_amsl=(cbsd_height_type == 'AMSL'))\n pl = propagation.db_loss\n logging.info('Propagation:db_loss: %f', pl)\n bearing = propagation.incidence_angles.hor_cbsd\n logging.info('Bearing: %f', bearing)\n # Calculate effective antenna gain\n max_ant_gain_dbi = cbsd_information['installationParam']['antennaGain']\n ant_gain_dbi = antenna.GetStandardAntennaGains(\n bearing, cbsd_ant_azi, cbsd_ant_beamwidth,\n max_ant_gain_dbi)\n logging.info('Effective Antenna Gain: %f', ant_gain_dbi)\n\n # Gather values required for calculating Total Interference\n grant1_eirp = 0\n if grant_response1['response']['responseCode'] == 0:\n p1 = grant_request_n1[i]['operationParam']['maxEirp']\n logging.info('Grant 1 Max Eirp: %f dBm/MHz', p1)\n bw1 = (grant_request_n1[i]['operationParam']\n ['operationFrequencyRange']['highFrequency'] -\n grant_request_n1[i]['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('Grant 1 Bandwidth: %f', bw1)\n grant1_eirp = (10**(p1/10.0)) * bw1\n logging.info('Grant 1 EIRP is %f', grant1_eirp)\n\n grant2_eirp = 0\n if grant_response2['response']['responseCode'] == 0:\n p2 = grant_request_n2[i]['operationParam']['maxEirp']\n logging.info('Grant 2 Max Eirp: %f dBm/MHz', p2)\n bw2 = (grant_request_n2[i]['operationParam']\n ['operationFrequencyRange']['highFrequency'] -\n grant_request_n2[i]['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('Grant 2 Bandwidth: %f', bw2)\n grant2_eirp = (10**(p2/10.0)) * bw2\n logging.info('Grant 2 EIRP is %f', grant2_eirp)\n\n # Step 4: Calculate Total Interference\n total_interference_dbm = ant_gain_dbi - max_ant_gain_dbi + (\n 10 * np.log10(grant1_eirp + grant2_eirp)) - pl\n logging.info('Total Interference is %f dBm', total_interference_dbm)\n\n # CHECK: Total Interference from all approved grants is <= -88.4 dBm\n self.assertLessEqual(total_interference_dbm, -88.4)\n\n def generate_QPR_6_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.6.\"\"\"\n\n # Load device info\n # CBSD 1: Category A within 2.4 km of Waipahu, Hawaii FCC Field Office.\n device_1 = json_load(\n os.path.join('testcases', 'testdata', 'device_a.json'))\n device_1['installationParam']['latitude'] = 21.377719\n device_1['installationParam']['longitude'] = -157.973411\n\n # CBSD 2: Category B within 2.4 km of Allegan, Michigan FCC Field Office.\n device_2 = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_2['installationParam']['latitude'] = 42.586213\n device_2['installationParam']['longitude'] = -85.955594\n\n # device_1 is Category A.\n self.assertEqual(device_1['cbsdCategory'], 'A')\n\n # device_2 is Category B with conditionals pre-loaded.\n self.assertEqual(device_2['cbsdCategory'], 'B')\n conditionals_2 = {\n 'cbsdCategory': device_2['cbsdCategory'],\n 'fccId': device_2['fccId'],\n 'cbsdSerialNumber': device_2['cbsdSerialNumber'],\n 'airInterface': device_2['airInterface'],\n 'installationParam': device_2['installationParam'],\n 'measCapability': device_2['measCapability']\n }\n conditionals = 
[conditionals_2]\n del device_2['installationParam']\n del device_2['cbsdCategory']\n del device_2['airInterface']\n del device_2['measCapability']\n\n # Create the actual config.\n devices = [device_1, device_2]\n config = {\n 'registrationRequests': devices,\n 'conditionalRegistrationData': conditionals,\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_6_default_config)\n def test_WINNF_FT_S_QPR_6(self, config_filename):\n \"\"\"[Configurable] Rejecting Registration of CBSDs inside the FCC Protected\n Field Offices Quiet Zone.\n \"\"\"\n config = loadConfig(config_filename)\n # Very light checking of the config file.\n self.assertValidConfig(\n config, {\n 'registrationRequests': list,\n 'conditionalRegistrationData': list\n })\n\n # Whitelist FCC IDs and User IDs.\n for device in config['registrationRequests']:\n self._sas_admin.InjectFccId({'fccId': device['fccId']})\n self._sas_admin.InjectUserId({'userId': device['userId']})\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Register CBSDs.\n request = {'registrationRequest': config['registrationRequests']}\n responses = self._sas.Registration(request)['registrationResponse']\n\n # Check registration responses.\n self.assertEqual(len(responses), len(config['registrationRequests']))\n for i, response in enumerate(responses):\n response = responses[i]\n logging.debug('Looking at response number %d', i)\n self.assertNotEqual(response['response']['responseCode'], 0)\n self.assertFalse('cbsdId' in response)\n\n def generate_QPR_7_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.7.\"\"\"\n\n # Load device info\n # Cat B - between 2.4 km - 4.8 km of Waipahu, Hawaii Field Office.\n device_b = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_b['installationParam']['latitude'] = 21.397934\n device_b['installationParam']['longitude'] = -158.034459\n\n # device_b is Category B with conditionals pre-loaded.\n self.assertEqual(device_b['cbsdCategory'], 'B')\n conditionals_b = {\n 'cbsdCategory': device_b['cbsdCategory'],\n 'fccId': device_b['fccId'],\n 'cbsdSerialNumber': device_b['cbsdSerialNumber'],\n 'airInterface': device_b['airInterface'],\n 'installationParam': device_b['installationParam'],\n 'measCapability': device_b['measCapability']\n }\n del device_b['installationParam']\n del device_b['cbsdCategory']\n del device_b['airInterface']\n del device_b['measCapability']\n\n # Grant Request\n grant_0 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n\n # Create the actual config.\n config = {\n 'registrationRequest': device_b,\n 'conditionalRegistrationData': [conditionals_b],\n 'grantRequest': grant_0,\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_7_default_config)\n def test_WINNF_FT_S_QPR_7(self, config_filename):\n \"\"\"[Configurable] Unsuccessful Grant Request from CBSDs within 4.8 km of\n the FCC Field Offices.\n \"\"\"\n config = loadConfig(config_filename)\n # Very light checking of the config file.\n self.assertValidConfig(\n config, {\n 'registrationRequest': dict,\n 'conditionalRegistrationData': list,\n 'grantRequest': dict\n })\n\n # Whitelist FCC ID and User ID.\n self._sas_admin.InjectFccId({\n 'fccId': config['registrationRequest']['fccId']\n })\n 
self._sas_admin.InjectUserId({\n 'userId': config['registrationRequest']['userId']\n })\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Register CBSD.\n request = {'registrationRequest': [config['registrationRequest']]}\n response = self._sas.Registration(request)['registrationResponse']\n\n # Check registration response.\n self.assertEqual(len(response), 1)\n if response[0]['response']['responseCode'] != 0:\n return # SAS passes immediately in this case.\n cbsd_id = response[0]['cbsdId']\n del request, response\n\n # Calculate the closest FCC office\n lat_cbsd = config['conditionalRegistrationData'][0]['installationParam'][\n 'latitude']\n lon_cbsd = config['conditionalRegistrationData'][0]['installationParam'][\n 'longitude']\n distance_offices = [\n vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, office['latitude'], office['longitude'])[0]\n for office in self.fcc_offices\n ]\n index_closest = np.argmin(distance_offices)\n closest_fcc_office = self.fcc_offices[index_closest]\n logging.info('Closest FCC office Lat: %f', closest_fcc_office['latitude'])\n logging.info('Closest FCC office Long: %f',\n closest_fcc_office['longitude'])\n # Calculate bearing and ant_gain\n _, bearing, _ = vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, closest_fcc_office['latitude'],\n closest_fcc_office['longitude'])\n ant_gain = antenna.GetStandardAntennaGains(\n bearing,\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaAzimuth'],\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaBeamwidth'],\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaGain']\n )\n logging.info('ant_gain is %f dBi', ant_gain)\n # Gather values required for calculating EIRP\n p = config['grantRequest']['operationParam']['maxEirp']\n logging.info('Grant maxEirp is %f', p)\n max_ant_gain = (\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaGain'])\n logging.info('max_ant_gain is %f dBi', max_ant_gain)\n bw = (\n config['grantRequest']['operationParam']['operationFrequencyRange']\n ['highFrequency'] - config['grantRequest']['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('bw is %f MHz', bw)\n # Calculate EIRP to verify grant response\n eirp = (p - max_ant_gain + ant_gain + (10 * np.log10(bw)))\n logging.info('EIRP is %f dBm', eirp)\n\n # If successfully registered, CBSD sends a grant request\n config['grantRequest']['cbsdId'] = cbsd_id\n grant_request = config['grantRequest']\n request = {'grantRequest': [grant_request]}\n response = self._sas.Grant(request)['grantResponse']\n # Check grant response\n self.assertEqual(len(response), 1)\n # If EIRP <= 49.15 dBm = SUCCESS\n if eirp <= 49.15:\n self.assertEqual(response[0]['response']['responseCode'], 0)\n # If EIRP > 49.15 dBm = INTERFERENCE\n else:\n self.assertEqual(response[0]['response']['responseCode'], 400)\n\n def generate_QPR_8_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.8.\"\"\"\n\n # Load device info\n # Cat B - between 2.4 km - 4.8 km of Waipahu, Hawaii Field Office.\n device_b = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_b['installationParam']['latitude'] = 21.397934\n device_b['installationParam']['longitude'] = -158.034459\n\n # device_b is Category B with conditionals pre-loaded.\n 
self.assertEqual(device_b['cbsdCategory'], 'B')\n conditionals_b = {\n 'cbsdCategory': device_b['cbsdCategory'],\n 'fccId': device_b['fccId'],\n 'cbsdSerialNumber': device_b['cbsdSerialNumber'],\n 'airInterface': device_b['airInterface'],\n 'installationParam': device_b['installationParam'],\n 'measCapability': device_b['measCapability']\n }\n del device_b['installationParam']\n del device_b['cbsdCategory']\n del device_b['airInterface']\n del device_b['measCapability']\n\n # Grant Requests\n grant_0 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant_1 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3670000000\n }\n }\n }\n # Create the actual config.\n config = {\n 'registrationRequest': device_b,\n 'conditionalRegistrationData': [conditionals_b],\n 'grantRequest1': grant_0,\n 'grantRequest2': grant_1\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_8_default_config)\n def test_WINNF_FT_S_QPR_8(self, config_filename):\n \"\"\"[Configurable] Unsuccessful Grant Request from CBSDs within 4.8 km of\n the FCC Field Offices with multiple grants.\n \"\"\"\n config = loadConfig(config_filename)\n self.assertValidConfig(\n config, {\n 'registrationRequest': dict,\n 'conditionalRegistrationData': list,\n 'grantRequest1': dict,\n 'grantRequest2': dict\n })\n\n # Whitelist FCC ID and User ID.\n self._sas_admin.InjectFccId({\n 'fccId': config['registrationRequest']['fccId']\n })\n self._sas_admin.InjectUserId({\n 'userId': config['registrationRequest']['userId']\n })\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Step 1: Register CBSD.\n request = {'registrationRequest': [config['registrationRequest']]}\n response = self._sas.Registration(request)['registrationResponse']\n\n # Check registration response.\n self.assertEqual(len(response), 1)\n if response[0]['response']['responseCode'] != 0:\n return # SAS passes immediately in this case.\n cbsd_id = response[0]['cbsdId']\n del request, response\n\n # Step 2: Request first grant.\n config['grantRequest1']['cbsdId'] = cbsd_id\n grant_request = config['grantRequest1']\n request = {'grantRequest': [grant_request]}\n response = self._sas.Grant(request)['grantResponse'][0]\n # Check if the first grant response is successful.\n grant1_approved = response['response']['responseCode'] == 0\n del request, response\n\n # Step 3: Request second grant\n config['grantRequest2']['cbsdId'] = cbsd_id\n grant_request = config['grantRequest2']\n request = {'grantRequest': [grant_request]}\n response = self._sas.Grant(request)['grantResponse'][0]\n # Check if the second grant response is successful.\n grant2_approved = response['response']['responseCode'] == 0\n del request, response\n\n if not (grant1_approved or grant2_approved):\n logging.info('Both grant requests were rejected')\n return # SAS passes immediately in this case.\n\n # Calculate the closest FCC office\n lat_cbsd = config['conditionalRegistrationData'][0]['installationParam'][\n 'latitude']\n lon_cbsd = config['conditionalRegistrationData'][0]['installationParam'][\n 'longitude']\n distance_offices = [\n vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, office['latitude'], office['longitude'])[0]\n for office in self.fcc_offices\n ]\n 
index_closest = np.argmin(distance_offices)\n closest_fcc_office = self.fcc_offices[index_closest]\n logging.info('Closest FCC Office Lat: %f', closest_fcc_office['latitude'])\n logging.info('Closest FCC Office Long: %f', closest_fcc_office['longitude'])\n # Calculate bearing and ant_gain\n _, bearing, _ = vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, closest_fcc_office['latitude'],\n closest_fcc_office['longitude'])\n logging.info('Bearing: %f', bearing)\n ant_gain_dbi = antenna.GetStandardAntennaGains(\n bearing,\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaAzimuth'],\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaBeamwidth'],\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaGain']\n )\n logging.info('Ant Gain: %f dBi', ant_gain_dbi)\n max_ant_gain_dbi = (\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaGain'])\n logging.info('Max Ant Gain: %f dBi', max_ant_gain_dbi)\n\n # Gather values required for calculating Total EIRP\n grant1_eirp = 0\n if grant1_approved:\n p1 = config['grantRequest1']['operationParam']['maxEirp']\n logging.info('Grant 1 Max Eirp: %f dBm/MHz', p1)\n bw1 = (config['grantRequest1']['operationParam']\n ['operationFrequencyRange']['highFrequency'] -\n config['grantRequest1']['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('Grant 1 Bandwidth: %f', bw1)\n grant1_eirp = (10**(p1/10.0)) * bw1\n logging.info('Grant 1 nominal EIRP is %f', grant1_eirp)\n\n grant2_eirp = 0\n if grant2_approved:\n p2 = config['grantRequest2']['operationParam']['maxEirp']\n logging.info('Grant 2 Max Eirp: %f dBm/MHz', p2)\n bw2 = (config['grantRequest2']['operationParam']\n ['operationFrequencyRange']['highFrequency'] -\n config['grantRequest2']['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('Grant 2 Bandwidth: %f', bw2)\n grant2_eirp = (10**(p2/10.0)) * bw2\n logging.info('Grant 2 nominal EIRP is %f', grant2_eirp)\n\n # Step 4: Calculate Total EIRP\n total_eirp_dbm = ant_gain_dbi - max_ant_gain_dbi + (\n 10 * np.log10(grant1_eirp + grant2_eirp))\n logging.info('Total EIRP is %f dBm', total_eirp_dbm)\n\n # CHECK: Total EIRP of all approved grants is <= 49.15 dBm\n self.assertLessEqual(total_eirp_dbm, 49.15)\n", "# Copyright 2017 SAS Project Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"WinnForum-specific version of ITM propagation model.\n\nTypical usage:\n # Configure the terrain driver (memory use is: cache_size * 50MB)\n from reference_models.geo import drive\n drive.ConfigureTerrainDriver(terrain_dir=my_ned_path, cache_size=16)\n\n # Get the path loss and incidence angles\n db_loss, incidence_angles, internals = CalcItmPropagationLoss(\n lat_cbsd, lon_cbsd, height_cbsd,\n lat_rx, lon_rx, height_rx,\n cbsd_indoor=False,\n reliability=0.5,\n freq_mhz=3625.)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nimport numpy as np\n\nfrom reference_models.geo import drive\nfrom reference_models.geo import vincenty\nfrom reference_models.propagation.itm import itm\n\n# TEMPORARY to avoid breaking code under PR\nterrainDriver = drive.terrain_driver\n\n\n# ITM warning codes\nclass ItmErrorCode:\n NONE = 0\n CAUTION = 1\n NOTE = 2\n WARNING = 3\n OTHER = 4\n\n_ITM_ERROR_MODES = {\n ItmErrorCode.NONE: 'No Error.',\n ItmErrorCode.CAUTION: ('Caution: Some parameters are nearly out of range.'\n ' Results should be used with caution.'),\n ItmErrorCode.NOTE: ('Note: Default parameters have been substituted for impossible ones.'),\n ItmErrorCode.WARNING: ('Warning: A combination of parameters is out of range.'\n ' Results are probably invalid.'),\n ItmErrorCode.OTHER: ('Warning: Some parameters are out of range.'\n ' Results are probably invalid.')\n}\n\n\ndef GetInfoOnItmCode(code):\n \"\"\"Get description of ITM error code.\"\"\"\n return _ITM_ERROR_MODES(code)\n\n# Defined namedtuple for nice output packing\n_PropagResult = namedtuple('_PropagResult',\n ['db_loss', 'incidence_angles', 'internals'])\n_IncidenceAngles = namedtuple('_IncidenceAngles',\n ['hor_cbsd', 'ver_cbsd', 'hor_rx', 'ver_rx'])\n\n\n# Main entry point for the Winnforum compliant ITM propagation model\ndef CalcItmPropagationLoss(lat_cbsd, lon_cbsd, height_cbsd,\n lat_rx, lon_rx, height_rx,\n cbsd_indoor=False,\n reliability=0.5,\n freq_mhz=3625.,\n its_elev=None,\n is_height_cbsd_amsl=False,\n return_internals=False):\n \"\"\"Implements the WinnForum-compliant ITM point-to-point propagation model.\n\n According to WinnForum spec R2-SGN-17, R2-SGN-22 and R2-SGN-5 to 10.\n\n One can use this routine in 3 ways:\n reliability = -1 : to get the average path loss\n reliability in [0,1] : to get a pathloss for given quantile\n sequence of reliabilities: to get an array of pathloss. Used to obtain\n inverse CDF of the pathloss.\n\n Inputs:\n lat_cbsd, lon_cbsd, height_cbsd: Lat/lon (deg) and height AGL (m) of CBSD\n lat_rx, lon_rx, height_rx: Lat/lon (deg) and height AGL (m) of Rx point\n cbsd_indoor: CBSD indoor status - Default=False.\n reliability: Reliability. 
Default is 0.5 (median value)\n Different options:\n value in [0,1]: returns the CDF quantile\n -1: returns the mean path loss\n iterable sequence: returns a list of path losses\n freq_mhz: Frequency (MHz). Default is mid-point of band.\n its_elev: Optional profile to use (in ITM format). Default=None\n If not specified, it is extracted from the terrain.\n is_height_cbsd_amsl: If True, the CBSD height shall be considered as AMSL (Above\n mean sea level).\n return_internals: If True, returns internal variables.\n\n Returns:\n A namedtuple of:\n db_loss Path Loss in dB, either a scalar if reliability is scalar\n or a list of path losses if reliability is an iterable.\n\n incidence_angles: A namedtuple of\n hor_cbsd: Horizontal departure angle (bearing) from CBSD to Rx\n ver_cbsd: Vertical departure angle at CBSD\n hor_rx: Horizontal incidence angle (bearing) from Rx to CBSD\n ver_rx: Vertical incidence angle at Rx\n\n internals: A dictionary of internal data for advanced analysis\n (only if return_internals=True):\n itm_err_num: ITM error code from ItmErrorCode (see GetInfoOnItmCode).\n itm_str_mode: String containing description of dominant prop mode.\n dist_km: Distance between end points (km).\n prof_d_km ndarray of distances (km) - x values to plot terrain.\n prof_elev ndarray of terrain heights (m) - y values to plot terrain.\n\n Raises:\n Exception if input parameters invalid or out of range.\n \"\"\"\n # Case of same points\n if (lat_cbsd == lat_rx and lon_cbsd == lon_rx):\n return _PropagResult(\n db_loss = 0 if np.isscalar(reliability) else [0] * len(reliability),\n incidence_angles = _IncidenceAngles(0,0,0,0),\n internals = None)\n\n # Sanity checks on input parameters\n if freq_mhz < 40.0 or freq_mhz > 10000:\n raise Exception('Frequency outside range [40MHz - 10GHz]')\n\n if is_height_cbsd_amsl:\n altitude_cbsd = drive.terrain_driver.GetTerrainElevation(lat_cbsd, lon_cbsd)\n height_cbsd = height_cbsd - altitude_cbsd\n\n # Ensure minimum height of 1 meter\n if height_cbsd < 1:\n height_cbsd = 1\n if height_rx < 1:\n height_rx = 1\n\n # Internal ITM parameters are always set to the following values in WF version:\n confidence = 0.5 # Confidence (always 0.5)\n dielec = 25. # Dielectric constant (always 25.)\n conductivity = 0.02 # Conductivity (always 0.02)\n polarization = 1 # Polarization (always vertical = 1)\n mdvar = 13\n\n # Get the terrain profile, using Vincenty great circle route, and WF\n # standard (bilinear interp; 1500 pts for all distances over 45 km)\n if its_elev is None:\n its_elev = drive.terrain_driver.TerrainProfile(\n lat1=lat_cbsd, lon1=lon_cbsd,\n lat2=lat_rx, lon2=lon_rx,\n target_res_meter=30.,\n do_interp=True, max_points=1501)\n\n # Find the midpoint of the great circle path\n dist_km, bearing_cbsd, bearing_rx = vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, lat_rx, lon_rx)\n latmid, lonmid, _ = vincenty.GeodesicPoint(\n lat_cbsd, lon_cbsd, dist_km/2., bearing_cbsd)\n\n # Determine climate value, based on ITU-R P.617 method:\n climate = drive.climate_driver.TropoClim(latmid, lonmid)\n # If the common volume lies over the sea, the climate value to use depends\n # on the climate values at either end. 
A simple min() function should\n # properly implement the logic, since water is the max.\n if climate == 7:\n climate = min(drive.climate_driver.TropoClim(lat_cbsd, lon_cbsd),\n drive.climate_driver.TropoClim(lat_rx, lon_rx))\n\n # Look up the refractivity at the path midpoint, if not explicitly provided\n refractivity = drive.refract_driver.Refractivity(latmid, lonmid)\n\n # Call ITM prop loss.\n reliabilities = reliability\n do_avg = False\n if np.isscalar(reliabilities) and reliability == -1:\n # Pathloss mean: average the value for 1% to 99% included\n reliabilities = np.arange(0.01, 1.0, 0.01)\n do_avg = True\n\n db_loss, ver_cbsd, ver_rx, str_mode, err_num = itm.point_to_point(\n its_elev, height_cbsd, height_rx,\n dielec, conductivity,\n refractivity, freq_mhz,\n climate, polarization,\n confidence, reliabilities,\n mdvar, False)\n if do_avg:\n db_loss = -10*np.log10(np.mean(10**(-np.array(db_loss)/10.)))\n\n # Add indoor losses\n if cbsd_indoor:\n if np.isscalar(db_loss):\n db_loss += 15\n else:\n db_loss = [loss+15 for loss in db_loss]\n\n # Create distance/terrain arrays for plotting if desired\n internals = None\n if return_internals:\n prof_d_km = (its_elev[1]/1000.) * np.arange(len(its_elev)-2)\n prof_elev = np.asarray(its_elev[2:])\n internals = {\n 'itm_err_num': err_num,\n 'itm_str_mode': str_mode,\n 'dist_km': dist_km,\n 'prof_d_km': prof_d_km,\n 'prof_elev': prof_elev\n }\n\n return _PropagResult(\n db_loss = db_loss,\n incidence_angles = _IncidenceAngles(\n hor_cbsd = bearing_cbsd,\n ver_cbsd = ver_cbsd,\n hor_rx = bearing_rx,\n ver_rx = ver_rx),\n internals = internals\n )\n\n\n# Utility function to compute the HAAT for a CBSD\ndef ComputeHaat(lat_cbsd, lon_cbsd, height_cbsd, height_is_agl=True):\n \"\"\"Computes a CBSD HAAT (Height above average terrain).\n\n Args:\n lat_cbsd, lon_cbsd: the CBSD location (degrees).\n height_cbsd: the CBSD antenna height (meters)\n height_is_agl: boolean specifying if height is AGL (Above Ground Level)\n or AMSL (Above Mean Sea Level).\n\n Returns:\n the CBSD HAAT (meters).\n \"\"\"\n norm_haat, alt_ground = drive.terrain_driver.ComputeNormalizedHaat(lat_cbsd, lon_cbsd)\n if height_is_agl:\n return height_cbsd + norm_haat\n else:\n return height_cbsd - alt_ground + norm_haat\n" ]
[ [ "numpy.log10", "numpy.argmin" ], [ "numpy.asarray", "numpy.arange", "numpy.array", "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
geexie/dpbench
[ "7d41409ded3c816f35003bc5aea071852bceb892", "7d41409ded3c816f35003bc5aea071852bceb892", "7d41409ded3c816f35003bc5aea071852bceb892" ]
[ "native_dpcpp/blackscholes/GPU/base_bs_erf.py", "numba/dbscan/CPU/base_dbscan.py", "native_dpcpp/gaussian_elim/GPU/base_gaussian_elim.py" ]
[ "# Copyright (C) 2017-2018 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport os\nimport run_utils as utils\nimport numpy as np\nfrom dpbench_datagen.blackscholes import gen_data_to_file, gen_rand_data\nfrom dpbench_python.blackscholes.bs_python import black_scholes_python\n\n# make xrange available in python 3\ntry:\n xrange\nexcept NameError:\n xrange = range\n\n\ndef ip_data_to_file(nopt):\n gen_data_to_file(nopt)\n\n\ndef gen_data_np(nopt):\n price, strike, t = gen_rand_data(nopt)\n return (\n price,\n strike,\n t,\n np.zeros(nopt, dtype=np.float64),\n -np.ones(nopt, dtype=np.float64),\n )\n\n\nRISK_FREE = 0.1\nVOLATILITY = 0.2\n\n# create input data, call blackscholes computation function (alg)\ndef run(name, sizes=14, step=2, nopt=2 ** 15):\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--steps\", required=False, default=sizes, help=\"Number of steps\"\n )\n parser.add_argument(\n \"--step\", required=False, default=step, help=\"Factor for each step\"\n )\n parser.add_argument(\n \"--size\", required=False, default=nopt, help=\"Initial data size\"\n )\n parser.add_argument(\n \"--repeat\", required=False, default=1, help=\"Iterations inside measured region\"\n )\n parser.add_argument(\n \"--usm\",\n required=False,\n action=\"store_true\",\n help=\"Use USM Shared or pure numpy\",\n )\n parser.add_argument(\n \"--test\",\n required=False,\n action=\"store_true\",\n help=\"Check for correctness by comparing output with naieve Python version\",\n )\n\n args = parser.parse_args()\n sizes = int(args.steps)\n step = int(args.step)\n nopt = int(args.size)\n repeat = int(args.repeat)\n\n clean_string = [\"make\", \"clean\"]\n utils.run_command(clean_string, verbose=True)\n\n if args.usm:\n build_string = [\"make\", \"comp\"]\n utils.run_command(build_string, verbose=True)\n exec_name = \"./black_scholes_comp\"\n else:\n build_string = [\"make\"]\n utils.run_command(build_string, verbose=True)\n exec_name = \"./black_scholes\"\n\n if args.test:\n # run sequential python\n price, strike, t, p_call, p_put = gen_data_np(nopt)\n black_scholes_python(\n nopt, price, strike, t, RISK_FREE, VOLATILITY, p_call, p_put\n )\n\n # run dpcpp\n ip_data_to_file(nopt)\n run_cmd = [exec_name, str(nopt), str(1), \"-t\"]\n utils.run_command(run_cmd, verbose=True)\n\n # read output of dpcpp into n_call, n_put\n n_call = np.fromfile(\"call.bin\", np.float64)\n\n # read output of dpcpp into n_call, n_put\n n_put = np.fromfile(\"put.bin\", np.float64)\n\n # compare outputs\n if np.allclose(n_call, p_call) and np.allclose(n_put, p_put):\n print(\"Test succeeded\\n\")\n else:\n print(\"Test failed\\n\")\n return\n\n if os.path.isfile(\"runtimes.csv\"):\n os.remove(\"runtimes.csv\")\n\n for i in xrange(sizes):\n # generate input data\n ip_data_to_file(nopt)\n\n # run the C program\n run_cmd = [exec_name, str(nopt), str(repeat)]\n utils.run_command(run_cmd, verbose=True)\n nopt *= step\n repeat -= step\n if repeat < 1:\n repeat = 1\n\n\nif __name__ == \"__main__\":\n run(\"Blackscholes dpcpp\")\n", "# *****************************************************************************\n# Copyright (c) 2020, Intel Corporation All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce 
the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\nimport argparse\nimport sys\nimport numpy as np\nimport numpy.random as rnd\nimport sys, json\nfrom typing import NamedTuple\nfrom sklearn.datasets import make_blobs\nfrom sklearn.preprocessing import StandardScaler\nimport dbscan_python\n\ntry:\n import itimer as it\n\n now = it.itime\n get_mops = it.itime_mops_now\nexcept:\n from timeit import default_timer\n\n now = default_timer\n get_mops = lambda t0, t1, n: (n / (t1 - t0), t1 - t0)\n\n######################################################\n# GLOBAL DECLARATIONS THAT WILL BE USED IN ALL FILES #\n######################################################\n\n# make xrange available in python 3\ntry:\n xrange\nexcept NameError:\n xrange = range\n\n\n###############################################\n\n\nclass DataSize(NamedTuple):\n n_samples: int\n n_features: int\n\n\nclass Params(NamedTuple):\n eps: float\n minpts: int\n\n\nSEED = 7777777\nOPTIMAL_PARAMS = {\n DataSize(n_samples=2 ** 8, n_features=2): Params(eps=0.173, minpts=4),\n DataSize(n_samples=2 ** 8, n_features=3): Params(eps=0.35, minpts=6),\n DataSize(n_samples=2 ** 8, n_features=10): Params(eps=0.8, minpts=20),\n DataSize(n_samples=2 ** 9, n_features=2): Params(eps=0.15, minpts=4),\n DataSize(n_samples=2 ** 9, n_features=3): Params(eps=0.1545, minpts=6),\n DataSize(n_samples=2 ** 9, n_features=10): Params(eps=0.7, minpts=20),\n DataSize(n_samples=2 ** 10, n_features=2): Params(eps=0.1066, minpts=4),\n DataSize(n_samples=2 ** 10, n_features=3): Params(eps=0.26, minpts=6),\n DataSize(n_samples=2 ** 10, n_features=10): Params(eps=0.6, minpts=20),\n DataSize(n_samples=2 ** 11, n_features=2): Params(eps=0.095, minpts=4),\n DataSize(n_samples=2 ** 11, n_features=3): Params(eps=0.18, minpts=6),\n DataSize(n_samples=2 ** 11, n_features=10): Params(eps=0.6, minpts=20),\n DataSize(n_samples=2 ** 12, n_features=2): Params(eps=0.0715, minpts=4),\n DataSize(n_samples=2 ** 12, n_features=3): Params(eps=0.17, minpts=6),\n DataSize(n_samples=2 ** 12, n_features=10): Params(eps=0.6, minpts=20),\n DataSize(n_samples=2 ** 13, n_features=2): Params(eps=0.073, minpts=4),\n DataSize(n_samples=2 ** 13, n_features=3): Params(eps=0.149, minpts=6),\n DataSize(n_samples=2 ** 13, n_features=10): Params(eps=0.6, minpts=20),\n DataSize(n_samples=2 ** 14, n_features=2): Params(eps=0.0695, minpts=4),\n DataSize(n_samples=2 ** 14, n_features=3): Params(eps=0.108, minpts=6),\n DataSize(n_samples=2 ** 14, n_features=10): Params(eps=0.6, minpts=20),\n DataSize(n_samples=2 ** 15, n_features=2): 
Params(eps=0.0695, minpts=4),\n DataSize(n_samples=2 ** 15, n_features=3): Params(eps=0.108, minpts=6),\n DataSize(n_samples=2 ** 15, n_features=10): Params(eps=0.6, minpts=20),\n DataSize(n_samples=2 ** 16, n_features=2): Params(eps=0.0695, minpts=4),\n DataSize(n_samples=2 ** 16, n_features=3): Params(eps=0.108, minpts=6),\n DataSize(n_samples=2 ** 16, n_features=10): Params(eps=0.6, minpts=20),\n}\n\n\ndef gen_data(n_samples, n_features, centers=10, random_state=SEED):\n X, *_ = make_blobs(\n n_samples=n_samples, n_features=n_features, centers=centers, random_state=SEED\n )\n X = StandardScaler().fit_transform(X)\n\n return X.flatten()\n\n\n##############################################\n\n\ndef run(name, alg, sizes=5, step=2, nopt=2 ** 10):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--steps\", type=int, default=sizes, help=\"Number of steps\")\n parser.add_argument(\"--step\", type=int, default=step, help=\"Factor for each step\")\n parser.add_argument(\"--size\", type=int, default=nopt, help=\"Initial data size\")\n parser.add_argument(\n \"--repeat\", type=int, default=1, help=\"Iterations inside measured region\"\n )\n parser.add_argument(\"--dims\", type=int, default=10, help=\"Dimensions\")\n parser.add_argument(\"--eps\", type=float, default=0.6, help=\"Neighborhood value\")\n parser.add_argument(\"--minpts\", type=int, default=20, help=\"minPts\")\n parser.add_argument(\n \"--json\",\n required=False,\n default=__file__.replace(\"py\", \"json\"),\n help=\"output json data filename\",\n )\n parser.add_argument(\n \"--test\",\n required=False,\n action=\"store_true\",\n help=\"Check for correctness by comparing output with naive Python version\",\n )\n\n args = parser.parse_args()\n nopt = args.size\n repeat = args.repeat\n\n output = {}\n output[\"name\"] = name\n output[\"sizes\"] = sizes\n output[\"step\"] = step\n output[\"repeat\"] = repeat\n output[\"randseed\"] = SEED\n output[\"metrics\"] = []\n\n rnd.seed(SEED)\n\n if args.test:\n data = gen_data(nopt, args.dims)\n assignments = np.empty(nopt, dtype=np.int64)\n data_size = DataSize(n_samples=nopt, n_features=args.dims)\n params = OPTIMAL_PARAMS.get(data_size, Params(eps=args.eps, minpts=args.minpts))\n minpts = params.minpts or args.minpts\n eps = params.eps or args.eps\n\n p_nclusters = dbscan_python.dbscan(\n nopt, args.dims, data, eps, minpts, assignments\n )\n n_nclusters = alg(nopt, args.dims, data, eps, minpts, assignments)\n\n if np.allclose(n_nclusters, p_nclusters):\n print(\"Test succeeded\\n\")\n else:\n print(\"Test failed\\n\")\n return\n\n with open(\"perf_output.csv\", \"w\", 1) as mops_fd, open(\n \"runtimes.csv\", \"w\", 1\n ) as runtimes_fd:\n for _ in xrange(args.steps):\n data = gen_data(nopt, args.dims)\n assignments = np.empty(nopt, dtype=np.int64)\n\n data_size = DataSize(n_samples=nopt, n_features=args.dims)\n params = OPTIMAL_PARAMS.get(\n data_size, Params(eps=args.eps, minpts=args.minpts)\n )\n # if params.eps is None or params.minpts is None:\n # err_msg_tmpl = 'ERF: {}: Size: {} Dim: {} Eps: {} minPts: {}'\n # raise ValueError(err_msg_tmpl.format(name, nopt, args.dims, params.eps, params.minpts))\n\n minpts = params.minpts or args.minpts\n eps = params.eps or args.eps\n\n nclusters = alg(nopt, args.dims, data, eps, minpts, assignments) # warmup\n\n t0 = now()\n for _ in xrange(repeat):\n nclusters = alg(nopt, args.dims, data, eps, minpts, assignments)\n mops, time = get_mops(t0, now(), nopt)\n result_mops = mops * repeat / 1e6\n\n print(\n \"ERF: {:15s} | Size: {:10d} | 
MOPS: {:15.2f} | TIME: {:10.6f}\".format(\n name, nopt, result_mops, time\n ),\n flush=True,\n )\n output[\"metrics\"].append((nopt, mops, time))\n\n mops_fd.write(\n \"{},{},{},{},{},{}\\n\".format(\n nopt, args.dims, eps, minpts, nclusters, result_mops\n )\n )\n runtimes_fd.write(\n \"{},{},{},{},{},{}\\n\".format(\n nopt, args.dims, eps, minpts, nclusters, time\n )\n )\n\n nopt *= args.step\n repeat = max(repeat - args.step, 1)\n json.dump(output, open(args.json, \"w\"), indent=2, sort_keys=True)\n", "# Copyright (C) 2017-2018 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport os\nimport run_utils as utils\nimport numpy as np\nimport argparse\nfrom dpbench_datagen.gaussian_elim import gen_matrix, gen_vec, gen_data_to_file\n\n\ndef run(name, sizes=1, step=2, nopt=2 ** 2):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--steps\", type=int, default=sizes, help=\"Number of steps\")\n parser.add_argument(\"--step\", type=int, default=step, help=\"Factor for each step\")\n parser.add_argument(\"--size\", type=int, default=nopt, help=\"Initial data size\")\n parser.add_argument(\n \"--repeat\", type=int, default=1, help=\"Iterations inside measured region\"\n )\n parser.add_argument(\n \"--test\",\n required=False,\n action=\"store_true\",\n help=\"Check for correctness by comparing output with naive Python version\",\n )\n parser.add_argument(\n \"--usm\",\n required=False,\n action=\"store_true\",\n help=\"Use USM Shared or data transfer\",\n )\n parser.add_argument(\n \"--json\",\n required=False,\n default=__file__.replace(\"py\", \"json\"),\n help=\"output json data filename\",\n )\n\n args = parser.parse_args()\n nopt = args.size\n repeat = args.repeat\n\n clean_string = [\"make\", \"clean\"]\n utils.run_command(clean_string, verbose=True)\n\n if args.usm:\n build_string = [\"make\", \"comp\"]\n utils.run_command(build_string, verbose=True)\n exec_name = \"./gaussian_comp\"\n else:\n build_string = [\"make\"]\n utils.run_command(build_string, verbose=True)\n exec_name = \"./gaussian\"\n\n if args.test:\n reference_result = [5.02e-02, 5.00e-04, 5.00e-04, 5.02e-02]\n ref_size = 4\n\n # run dpcpp\n gen_data_to_file(ref_size, 1.0)\n # run the C program\n run_cmd = [exec_name, str(ref_size), str(1), \"-t\"]\n utils.run_command(run_cmd, verbose=True)\n\n # read output of dpcpp\n result = np.fromfile(\"result.bin\", np.float32)\n\n if np.allclose(result, reference_result):\n print(\n \"Test succeeded. Python result: \",\n reference_result,\n \" DPC++ result: \",\n result,\n \"\\n\",\n )\n else:\n print(\n \"Test failed. Python result: \",\n reference_result,\n \" DPC++ result: \",\n result,\n \"\\n\",\n )\n return\n\n for _ in range(args.steps):\n # generate input data\n # value = 1.0 for the vector of coefficients (b)\n gen_data_to_file(nopt, 1.0)\n\n # run the C program\n run_cmd = [exec_name, str(nopt), str(repeat)]\n utils.run_command(run_cmd, verbose=True)\n nopt *= step\n repeat -= step\n if repeat < 1:\n repeat = 1\n\n if os.path.isfile(\"./gaussian\"):\n os.remove(\"./gaussian\")\n\n if os.path.isfile(\"./gaussian_comp\"):\n os.remove(\"./gaussian_comp\")\n\n if os.path.isfile(\"m_data.bin\"):\n os.remove(\"m_data.bin\")\n\n if os.path.isfile(\"v_data.bin\"):\n os.remove(\"v_data.bin\")\n\n\nif __name__ == \"__main__\":\n run(\"Gaussian elimination dpcpp\")\n" ]
[ [ "numpy.fromfile", "numpy.zeros", "numpy.allclose", "numpy.ones" ], [ "numpy.allclose", "numpy.random.seed", "sklearn.datasets.make_blobs", "sklearn.preprocessing.StandardScaler", "numpy.empty" ], [ "numpy.fromfile", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fyabc/MSRAPaperProject
[ "2d7974acfe8065523d0c56da695807e94acd0b34" ]
[ "MyMLFramework/tests.py" ]
[ "#! /usr/bin/python3\n# -*- encoding: utf-8 -*-\n\nfrom __future__ import unicode_literals, print_function\n\nimport numpy as np\nimport theano.tensor as T\nfrom theano import shared, config\n\nfrom model import SimpleModel, Model\nfrom layers.layer import Dense\nfrom layers.activations import Activation\n\n__author__ = 'fyabc'\n\nfX = config.floatX\n\n\ndef testSimpleModel():\n # config.optimizer = 'None'\n # config.exception_verbosity = 'high'\n\n # def initNorm(*args):\n # return np.asarray(np.random.randn(*args) * 0.1, dtype=fX)\n\n model = SimpleModel()\n model.setInput(2)\n\n W1 = shared(value=np.asarray([\n [1, 2, 3],\n [4, 0, 1],\n ], dtype=fX), name='W1')\n b1 = shared(value=np.asarray([\n 1, 2, 4,\n ], dtype=fX), name='b1')\n\n def layer1(x):\n return T.nnet.relu(T.dot(x, W1) + b1)\n\n model.addRaw(layer1, [W1, b1])\n\n model.compile()\n\n result = model.objectiveFunction([[6, 1], [7, 2], [8, 3]], [[7, 6, 2], [1, 2, 3], [4, 0, 5]])\n\n print(result)\n\n\ndef testModel():\n model = Model()\n model.add(Dense(\n outputShape=(3, 3,),\n inputShape=(3, 2,)\n ))\n model.add(Activation('sigmoid'))\n\n model.compile()\n\n result = model.objectiveFunction([[6, 1], [7, 2], [8, 3]], [[7, 6, 2], [1, 2, 3], [4, 0, 5]])\n\n print(result)\n\n\ndef test():\n # testSimpleModel()\n testModel()\n\n\nif __name__ == '__main__':\n test()\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YuxinZou/volkscv
[ "67ac83f0c0ac85bd6606053732b454db17c53de0" ]
[ "volkscv/utils/parser/txt_parse.py" ]
[ "import os\n\nimport numpy as np\n\nfrom .base import BaseParser\nfrom .utils import read_imglist\n\n\nclass TXTParser(BaseParser):\n \"\"\"Class of parser for classification TXT annotation file.\n\n xxx.png dog\n xxx.png cat\n xxxx.png dog\n\n Args:\n anno_path (str): Path of annotation file.\n categories (list or tuple): All categories of data.\n \"\"\"\n\n def __init__(self,\n categories=None,\n **kwargs):\n super(TXTParser, self).__init__(**kwargs)\n\n self.categories = categories\n assert self.imgs_list is not None, \\\n \"For txt file parser, the imgs_list attribute shouldn't be None.\"\n\n def __call__(self, need_shape=True):\n fname_list, labels_list, shapes_list, scores_list = [], [], [], []\n fnames, annos = read_imglist(self.txt_file)\n for fname, anno in zip(fnames, annos):\n fname = os.path.join(self.imgs_folder, fname)\n height, width = self._get_shape(fname) if need_shape else (0, 0)\n shapes_list.append([width, height])\n fname_list.append(fname)\n assert anno[0] in self.categories, \\\n f'Label: {anno[0]} is not in categories.'\n labels_list.append(self.categories.index(anno[0]))\n if len(anno) > 1:\n scores_list.append(float(anno[1]))\n\n self.result = dict(\n img_names=np.array(fname_list),\n categories=np.array(self.categories),\n shapes=np.array(shapes_list),\n labels=np.array(labels_list),\n scores=np.array(scores_list) if len(scores_list) else None,\n )\n return self.result\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mabdulhussin/openpilot
[ "3988e14c95043c7f1d35bfe5edbd7701fa44f9bb", "3988e14c95043c7f1d35bfe5edbd7701fa44f9bb" ]
[ "selfdrive/mapd/test/test_NodesData.py", "selfdrive/mapd/lib/Route.py" ]
[ "import unittest\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom selfdrive.mapd.lib.mock_data import MockRoad\nfrom selfdrive.mapd.lib.NodesData import vectors\n\n\nclass TestNodesData(unittest.TestCase):\n def test_vectors(self):\n points = np.radians(MockRoad.road1_points_grad)\n expected = np.array([\n [-1.34011951e-05, 1.00776468e-05],\n [-5.83610920e-06, 4.41046897e-06],\n [-7.83348567e-06, 5.94114032e-06],\n [-7.08560788e-06, 5.30408795e-06],\n [-6.57632550e-06, 4.05791838e-06],\n [-1.16077872e-06, 6.91151252e-07],\n [-1.53178098e-05, 9.62215139e-06],\n [-5.76314175e-06, 3.55176643e-06],\n [-1.61124141e-05, 9.86127759e-06],\n [-1.48006628e-05, 8.58192512e-06],\n [-1.72237209e-06, 1.60570482e-06],\n [-8.68985228e-06, 9.22062311e-06],\n [-1.42922812e-06, 1.51494711e-06],\n [-3.39761486e-06, 2.57087743e-06],\n [-2.75467373e-06, 1.28631255e-06],\n [-1.57501989e-05, 5.72309451e-06],\n [-2.52143954e-06, 1.34565295e-06],\n [-1.65278643e-06, 1.28630942e-06],\n [-2.22196114e-05, 1.64360838e-05],\n [-5.88675934e-06, 4.08234746e-06],\n [-1.83673390e-06, 1.46782408e-06],\n [-1.55004206e-06, 1.51843800e-06],\n [-1.20451533e-06, 2.06298011e-06],\n [-1.91801338e-06, 4.64083285e-06],\n [-2.38653483e-06, 5.60076524e-06],\n [-1.65269781e-06, 5.78402290e-06],\n [-3.66908309e-07, 2.75412965e-06],\n [0.00000000e+00, 1.92858882e-06],\n [9.09242615e-08, 2.66162711e-06],\n [3.14490354e-07, 1.53065382e-06],\n [8.66452477e-08, 4.83456208e-07],\n [2.41750593e-07, 1.10828411e-06],\n [7.43745228e-06, 1.27618831e-05],\n [5.59968054e-06, 9.63947367e-06],\n [2.01951467e-06, 2.75413219e-06],\n [4.59952643e-07, 6.42281301e-07],\n [1.74353749e-06, 1.74533121e-06],\n [2.57144338e-06, 2.11185266e-06],\n [1.46893187e-05, 1.11999169e-05],\n [3.84659229e-05, 2.85527952e-05],\n [2.71627936e-05, 1.98727946e-05],\n [8.44632540e-06, 6.15058628e-06],\n [2.29420323e-06, 1.92859222e-06],\n [2.58083439e-06, 3.16952222e-06],\n [3.76373643e-06, 5.14174911e-06],\n [5.32416098e-06, 6.51707770e-06],\n [8.62890928e-06, 1.11998258e-05],\n [1.25762497e-05, 1.65231340e-05],\n [8.90452991e-06, 1.10148240e-05],\n [4.86505726e-06, 4.59023120e-06],\n [3.85545276e-06, 3.39642031e-06],\n [3.48753893e-06, 3.30566145e-06],\n [2.99557303e-06, 2.61276368e-06],\n [2.15496788e-06, 1.87797727e-06],\n [4.10564937e-06, 3.58142649e-06],\n [1.53680853e-06, 1.33866906e-06],\n [4.99540175e-06, 4.35635790e-06],\n [1.37744970e-06, 1.19380643e-06],\n [1.74319821e-06, 1.28456429e-06],\n [9.99931238e-07, 1.14493663e-06],\n [6.42735560e-07, 1.19380547e-06],\n [3.66818436e-07, 1.46782199e-06],\n [5.45413874e-08, 1.83783170e-06],\n [-1.35818548e-07, 1.14842666e-06],\n [-5.50758101e-07, 3.02989178e-06],\n [-4.58785270e-07, 2.66162724e-06],\n [-2.51315555e-07, 1.19031459e-06],\n [-3.91409773e-07, 1.65457223e-06],\n [-2.14525206e-06, 5.67755902e-06],\n [-4.24558096e-07, 1.39102753e-06],\n [-1.46936730e-06, 5.32325561e-06],\n [-1.37632061e-06, 4.59021715e-06],\n [-8.26642899e-07, 4.68097349e-06],\n [-6.42702724e-07, 4.95673534e-06],\n [-3.66796960e-07, 7.25009780e-06],\n [-1.82861669e-07, 8.99542699e-06],\n [4.09564134e-07, 6.11214315e-06],\n [7.80629912e-08, 1.45734993e-06],\n [4.81205526e-07, 7.56076647e-06],\n [2.01036346e-07, 2.42775302e-06]])\n\n v = vectors(points)\n assert_array_almost_equal(v, expected)\n", "from selfdrive.mapd.lib.NodesData import NodesData, NodeDataIdx\nfrom selfdrive.mapd.config import QUERY_RADIUS\nfrom selfdrive.mapd.lib.geo import ref_vectors, R, distance_to_points\nfrom itertools import compress\nimport 
numpy as np\n\n\n_ACCEPTABLE_BEARING_DELTA_COSINE = -0.7 # Continuation paths with a bearing of 180 +/- 45 degrees.\n_MAX_ALLOWED_BEARING_DELTA_COSINE_AT_EDGE = -0.3420 # bearing delta at route edge must be 180 +/- 70 degrees.\n_MAP_DATA_EDGE_DISTANCE = 50 # mts. Consider edge of map data from this distance to edge of query radius.\n\n\nclass Route():\n \"\"\"A set of consecutive way relations forming a default driving route.\n \"\"\"\n def __init__(self, current, wr_index, way_collection_id, query_center):\n \"\"\"Create a Route object from a given `wr_index` (Way relation index)\n\n Args:\n current (WayRelation): The Way Relation that is currently located. It must be active.\n wr_index (Dict(NodeId, [WayRelation])): The index of WayRelations by node id of an edge node.\n way_collection_id (UUID): The id of the Way Collection that created this Route.\n query_center (Numpy Array): [lat, lon] numpy array in radians indicating the center of the data query.\n \"\"\"\n self.way_collection_id = way_collection_id\n self._ordered_way_relations = []\n self._nodes_data = None\n self._reset()\n\n # An active current way is needed to be able to build a route\n if not current.active:\n return\n\n # Build the route by iteratively finding the best matching ways continuing after the end of the\n # current (last_wr) way. Use the index to find the continuation possibilities on each iteration.\n last_wr = current\n ordered_way_ids = []\n while True:\n # - Append current element to the route list of ordered way relations.\n self._ordered_way_relations.append(last_wr)\n ordered_way_ids.append(last_wr.id)\n\n # - Get the id of the node at the end of the way and then fetch the way relations that share the end node id.\n last_node_id = last_wr.last_node.id\n way_relations = wr_index[last_node_id]\n\n # - If no more way_relations than last_wr, we got to the end.\n if len(way_relations) == 1:\n break\n\n # - Get the coordinates for the edge node and build the array of coordinates for the nodes before the edge node\n # on each of the common way relations, then get the vectors in cartesian plane for the end sections of each way.\n ref_point = last_wr.last_node_coordinates\n points = np.array(list(map(lambda wr: wr.node_before_edge_coordinates(last_node_id), way_relations)))\n v = ref_vectors(ref_point, points) * R\n\n # - Calculate the bearing (from true north clockwise) for every end section of each way.\n b = np.arctan2(v[:, 0], v[:, 1])\n\n # - Find index of last_wr section and calculate deltas of bearings to the other sections.\n last_wr_idx = way_relations.index(last_wr)\n b_ref = b[last_wr_idx]\n delta = b - b_ref\n\n # - Update the direction of the possible route continuation ways as starting from last_node_id.\n # Make sure to exclude any ways already included in the ordered list as to not modify direction when there\n # are looping roads (like roundabouts). 
A way will never be included twice in a route anyway.\n for wr in way_relations:\n if wr.id not in ordered_way_ids:\n wr.update_direction_from_starting_node(last_node_id)\n\n # - Filter the possible route continuation way relations:\n # - exclude any way already added to the ordered list.\n # - exclude all way relations that are prohibited due to traffic direction.\n mask = [wr.id not in ordered_way_ids and not wr.is_prohibited for wr in way_relations]\n way_relations = list(compress(way_relations, mask))\n delta = delta[mask]\n\n # if no options left, we got to the end.\n if len(way_relations) == 0:\n break\n\n # - The cosine of the bearing delta will aid us in choosing the way that continues. The cosine is\n # minimum (-1) for a perfect straight continuation as delta would be pi or -pi.\n cos_delta = np.cos(delta)\n\n def pick_best_idx(cos_delta):\n \"\"\"Selects the best index on `cos_delta` array for a way that continues the route.\n In principle we want to choose the way that continues as straight as possible.\n But we need to make sure that if there are 2 or more ways continuing relatively straight, then we\n need to disambiguate, either by matching the `ref` or `name` value of the continuing way with the\n last way selected.\n This can prevent cases where the chosen route could be for instance an exit ramp of a way due to the fact\n that the ramp has a better match on bearing to previous way. We choose to stay on the road with the same `ref`\n or `name` value if available.\n If there is no ambiguity or there are no `name` or `ref` values to disambiguate, then we pick the one with\n the straightest following direction.\n \"\"\"\n # Find the indexes of the cosine of the deltas that are considered straight enough to continue.\n idxs = np.nonzero(cos_delta < _ACCEPTABLE_BEARING_DELTA_COSINE)[0]\n\n # If no ambiguity or no way to break it, just return the straightest line.\n if len(idxs) <= 1 or (last_wr.ref is None and last_wr.name is None):\n # The section with the best continuation is the one with a bearing delta closest to pi. This is equivalent\n # to taking the one with the smallest cosine of the bearing delta, as cosine is minimum (-1) on both pi\n # and -pi.\n return np.argmin(cos_delta)\n\n wrs = [way_relations[idx] for idx in idxs]\n\n # If we find a continuation way with the same reference we just choose it.\n refs = list(map(lambda wr: wr.ref, wrs))\n if last_wr.ref is not None:\n idx = next((idx for idx, ref in enumerate(refs) if ref == last_wr.ref), None)\n if idx is not None:\n return idxs[idx]\n\n # If we find a continuation way with the same name we just choose it.\n names = list(map(lambda wr: wr.name, wrs))\n if last_wr.name is not None:\n idx = next((idx for idx, name in enumerate(names) if name == last_wr.name), None)\n if idx is not None:\n return idxs[idx]\n\n # We did not manage to disambiguate, choose straightest path.\n return np.argmin(cos_delta)\n\n # Get the index of the continuation way.\n best_idx = pick_best_idx(cos_delta)\n\n # - Make sure to not select as route continuation a way that turns too much if we are close to the border of\n # map data queried. 
This is to avoid building a route that takes a sharp turn just because we do not have the\n # data for the way that actually continues straight.\n if cos_delta[best_idx] > _MAX_ALLOWED_BEARING_DELTA_COSINE_AT_EDGE:\n dist_to_center = distance_to_points(query_center, np.array([ref_point]))[0]\n if dist_to_center > QUERY_RADIUS - _MAP_DATA_EDGE_DISTANCE:\n break\n\n # - Select next way.\n last_wr = way_relations[best_idx]\n\n # Build the node data from the ordered list of way relations\n self._nodes_data = NodesData(self._ordered_way_relations, wr_index)\n\n # Locate where we are in the route node list.\n self._locate()\n\n def __repr__(self):\n count = self._nodes_data.count if self._nodes_data is not None else None\n return f'Route: {self.way_collection_id}, idx ahead: {self._ahead_idx} of {count}'\n\n def _reset(self):\n self._limits_ahead = None\n self._curvature_limits_ahead = None\n self._curvatures_ahead = None\n self._ahead_idx = None\n self._distance_to_node_ahead = None\n\n @property\n def located(self):\n return self._ahead_idx is not None\n\n def _locate(self):\n \"\"\"Will resolve the index in the nodes_data list for the node ahead of the current location.\n It updates as well the distance from the current location to the node ahead.\n \"\"\"\n current = self.current_wr\n if current is None:\n return\n\n node_ahead_id = current.node_ahead.id\n self._distance_to_node_ahead = current.distance_to_node_ahead\n start_idx = self._ahead_idx if self._ahead_idx is not None else 1\n self._ahead_idx = None\n\n ids = self._nodes_data.get(NodeDataIdx.node_id)\n for idx in range(start_idx, len(ids)):\n if ids[idx] == node_ahead_id:\n self._ahead_idx = idx\n break\n\n @property\n def current_wr(self):\n return self._ordered_way_relations[0] if len(self._ordered_way_relations) else None\n\n def update(self, location_rad, bearing_rad, accuracy):\n \"\"\"Will update the route structure based on the given `location_rad` and `bearing_rad` assuming progress on the\n route on the original direction. If direction has changed or active point on the route can not be found, the route\n will become invalid.\n \"\"\"\n if len(self._ordered_way_relations) == 0 or location_rad is None or bearing_rad is None:\n return\n\n # Skip if no update on location or bearing.\n if np.array_equal(self.current_wr.location_rad, location_rad) and self.current_wr.bearing_rad == bearing_rad:\n return\n\n # Traverse the way relations in the actual order until we find an active one. From there, rebuild the route\n # with the way relations remaining ahead.\n for idx, wr in enumerate(self._ordered_way_relations):\n active_direction = wr.direction\n wr.update(location_rad, bearing_rad, accuracy)\n\n if not wr.active:\n continue\n\n if wr.direction != active_direction:\n # Driving direction on the route has changed. stop.\n break\n\n # We have now the current wr. Repopulate from here till the end and locate\n self._ordered_way_relations = self._ordered_way_relations[idx:]\n self._reset()\n self._locate()\n\n # If the active way is diverting, check whether there are possibilities to divert from the route in the\n # vicinity of the current location. If there are possibilities, then stop here to lose the route as we are\n # most likely driving away. If there are no possibilities, then stick to the route as the diversion is probably\n # just a matter of GPS accuracy. 
(It can happen after driving under a bridge)\n if wr.diverting and len(self._nodes_data.possible_divertions(self._ahead_idx, self._distance_to_node_ahead)) > 0:\n break\n\n # The current location in route is valid, return.\n return\n\n # if we got here, there is no new active way relation or driving direction has changed. Reset.\n self._reset()\n\n @property\n def speed_limits_ahead(self):\n \"\"\"Returns an array of SpeedLimitSection objects for the actual route ahead of current location\n \"\"\"\n if self._limits_ahead is not None:\n return self._limits_ahead\n\n if self._nodes_data is None or self._ahead_idx is None:\n return []\n\n self._limits_ahead = self._nodes_data.speed_limits_ahead(self._ahead_idx, self._distance_to_node_ahead)\n return self._limits_ahead\n\n @property\n def curvature_speed_limits_ahead(self):\n \"\"\"Returns an array of TurnSpeedLimitSection objects for the actual route ahead of current location due\n to curvatures\n \"\"\"\n if self._curvature_limits_ahead is not None:\n return self._curvature_limits_ahead\n\n if self._nodes_data is None or self._ahead_idx is None:\n return []\n\n self._curvature_limits_ahead = self._nodes_data. \\\n curvatures_speed_limit_sections_ahead(self._ahead_idx, self._distance_to_node_ahead)\n\n return self._curvature_limits_ahead\n\n @property\n def current_speed_limit(self):\n if not self.located:\n return None\n\n limits_ahead = self.speed_limits_ahead\n if len(limits_ahead) == 0 or limits_ahead[0].start != 0:\n return None\n\n return limits_ahead[0].value\n\n @property\n def current_curvature_speed_limit_section(self):\n if not self.located:\n return None\n\n limits_ahead = self.curvature_speed_limits_ahead\n if len(limits_ahead) == 0 or limits_ahead[0].start != 0:\n return None\n\n return limits_ahead[0]\n\n @property\n def next_speed_limit_section(self):\n if not self.located:\n return None\n\n limits_ahead = self.speed_limits_ahead\n if len(limits_ahead) == 0:\n return None\n\n # Find the first section that does not start in 0. i.e. the next section\n for section in limits_ahead:\n if section.start > 0:\n return section\n\n return None\n\n def next_curvature_speed_limit_sections(self, horizon_mts):\n if not self.located:\n return []\n\n # Provide the curvature speed sections that start ahead (> 0) and up to horizon\n return list(filter(lambda la: la.start > 0 and la.start <= horizon_mts, self.curvature_speed_limits_ahead))\n\n @property\n def distance_to_end(self):\n if not self.located:\n return None\n\n return self._nodes_data.distance_to_end(self._ahead_idx, self._distance_to_node_ahead)\n" ]
[ [ "numpy.array", "numpy.radians", "numpy.testing.assert_array_almost_equal" ], [ "numpy.nonzero", "numpy.array_equal", "numpy.cos", "numpy.arctan2", "numpy.argmin", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yongzx/Semi-supervised-Deep-Embedded-Clustering-with-Anomaly-Detection-for-Semantic-Frame-Induction
[ "7ead941bdf50093f1b8dd860cdeb5f04fb223165" ]
[ "evaluation/clustering.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Nov 04, 2019\n@author: yongzhengxin\n\"\"\"\n\nimport numpy as np\nfrom sklearn import metrics\nimport bcubed\n\ndef purity_score(y_true, y_pred, inv=False):\n \"\"\"\n :param y_true: true cluster ids\n :param y_pred: predicted cluster ids\n :param inv: boolean\n :return: purity (inv = False) or inverse-purity (inv = True)\n \"\"\"\n # compute contingency matrix (also called confusion matrix)\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)\n axis = 0 if not inv else 1\n\n # return purity\n return np.sum(np.amax(contingency_matrix, axis=axis)) / np.sum(contingency_matrix)\n\n\ndef f_purity_score(y_true, y_pred):\n \"\"\"\n :param y_true: true cluster ids\n :param y_pred: predicted cluster ids\n :return: F1 purity score\n\n Implementation details - harmonic mean of purity and inverse purity score - see https://arxiv.org/pdf/1401.4590.pdf\n \"\"\"\n return 2 * (purity_score(y_true, y_pred) * purity_score(y_true, y_pred, inv=True)) / (purity_score(y_true, y_pred) + purity_score(y_true, y_pred, inv=True))\n\n\ndef external_eval_clusters(y_true, y_pred):\n \"\"\"\n :param y_true: true cluster ids\n :param y_pred: predicted cluster ids\n :return: external evaluation metrics of clustering quality.\n The metrics are purity, inverse purity, harmonic mean, b-cubed precision, recall and their harmonic mean.\n \"\"\"\n purity = purity_score(y_true, y_pred)\n inverse_purity = purity_score(y_true, y_pred, inv=True)\n f_purity = f_purity_score(y_true, y_pred)\n\n ldict = {i: {cluster_idx} for i, cluster_idx in enumerate(y_true)}\n cdict = {i: {cluster_idx} for i, cluster_idx in enumerate(y_pred)}\n bcubed_precision = bcubed.precision(cdict, ldict)\n bcubed_recall = bcubed.recall(cdict, ldict)\n bcubed_fscore = bcubed.fscore(bcubed_precision, bcubed_recall)\n\n return purity, inverse_purity, f_purity, bcubed_precision, bcubed_recall, bcubed_fscore\n\n\ndef print_external_eval_clusters(purity, inverse_purity, f_purity, bcubed_precision, bcubed_recall, bcubed_fscore):\n \"\"\"\n Print out the external evaluation metrics of clustering quality.\n \"\"\"\n print(\"Purity:\", purity)\n print(\"Inverse Purity:\", inverse_purity)\n print(\"F-score (Purity and Inverse Purity):\", f_purity)\n print(\"BCubed Precision:\", bcubed_precision)\n print(\"BCubed Recall:\", bcubed_recall)\n print(\"BCubed F1:\", bcubed_fscore)\n return\n" ]
[ [ "sklearn.metrics.cluster.contingency_matrix", "numpy.amax", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Steward3103/Tensorflow
[ "1039ff9ee8c8c7ed09f9bb106131a50285866dd4" ]
[ "tensorflow/contrib/lite/python/lite.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Lite tooling helper functionality.\n\nEXPERIMENTAL: APIs here are unstable and likely to change without notice.\n\n@@TocoConverter\n@@toco_convert\n@@toco_convert_protos\n@@Interpreter\n@@OpHint\n@@convert_op_hints_to_stubs\n\n@@FLOAT\n@@QUANTIZED_UINT8\n@@TFLITE\n@@GRAPHVIZ_DOT\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.lite.python import lite_constants as constants\nfrom tensorflow.contrib.lite.python.convert import tensor_name\nfrom tensorflow.contrib.lite.python.convert import toco_convert\nfrom tensorflow.contrib.lite.python.convert import toco_convert_protos # pylint: disable=unused-import\nfrom tensorflow.contrib.lite.python.convert_saved_model import freeze_saved_model\nfrom tensorflow.contrib.lite.python.interpreter import Interpreter # pylint: disable=unused-import\nfrom tensorflow.contrib.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import\nfrom tensorflow.contrib.lite.python.op_hint import OpHint # pylint: disable=unused-import\nfrom tensorflow.python.framework import graph_util as tf_graph_util\nfrom tensorflow.python.ops.variables import global_variables_initializer\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.saved_model import tag_constants\n\n\nclass TocoConverter(object):\n \"\"\"Convert a TensorFlow model into `output_format` using TOCO.\n\n This is used to convert from a TensorFlow GraphDef or SavedModel into either a\n TFLite FlatBuffer or graph visualization.\n\n Attributes:\n\n inference_type: Currently must be `{FLOAT, QUANTIZED_UINT8}`.\n (default FLOAT)\n output_format: Type of data to write (currently must be TFLITE or\n GRAPHVIZ_DOT). (default TFLITE)\n quantized_input_stats: The mean and std deviation of training data for each\n input tensor. Only needed if `inference_type` is `QUANTIZED_UINT8`.\n (default None)\n drop_control_dependency: Boolean indicating whether to drop control\n dependencies silently. This is due to TFLite not supporting control\n dependencies. (default True)\n allow_custom_ops: Boolean indicating whether to allow custom operations.\n (default False)\n\n Example usage:\n\n # Converting a frozen graph.\n converter = lite.TocoConverter.from_session(sess, in_tensors, out_tensors)\n tflite_model = converter.convert()\n open(\"converted_model.tflite\", \"wb\").write(tflite_model)\n\n # Converting a SavedModel.\n converter = lite.TocoConverter.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n \"\"\"\n\n def __init__(self, graph_def, input_tensors, output_tensors):\n \"\"\"Constructor for TocoConverter.\n\n Args:\n\n graph_def: TensorFlow GraphDef.\n input_tensors: List of input tensors. 
Type and shape are computed using\n `foo.get_shape()` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n \"\"\"\n self._graph_def = graph_def\n self._input_tensors = input_tensors\n self._output_tensors = output_tensors\n self.inference_type = constants.FLOAT\n self.output_format = constants.TFLITE\n self.quantized_input_stats = None\n self.drop_control_dependency = True\n self.allow_custom_ops = False\n\n @classmethod\n def from_session(cls,\n sess,\n input_tensors,\n output_tensors,\n freeze_variables=False):\n \"\"\"Creates a TocoConverter class from a TensorFlow Session.\n\n Args:\n sess: TensorFlow Session.\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.get_shape()` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n freeze_variables: Boolean indicating whether the variables need to be\n converted into constants via the freeze_graph.py script.\n (default False)\n\n Returns:\n TocoConverter class.\n \"\"\"\n\n # Get GraphDef.\n if freeze_variables:\n sess.run(global_variables_initializer())\n output_arrays = [tensor_name(tensor) for tensor in output_tensors]\n graph_def = tf_graph_util.convert_variables_to_constants(\n sess, sess.graph_def, output_arrays)\n else:\n graph_def = sess.graph_def\n\n # Create TocoConverter class.\n return cls(graph_def, input_tensors, output_tensors)\n\n @classmethod\n def from_saved_model(\n cls,\n saved_model_dir,\n input_arrays=None,\n input_shapes=None,\n output_arrays=None,\n tag_set=None,\n signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):\n \"\"\"Creates a TocoConverter class from a SavedModel.\n\n Args:\n saved_model_dir: SavedModel directory to convert.\n input_arrays: List of input tensors to freeze graph with. Uses input\n arrays from SignatureDef when none are provided. (default None)\n input_shapes: Map of strings representing input tensor names to list of\n integers representing input shapes (e.g., {\"foo\": [1, 16, 16, 3]}).\n Automatically determined when input shapes is None (e.g., {\"foo\" :\n None}). (default None)\n output_arrays: List of output tensors to freeze graph with. Uses output\n arrays from SignatureDef when none are provided. (default None)\n tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to\n analyze. All tags in the tag set must be present. (default \"serve\")\n signature_key: Key identifying SignatureDef containing inputs and outputs.\n\n Returns:\n TocoConverter class.\n \"\"\"\n if tag_set is None:\n tag_set = set([tag_constants.SERVING])\n\n result = freeze_saved_model(saved_model_dir, input_arrays, input_shapes,\n output_arrays, tag_set, signature_key)\n return cls(\n graph_def=result[0], input_tensors=result[1], output_tensors=result[2])\n\n def convert(self):\n \"\"\"Converts a TensorFlow GraphDef based on instance variables.\n\n Returns:\n The converted data in serialized format. Either a TFLite Flatbuffer or a\n Graphviz graph depending on value in `output_format`.\n\n Raises:\n ValueError:\n None value for dimension in input_tensor.\n \"\"\"\n # Checks dimensions in input tensor.\n for tensor in self._input_tensors:\n shape = tensor.get_shape().as_list()\n if None in shape[1:]:\n raise ValueError(\n \"None is only supported in the 1st dimension. 
Tensor '{0}' has \"\n \"invalid shape '{1}'.\".format(tensor.name, shape))\n elif shape[0] is None:\n self._set_batch_size(batch_size=1)\n\n # Converts model.\n result = toco_convert(\n input_data=self._graph_def,\n input_tensors=self._input_tensors,\n output_tensors=self._output_tensors,\n inference_type=self.inference_type,\n input_format=constants.TENSORFLOW_GRAPHDEF,\n output_format=self.output_format,\n quantized_input_stats=self.quantized_input_stats,\n drop_control_dependency=self.drop_control_dependency)\n return result\n\n def _set_batch_size(self, batch_size):\n \"\"\"Sets the first dimension of the input tensor to `batch_size`.\n\n Args:\n batch_size: Batch size for the model. Replaces the first dimension of an\n input size array if undefined. (default 1)\n \"\"\"\n for tensor in self._input_tensors:\n shape = tensor.get_shape().as_list()\n shape[0] = batch_size\n tensor.set_shape(shape)\n" ]
[ [ "tensorflow.contrib.lite.python.convert_saved_model.freeze_saved_model", "tensorflow.contrib.lite.python.convert.tensor_name", "tensorflow.contrib.lite.python.convert.toco_convert", "tensorflow.python.framework.graph_util.convert_variables_to_constants", "tensorflow.python.ops.variables.global_variables_initializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12" ] } ]
jjakimoto/rl_traders.py
[ "d5411c96d49ba6a54751d12cdd11974e5cc1a8aa" ]
[ "rl_traders/models.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch_utils.models import FeedForward\n\n\nclass EIIEFeedForwarad(nn.Module):\n def __init__(self, model_params, cash_bias):\n super(EIIEFeedForwarad, self).__init__()\n self.lower_model = FeedForward(model_params['lower_params'])\n self.upper_model = FeedForward(model_params['upper_params'])\n self.cash_bias = nn.Parameter(cash_bias)\n\n def forward(self, states, prev_actions):\n n_batch = states.shape[0]\n outputs = self.lower_model(states)\n # We do not use cash actions as input, prev_actions[:, 0]\n prev_actions = prev_actions[:, None, None, 1:]\n # Concatenation with channel dimension\n outputs = torch.cat((outputs, prev_actions), dim=1)\n prev_softmax = self.upper_model(outputs)\n _cash_bias = self.cash_bias.repeat(n_batch, 1)\n prev_softmax = torch.cat((_cash_bias, prev_softmax), dim=-1)\n actions = F.softmax(prev_softmax, dim=-1)\n return actions\n\n def predict(self, state, prev_action):\n states = state[None, :]\n prev_actions = prev_action[None, :]\n return self.forward(states, prev_actions)[0].detach().numpy()\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.Parameter", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tubamuzzaffar/RACT
[ "6c0a44eb795c3b54a0d43e424cb80c400b306197" ]
[ "utils/data_loaders.py" ]
[ "from scipy import sparse\nimport pandas as pd\nimport numpy as np\nimport os\n\nimport tensorflow as tf\n\n\ndef load_train_data(csv_file, n_items):\n tp = pd.read_csv(csv_file)\n n_users = tp['uid'].max() + 1\n\n rows, cols = tp['uid'], tp['sid']\n data = sparse.csr_matrix((np.ones_like(rows), (rows, cols)),\n dtype='float64',\n shape=(n_users, n_items))\n return data\n\n\ndef load_tr_te_data(csv_file_tr, csv_file_te, n_items):\n tp_tr = pd.read_csv(csv_file_tr)\n tp_te = pd.read_csv(csv_file_te)\n\n start_idx = min(tp_tr['uid'].min(), tp_te['uid'].min())\n end_idx = max(tp_tr['uid'].max(), tp_te['uid'].max())\n\n rows_tr, cols_tr = tp_tr['uid'] - start_idx, tp_tr['sid']\n rows_te, cols_te = tp_te['uid'] - start_idx, tp_te['sid']\n\n data_tr = sparse.csr_matrix((np.ones_like(rows_tr), (rows_tr, cols_tr)),\n dtype='float64',\n shape=(end_idx - start_idx + 1, n_items))\n data_te = sparse.csr_matrix((np.ones_like(rows_te), (rows_te, cols_te)),\n dtype='float64',\n shape=(end_idx - start_idx + 1, n_items))\n return data_tr, data_te\n\n\ndef tr_te_dataset(data_tr, data_te, batch_size):\n # https://www.tensorflow.org/performance/performance_guide makes me think that I'm doing\n # something wrong, because my GPU usage hovers near 0 usually. That's v disappointing. I hope\n # I can speed it up hugely...\n # This is going to take in the output of data_tr and data_te, and turn them into\n # things we can sample from.\n\n # The only worry I have is, I don't know exactly how to do the whole \"masking\" part in here..\n\n # The way it works is, load_train_data just loads in training data, while load_tr_te_data\n # has goal-vectors as well. These are the ones that you drop-out. So, this really should be fine.\n\n data_tr = data_tr.astype(np.float32)\n data_tr_coo = data_tr.tocoo()\n\n n_items = data_tr_coo.shape[1]\n\n indices = np.mat([data_tr_coo.row, data_tr_coo.col]).transpose()\n sparse_data_tr = tf.SparseTensor(indices, data_tr_coo.data, data_tr_coo.shape)\n\n data_te = data_te.astype(np.float32)\n data_te_coo = data_te.tocoo()\n\n indices = np.mat([data_te_coo.row, data_te_coo.col]).transpose()\n sparse_data_te = tf.SparseTensor(indices, data_te_coo.data, data_te_coo.shape)\n\n samples_tr = tf.data.Dataset.from_tensor_slices(sparse_data_tr)\n samples_te = tf.data.Dataset.from_tensor_slices(sparse_data_te)\n\n # 10000 might be too big to sample from... 
Not sure how that's supposed to work with batch anyways.\n dataset = tf.data.Dataset.zip((samples_tr, samples_te)).shuffle(100).batch(\n batch_size, drop_remainder=True)\n\n dataset = dataset.map(lambda x, y: (tf.sparse_tensor_to_dense(x), tf.sparse_tensor_to_dense(y)))\n\n expected_shape = tf.TensorShape([batch_size, n_items])\n dataset = dataset.apply(tf.contrib.data.assert_element_shape((expected_shape, expected_shape)))\n\n # dataset = dataset.skip(15)\n\n return dataset\n # dataset = dataset.map()\n\n\ndef train_dataset(data_tr, batch_size):\n\n # Note: I'm going to do the most heinous of things: I'm going to add in a fake operation here,\n # so that it has the same form as the other guy.\n # That will let us swap them out.\n\n data_tr = data_tr.astype(np.float32)\n\n data_tr_coo = data_tr.tocoo()\n\n n_items = data_tr_coo.shape[1]\n\n indices = np.mat([data_tr_coo.row, data_tr_coo.col]).transpose()\n sparse_data = tf.SparseTensor(indices, data_tr_coo.data, data_tr_coo.shape)\n\n samples_tr = tf.data.Dataset.from_tensor_slices(sparse_data)\n\n\n dataset = samples_tr.shuffle(100).batch(batch_size, drop_remainder=True)#.map(tf.sparse_to_dense)\n dataset = dataset.map(tf.sparse_tensor_to_dense)\n\n expected_shape = tf.TensorShape([batch_size, n_items])\n dataset = dataset.apply(tf.contrib.data.assert_element_shape(expected_shape))\n\n dataset = dataset.zip((dataset, dataset))\n # dataset.apply(tf.contrib.data.assert_element_shape([expected_shape, expected_shape]))\n\n # dataset = dataset.skip(200)\n\n return dataset\n\n\ndef get_batch_from_list(idxlist, batch_size, batch_num, data):\n disc_training_indices = idxlist[(batch_size * batch_num):(batch_size * (batch_num + 1))]\n X_train = data[disc_training_indices]\n if sparse.isspmatrix(X_train):\n X_train = X_train.toarray()\n X_train = X_train.astype('float32')\n return X_train\n\n\ndef get_num_items(pro_dir):\n unique_sid = list()\n with open(os.path.join(pro_dir, 'unique_sid.txt'), 'r') as f:\n for line in f:\n unique_sid.append(line.strip())\n\n n_items = len(unique_sid)\n print(\"n_items: {}\".format(n_items))\n return n_items\n" ]
[ [ "scipy.sparse.isspmatrix", "tensorflow.TensorShape", "pandas.read_csv", "numpy.ones_like", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.sparse_tensor_to_dense", "tensorflow.SparseTensor", "tensorflow.data.Dataset.zip", "tensorflow.contrib.data.assert_element_shape", "numpy.mat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [ "1.10" ] } ]
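Both loaders above funnel a scipy CSR matrix into a tf.data pipeline: convert to COO, wrap the indices and values in a tf.SparseTensor, slice it into a Dataset, batch, then densify. A minimal standalone sketch of that pattern, assuming the TensorFlow 1.10 pin recorded above; the 3x4 interaction matrix is a toy stand-in, not data from the record:

import numpy as np
import tensorflow as tf
from scipy import sparse

# toy user-item interactions: 3 users x 4 items, one click each
rows, cols = np.array([0, 1, 2]), np.array([1, 3, 0])
csr = sparse.csr_matrix((np.ones_like(rows), (rows, cols)),
                        dtype='float64', shape=(3, 4))

coo = csr.astype(np.float32).tocoo()
indices = np.mat([coo.row, coo.col]).transpose()
sp = tf.SparseTensor(indices, coo.data, coo.shape)

# slice per user, batch, then densify, as train_dataset() above does
dataset = tf.data.Dataset.from_tensor_slices(sp)
dataset = dataset.batch(2, drop_remainder=True)
dataset = dataset.map(tf.sparse_tensor_to_dense)

batch = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(batch))  # a dense (2, 4) batch of user rows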
timcera/plottoolbox
[ "b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298", "b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298", "b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298" ]
[ "src/plottoolbox/skill_metrics/index_agreement.py", "src/plottoolbox/functions/lag_plot.py", "src/plottoolbox/skill_metrics/overlay_taylor_diagram_circles.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\n\nfrom . import utils\n\n\ndef index_agreement(simulated, observed):\n \"\"\"\n Calculate the index of agreement.\n\n Calculates the index of agreement between two variables\n simulated and observed. The index_agreement is calculated using the\n formula:\n\n index_agreement = 1.0 - sum((o - s)**2) /\n sum((abs(s - mean(o)) + abs(o - mean(o)))**2)\n\n where s is the simulated values, o is the observed values, and\n N is the total number of values in s & o. Note that s & o must\n have the same number of values.\n\n The index of agreement is between 0 and 1, where 1 is a perfect match.\n\n Input:\n simulated : simulated values\n observed : observed values\n\n Output:\n index_agreement : index of agreement\n \"\"\"\n # Check that dimensions of simulated and observed fields match\n utils.check_arrays(simulated, observed)\n\n # Calculate the index_agreement\n index_agreement = 1.0 - (\n np.sum((observed - simulated) ** 2)\n / (\n np.sum(\n (\n np.abs(simulated - np.mean(observed))\n + np.abs(observed - np.mean(observed))\n )\n ** 2\n )\n )\n )\n\n return index_agreement\n", "# -*- coding: utf-8 -*-\n\"\"\"Collection of functions for the manipulation of time series.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport itertools\nimport os\nimport warnings\n\nimport mando\nimport numpy as np\nimport pandas as pd\nfrom mando.rst_text_formatter import RSTHelpFormatter\nfrom tstoolbox import tsutils\n\nfrom .. import plotutils\n\nwarnings.filterwarnings(\"ignore\")\n\n\[email protected](\"lag_plot\", formatter_class=RSTHelpFormatter, doctype=\"numpy\")\[email protected](plotutils.ldocstrings)\ndef lag_plot_cli(\n input_ts=\"-\",\n columns=None,\n start_date=None,\n end_date=None,\n clean=False,\n skiprows=None,\n index_type=\"datetime\",\n names=None,\n ofilename=\"plot.png\",\n xtitle=\"\",\n ytitle=\"\",\n title=\"\",\n figsize=\"10,6.0\",\n legend=None,\n legend_names=None,\n subplots=False,\n sharex=True,\n sharey=False,\n colors=\"auto\",\n linestyles=\"auto\",\n markerstyles=\" \",\n bar_hatchstyles=\"auto\",\n style=\"auto\",\n logx=False,\n logy=False,\n xaxis=\"arithmetic\",\n yaxis=\"arithmetic\",\n xlim=None,\n ylim=None,\n secondary_y=False,\n mark_right=True,\n scatter_matrix_diagonal=\"kde\",\n bootstrap_size=50,\n bootstrap_samples=500,\n norm_xaxis=False,\n norm_yaxis=False,\n lognorm_xaxis=False,\n lognorm_yaxis=False,\n xy_match_line=\"\",\n grid=False,\n label_rotation=None,\n label_skip=1,\n force_freq=None,\n drawstyle=\"default\",\n por=False,\n invert_xaxis=False,\n invert_yaxis=False,\n round_index=None,\n plotting_position=\"weibull\",\n prob_plot_sort_values=\"descending\",\n source_units=None,\n target_units=None,\n lag_plot_lag=1,\n plot_styles=\"bright\",\n hlines_y=None,\n hlines_xmin=None,\n hlines_xmax=None,\n hlines_colors=None,\n hlines_linestyles=\"-\",\n vlines_x=None,\n vlines_ymin=None,\n vlines_ymax=None,\n vlines_colors=None,\n vlines_linestyles=\"-\",\n):\n r\"\"\"Lag plot.\n\n \"lag_plot\" indicates structure in the data.\n\n {yone}\n\n Parameters\n ----------\n {input_ts}\n\n ofilename : str\n [optional, defaults to 'plot.png']\n\n Output filename for the plot. 
Extension defines\n the type, for example 'filename.png' will create a PNG file.\n\n If used within Python, and `ofilename` is None will return the\n Matplotlib figure that can then be changed or added to as\n needed.\n\n lag_plot_lag\n [optional, default to 1]\n\n The lag used if ``type`` \"lag_plot\" is chosen.\n\n xtitle : str\n [optional, default depends on ``type``]\n\n Title of x-axis.\n\n ytitle : str\n [optional, default depends on ``type``]\n\n Title of y-axis.\n\n title : str\n [optional, defaults to '']\n\n Title of chart.\n\n figsize : str\n [optional, defaults to '10,6.5']\n\n The 'width,height' of plot in inches.\n\n legend\n [optional, defaults to True]\n\n Whether to display the legend.\n\n legend_names : str\n [optional, defaults to None]\n\n Legend would normally use the time-series names associated with\n the input data. The 'legend_names' option allows you to\n override the names in the data set. You must supply a comma\n separated list of strings for each time-series in the data set.\n\n subplots\n [optional, defaults to False]\n\n Make separate subplots for each time series.\n\n sharex\n [optional, default to True]\n\n In case subplots=True, share x axis.\n\n sharey\n [optional, default to False]\n\n In case subplots=True, share y axis.\n\n colors\n [optional, default is 'auto']\n\n The default 'auto' will cycle through matplotlib colors in the chosen\n style.\n\n At the command line supply a comma separated matplotlib\n color codes, or within Python a list of color code strings.\n\n Can identify colors in four different ways.\n\n 1. Use 'CN' where N is a number from 0 to 9 that gets the Nth color\n from the current style.\n\n 2. Single character code from the table below.\n\n +------+---------+\n | Code | Color |\n +======+=========+\n | b | blue |\n +------+---------+\n | g | green |\n +------+---------+\n | r | red |\n +------+---------+\n | c | cyan |\n +------+---------+\n | m | magenta |\n +------+---------+\n | y | yellow |\n +------+---------+\n | k | black |\n +------+---------+\n\n 3. Number between 0 and 1 that represents the level of gray, where 0 is\n white an 1 is black.\n\n 4. Any of the HTML color names.\n\n +------------------+\n | HTML Color Names |\n +==================+\n | red |\n +------------------+\n | burlywood |\n +------------------+\n | chartreuse |\n +------------------+\n | ...etc. |\n +------------------+\n\n Color reference:\n http://matplotlib.org/api/colors_api.html\n\n linestyles\n [optional, default to 'auto']\n\n If 'auto' will iterate through the available matplotlib line types.\n Otherwise on the command line a comma separated list, or a list of\n strings if using the Python API.\n\n To not display lines use a space (' ') as the linestyle code.\n\n Separated 'colors', 'linestyles', and 'markerstyles' instead of using\n the 'style' keyword.\n\n +---------+--------------+\n | Code | Lines |\n +=========+==============+\n | ``-`` | solid |\n +---------+--------------+\n | -- | dashed |\n +---------+--------------+\n | -. | dash_dot |\n +---------+--------------+\n | : | dotted |\n +---------+--------------+\n | None | draw nothing |\n +---------+--------------+\n | ' ' | draw nothing |\n +---------+--------------+\n | '' | draw nothing |\n +---------+--------------+\n\n Line reference:\n http://matplotlib.org/api/artist_api.html\n\n markerstyles\n [optional, default to ' ']\n\n The default ' ' will not plot a marker. If 'auto' will iterate through\n the available matplotlib marker types. 
Otherwise on the command line\n a comma separated list, or a list of strings if using the Python API.\n\n Separated 'colors', 'linestyles', and 'markerstyles' instead of using\n the 'style' keyword.\n\n +-------+----------------+\n | Code | Markers |\n +=======+================+\n | . | point |\n +-------+----------------+\n | o | circle |\n +-------+----------------+\n | v | triangle down |\n +-------+----------------+\n | ^ | triangle up |\n +-------+----------------+\n | < | triangle left |\n +-------+----------------+\n | > | triangle right |\n +-------+----------------+\n | 1 | tri_down |\n +-------+----------------+\n | 2 | tri_up |\n +-------+----------------+\n | 3 | tri_left |\n +-------+----------------+\n | 4 | tri_right |\n +-------+----------------+\n | 8 | octagon |\n +-------+----------------+\n | s | square |\n +-------+----------------+\n | p | pentagon |\n +-------+----------------+\n | ``*`` | star |\n +-------+----------------+\n | h | hexagon1 |\n +-------+----------------+\n | H | hexagon2 |\n +-------+----------------+\n | ``+`` | plus |\n +-------+----------------+\n | x | x |\n +-------+----------------+\n | D | diamond |\n +-------+----------------+\n | d | thin diamond |\n +-------+----------------+\n | _ | hlines_y |\n +-------+----------------+\n | None | nothing |\n +-------+----------------+\n | ' ' | nothing |\n +-------+----------------+\n | '' | nothing |\n +-------+----------------+\n\n Marker reference:\n http://matplotlib.org/api/markers_api.html\n\n style\n [optional, default is None]\n\n Still available, but if None is replaced by 'colors', 'linestyles', and\n 'markerstyles' options. Currently the 'style' option will override the\n others.\n\n Comma separated matplotlib style strings per time-series. Just\n combine codes in 'ColorMarkerLine' order, for example 'r*--' is\n a red dashed line with star marker.\n\n bar_hatchstyles\n [optional, default to \"auto\", only used if type equal to \"bar\", \"barh\",\n \"bar_stacked\", and \"barh_stacked\"]\n\n If 'auto' will iterate through the available matplotlib hatch types.\n Otherwise on the command line a comma separated list, or a list of\n strings if using the Python API.\n\n +-----------------+-------------------+\n | bar_hatchstyles | Description |\n +=================+===================+\n | / | diagonal hatching |\n +-----------------+-------------------+\n | ``\\`` | back diagonal |\n +-----------------+-------------------+\n | ``|`` | vertical |\n +-----------------+-------------------+\n | - | horizontal |\n +-----------------+-------------------+\n | + | crossed |\n +-----------------+-------------------+\n | x | crossed diagonal |\n +-----------------+-------------------+\n | o | small circle |\n +-----------------+-------------------+\n | O | large circle |\n +-----------------+-------------------+\n | . | dots |\n +-----------------+-------------------+\n | * | stars |\n +-----------------+-------------------+\n\n logx\n DEPRECATED: use '--xaxis=\"log\"' instead.\n\n logy\n DEPRECATED: use '--yaxis=\"log\"' instead.\n\n xlim\n [optional, default is based on range of x values]\n\n Comma separated lower and upper limits for the x-axis of the\n plot. For example, '--xlim 1,1000' would limit the plot from\n 1 to 1000, where '--xlim ,1000' would base the lower limit on\n the data and set the upper limit to 1000.\n\n ylim\n [optional, default is based on range of y values]\n\n Comma separated lower and upper limits for the y-axis of the\n plot. 
See `xlim` for examples.\n\n xaxis : str\n [optional, default is 'arithmetic']\n\n Defines the type of the xaxis. One of 'arithmetic', 'log'.\n\n yaxis : str\n [optional, default is 'arithmetic']\n\n Defines the type of the yaxis. One of 'arithmetic', 'log'.\n\n secondary_y\n [optional, default is False]\n\n Whether to plot on the secondary y-axis. If a list/tuple, which\n time-series to plot on secondary y-axis.\n\n mark_right\n [optional, default is True]\n\n When using a secondary_y axis, should the legend label the axis of the\n various time-series automatically.\n\n scatter_matrix_diagonal : str\n [optional, defaults to 'kde']\n\n If plot type is 'scatter_matrix', this specifies the plot along the\n diagonal. One of 'kde' for Kernel Density Estimation or 'hist'\n for a histogram.\n\n bootstrap_size : int\n [optional, defaults to 50]\n\n The size of the random subset for 'bootstrap' plot.\n\n bootstrap_samples\n [optional, defaults to 500]\n\n The number of random subsets of 'bootstrap_size'.\n\n norm_xaxis\n DEPRECATED: use '--type=\"norm_xaxis\"' instead.\n\n norm_yaxis\n DEPRECATED: use '--type=\"norm_yaxis\"' instead.\n\n lognorm_xaxis\n DEPRECATED: use '--type=\"lognorm_xaxis\"' instead.\n\n lognorm_yaxis\n DEPRECATED: use '--type=\"lognorm_yaxis\"' instead.\n\n xy_match_line : str\n [optional, defaults is '']\n\n Will add a match line where x == y. Set to a line style code.\n\n grid\n [optional, default is False]\n\n Whether to plot grid lines on the major ticks.\n\n label_rotation : int\n [optional]\n\n Rotation for major labels for bar plots.\n\n label_skip : int\n [optional]\n\n Skip for major labels for bar plots.\n\n drawstyle : str\n [optional, default is 'default']\n\n 'default' connects the points with lines. The\n steps variants produce step-plots. 'steps' is equivalent to 'steps-pre'\n and is maintained for backward-compatibility.\n\n ACCEPTS::\n\n ['default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post']\n\n por\n [optional]\n\n Plot from first good value to last good value. Strips NANs\n from beginning and end.\n\n {force_freq}\n\n invert_xaxis\n [optional, default is False]\n\n Invert the x-axis.\n\n invert_yaxis\n [optional, default is False]\n\n Invert the y-axis.\n\n plotting_position : str\n [optional, default is 'weibull']\n\n {plotting_position_table}\n\n Only used for norm_xaxis, norm_yaxis, lognorm_xaxis,\n lognorm_yaxis, weibull_xaxis, and weibull_yaxis.\n\n prob_plot_sort_values : str\n [optional, default is 'descending']\n\n How to sort the values for the probability plots.\n\n Only used for norm_xaxis, norm_yaxis, lognorm_xaxis,\n lognorm_yaxis, weibull_xaxis, and weibull_yaxis.\n\n {columns}\n\n {start_date}\n\n {end_date}\n\n {clean}\n\n {skiprows}\n\n {index_type}\n\n {names}\n\n {source_units}\n\n {target_units}\n\n {round_index}\n\n plot_styles: str\n [optional, default is \"default\"]\n\n Set the style of the plot. 
One or more of Matplotlib styles \"classic\",\n \"Solarize_Light2\", \"bmh\", \"dark_background\", \"fast\", \"fivethirtyeight\",\n \"ggplot\", \"grayscale\", \"seaborn\", \"seaborn-bright\",\n \"seaborn-colorblind\", \"seaborn-dark\", \"seaborn-dark-palette\",\n \"seaborn-darkgrid\", \"seaborn-deep\", \"seaborn-muted\",\n \"seaborn-notebook\", \"seaborn-paper\", \"seaborn-pastel\",\n \"seaborn-poster\", \"seaborn-talk\", \"seaborn-ticks\", \"seaborn-white\",\n \"seaborn-whitegrid\", \"tableau-colorblind10\", and\n\n SciencePlots styles \"science\", \"grid\", \"ieee\", \"scatter\", \"notebook\",\n \"high-vis\", \"bright\", \"vibrant\", \"muted\", and \"retro\".\n\n If multiple styles then each over rides some or all of the\n characteristics of the previous.\n\n Color Blind Appropriate Styles\n\n The styles \"seaborn-colorblind\", \"tableau-colorblind10\", \"bright\",\n \"vibrant\", and \"muted\" are all styles that are setup to be able to be\n distinguished by someone with color blindness.\n\n Black, White, and Gray Styles\n\n The \"ieee\" style is appropriate for black, white, and gray, however the\n \"ieee\" also will change the chart size to fit in a column of the \"IEEE\"\n journal.\n\n The \"grayscale\" is another style useful for photo-copyable black,\n white, nd gray.\n\n Matplotlib styles:\n https://matplotlib.org/3.3.1/gallery/style_sheets/style_sheets_reference.html\n\n SciencePlots styles:\n https://github.com/garrettj403/SciencePlots\n\n hlines_y:\n [optional, defaults to None]\n\n Number or list of y values where to place a horizontal line.\n\n hlines_xmin:\n [optional, defaults to None]\n\n List of minimum x values to start the horizontal line. If a list must be same length as `hlines_y`. If a single number will be used as the minimum x values for all horizontal lines. A missing value or None will start at the minimum x value for the entire plot.\n\n hlines_xmax:\n [optional, defaults to None]\n\n List of maximum x values to end each horizontal line. If a list must be same length as `hlines_y`. If a single number will be the maximum x value for all horizontal lines. A missing value or None will end at the maximum x value for the entire plot.\n\n hlines_colors:\n [optional, defaults to None]\n\n List of colors for the horizontal lines. If a single color then will be used as the color for all horizontal lines. If a list must be same length as `hlines_y`. If None will take from the color pallette in the current plot style.\n\n hlines_linestyles:\n [optional, defaults to None]\n\n List of linestyles for the horizontal lines. If a single linestyle then will be used as the linestyle for all horizontal lines. If a list must be same length as `hlines_y`. If None will take for the standard linestyles list.\n\n vlines_x:\n [optional, defaults to None]\n\n List of x values where to place a vertical line.\n\n vlines_ymin:\n [optional, defaults to None]\n\n List of minimum y values to start the vertical line. If a list must be same length as `vlines_x`. If a single number will be used as the minimum x values for all vertical lines. A missing value or None will start at the minimum x value for the entire plot.\n\n vlines_ymax:\n [optional, defaults to None]\n\n List of maximum x values to end each vertical line. If a list must be same length as `vlines_x`. If a single number will be the maximum x value for all vertical lines. 
A missing value or None will end at the maximum x value for the entire plot.\n\n vlines_colors:\n [optional, defaults to None]\n\n List of colors for the vertical lines. If a single color then will be used as the color for all vertical lines. If a list must be same length as `vlines_x`. If None will take from the color pallette in the current plot style.\n\n vlines_linestyles:\n [optional, defaults to None]\n\n List of linestyles for the vertical lines. If a single linestyle then will be used as the linestyle for all vertical lines. If a list must be same length as `vlines_x`. If None will take for the standard linestyles list.\n \"\"\"\n plt = lag_plot(\n input_ts=input_ts,\n columns=columns,\n start_date=start_date,\n end_date=end_date,\n clean=clean,\n skiprows=skiprows,\n index_type=index_type,\n names=names,\n ofilename=ofilename,\n xtitle=xtitle,\n ytitle=ytitle,\n title=title,\n figsize=figsize,\n legend=legend,\n legend_names=legend_names,\n subplots=subplots,\n sharex=sharex,\n sharey=sharey,\n colors=colors,\n linestyles=linestyles,\n markerstyles=markerstyles,\n bar_hatchstyles=bar_hatchstyles,\n style=style,\n logx=logx,\n logy=logy,\n xaxis=xaxis,\n yaxis=yaxis,\n xlim=xlim,\n ylim=ylim,\n secondary_y=secondary_y,\n mark_right=mark_right,\n scatter_matrix_diagonal=scatter_matrix_diagonal,\n bootstrap_size=bootstrap_size,\n bootstrap_samples=bootstrap_samples,\n norm_xaxis=norm_xaxis,\n norm_yaxis=norm_yaxis,\n lognorm_xaxis=lognorm_xaxis,\n lognorm_yaxis=lognorm_yaxis,\n xy_match_line=xy_match_line,\n grid=grid,\n label_rotation=label_rotation,\n label_skip=label_skip,\n force_freq=force_freq,\n drawstyle=drawstyle,\n por=por,\n invert_xaxis=invert_xaxis,\n invert_yaxis=invert_yaxis,\n round_index=round_index,\n plotting_position=plotting_position,\n prob_plot_sort_values=prob_plot_sort_values,\n source_units=source_units,\n target_units=target_units,\n lag_plot_lag=lag_plot_lag,\n plot_styles=plot_styles,\n hlines_y=hlines_y,\n hlines_xmin=hlines_xmin,\n hlines_xmax=hlines_xmax,\n hlines_colors=hlines_colors,\n hlines_linestyles=hlines_linestyles,\n vlines_x=vlines_x,\n vlines_ymin=vlines_ymin,\n vlines_ymax=vlines_ymax,\n vlines_colors=vlines_colors,\n vlines_linestyles=vlines_linestyles,\n )\n\n\n# @tsutils.validator(\n# ofilename=[str, [\"pass\", []], 1],\n# type=[str, [\"domain\", [\"lag_plot\",],], 1,],\n# lag_plot_lag=[int, [\"range\", [1, None]], 1],\n# xtitle=[str, [\"pass\", []], 1],\n# ytitle=[str, [\"pass\", []], 1],\n# title=[str, [\"pass\", []], 1],\n# figsize=[float, [\"range\", [0, None]], 2],\n# legend=[bool, [\"domain\", [True, False]], 1],\n# legend_names=[str, [\"pass\", []], 1],\n# subplots=[bool, [\"domain\", [True, False]], 1],\n# sharex=[bool, [\"domain\", [True, False]], 1],\n# sharey=[bool, [\"domain\", [True, False]], 1],\n# colors=[str, [\"pass\", []], None],\n# linestyles=[str, [\"domain\", [\"auto\", None, \"\", \" \", \" \"] + plotutils.LINE_LIST], None],\n# markerstyles=[str, [\"domain\", [\"auto\", None, \"\", \" \", \" \"] + plotutils.MARKER_LIST], None],\n# bar_hatchstyles=[str, [\"domain\", [\"auto\", None, \"\", \" \", \" \"] + plotutils.HATCH_LIST], None],\n# style=[str, [\"pass\", []], None],\n# xlim=[float, [\"pass\", []], 2],\n# ylim=[float, [\"pass\", []], 2],\n# xaxis=[str, [\"domain\", [\"arithmetic\", \"log\"]], 1],\n# yaxis=[str, [\"domain\", [\"arithmetic\", \"log\"]], 1],\n# secondary_y=[bool, [\"domain\", [True, False]], 1],\n# mark_right=[bool, [\"domain\", [True, False]], 1],\n# scatter_matrix_diagonal=[str, [\"domain\", 
[\"kde\", \"hist\"]], 1],\n# bootstrap_size=[int, [\"range\", [0, None]], 1],\n# xy_match_line=[str, [\"pass\", []], 1],\n# grid=[bool, [\"domain\", [True, False]], 1],\n# label_rotation=[float, [\"pass\", []], 1],\n# label_skip=[int, [\"range\", [1, None]], 1],\n# drawstyle=[str, [\"pass\", []], 1],\n# por=[bool, [\"domain\", [True, False]], 1],\n# invert_xaxis=[bool, [\"domain\", [True, False]], 1],\n# invert_yaxis=[bool, [\"domain\", [True, False]], 1],\n# plotting_position=[\n# str,\n# [\n# \"domain\",\n# [\"weibull\", \"benard\", \"tukey\", \"gumbel\", \"hazen\", \"cunnane\", \"california\"],\n# ],\n# 1,\n# ],\n# prob_plot_sort_values=[str, [\"domain\", [\"ascending\", \"descending\"]], 1],\n# plot_styles=[\n# str,\n# [\n# \"domain\",\n# [\n# \"classic\",\n# \"Solarize_Light2\",\n# \"bmh\",\n# \"dark_background\",\n# \"fast\",\n# \"fivethirtyeight\",\n# \"ggplot\",\n# \"grayscale\",\n# \"seaborn\",\n# \"seaborn-bright\",\n# \"seaborn-colorblind\",\n# \"seaborn-dark\",\n# \"seaborn-dark-palette\",\n# \"seaborn-darkgrid\",\n# \"seaborn-deep\",\n# \"seaborn-muted\",\n# \"seaborn-notebook\",\n# \"seaborn-paper\",\n# \"seaborn-pastel\",\n# \"seaborn-poster\",\n# \"seaborn-talk\",\n# \"seaborn-ticks\",\n# \"seaborn-white\",\n# \"seaborn-whitegrid\",\n# \"tableau-colorblind10\",\n# \"science\",\n# \"grid\",\n# \"ieee\",\n# \"scatter\",\n# \"notebook\",\n# \"high-vis\",\n# \"bright\",\n# \"vibrant\",\n# \"muted\",\n# \"retro\",\n# ],\n# ],\n# None,\n# ],\n# hlines_y=[float, [\"pass\", []], None],\n# hlines_xmin=[float, [\"pass\", []], None],\n# hlines_xmax=[float, [\"pass\", []], None],\n# hlines_colors=[str, [\"pass\", []], None],\n# hlines_linestyles=[\n# str,\n# [\"domain\", [\"auto\", None, \"\", \" \", \" \"] + plotutils.LINE_LIST],\n# None,\n# ],\n# vlines_x=[float, [\"pass\", []], None],\n# vlines_ymin=[float, [\"pass\", []], None],\n# vlines_ymax=[float, [\"pass\", []], None],\n# vlines_colors=[str, [\"pass\", []], None],\n# vlines_linestyles=[\n# str,\n# [\"domain\", [\"auto\", None, \"\", \" \", \" \"] + plotutils.LINE_LIST],\n# None,\n# ],\n# )\ndef lag_plot(\n input_ts=\"-\",\n columns=None,\n start_date=None,\n end_date=None,\n clean=False,\n skiprows=None,\n index_type=\"datetime\",\n names=None,\n ofilename=\"plot.png\",\n xtitle=\"\",\n ytitle=\"\",\n title=\"\",\n figsize=\"10,6.0\",\n legend=None,\n legend_names=None,\n subplots=False,\n sharex=True,\n sharey=False,\n colors=\"auto\",\n linestyles=\"auto\",\n markerstyles=\" \",\n bar_hatchstyles=\"auto\",\n style=\"auto\",\n logx=False,\n logy=False,\n xaxis=\"arithmetic\",\n yaxis=\"arithmetic\",\n xlim=None,\n ylim=None,\n secondary_y=False,\n mark_right=True,\n scatter_matrix_diagonal=\"kde\",\n bootstrap_size=50,\n bootstrap_samples=500,\n norm_xaxis=False,\n norm_yaxis=False,\n lognorm_xaxis=False,\n lognorm_yaxis=False,\n xy_match_line=\"\",\n grid=False,\n label_rotation=None,\n label_skip=1,\n force_freq=None,\n drawstyle=\"default\",\n por=False,\n invert_xaxis=False,\n invert_yaxis=False,\n round_index=None,\n plotting_position=\"weibull\",\n prob_plot_sort_values=\"descending\",\n source_units=None,\n target_units=None,\n lag_plot_lag=1,\n plot_styles=\"bright\",\n hlines_y=None,\n hlines_xmin=None,\n hlines_xmax=None,\n hlines_colors=None,\n hlines_linestyles=\"-\",\n vlines_x=None,\n vlines_ymin=None,\n vlines_ymax=None,\n vlines_colors=None,\n vlines_linestyles=\"-\",\n **kwds,\n):\n r\"\"\"Plot data.\"\"\"\n # Need to work around some old option defaults with the implementation of\n # mando\n legend = 
bool(legend == \"\" or legend == \"True\" or legend is None)\n\n type = \"lag_plot\"\n\n import matplotlib\n\n matplotlib.use(\"Agg\")\n import matplotlib.pyplot as plt\n from matplotlib.ticker import FixedLocator\n\n tsd = tsutils.common_kwds(\n input_ts,\n skiprows=skiprows,\n names=names,\n index_type=index_type,\n start_date=start_date,\n end_date=end_date,\n pick=columns,\n round_index=round_index,\n dropna=\"all\",\n source_units=source_units,\n target_units=target_units,\n clean=clean,\n por=por,\n )\n\n tsd, lnames = plotutils.check(type, tsd, legend_names)\n\n # This is to help pretty print the frequency\n try:\n try:\n pltfreq = str(tsd.index.freq, \"utf-8\").lower()\n except TypeError:\n pltfreq = str(tsd.index.freq).lower()\n if pltfreq.split(\" \")[0][1:] == \"1\":\n beginstr = 3\n else:\n beginstr = 1\n if pltfreq == \"none\":\n short_freq = \"\"\n else:\n # short freq string (day) OR (2 day)\n short_freq = \"({})\".format(pltfreq[beginstr:-1])\n except AttributeError:\n short_freq = \"\"\n\n if colors == \"auto\":\n colors = None\n else:\n colors = tsutils.make_list(colors)\n\n if linestyles == \"auto\":\n linestyles = plotutils.LINE_LIST\n else:\n linestyles = tsutils.make_list(linestyles)\n\n if bar_hatchstyles == \"auto\":\n bar_hatchstyles = plotutils.HATCH_LIST\n else:\n bar_hatchstyles = tsutils.make_list(bar_hatchstyles)\n\n if markerstyles == \"auto\":\n markerstyles = plotutils.MARKER_LIST\n else:\n markerstyles = tsutils.make_list(markerstyles)\n if markerstyles is None:\n markerstyles = \" \"\n\n if style != \"auto\":\n\n nstyle = tsutils.make_list(style)\n if len(nstyle) != len(tsd.columns):\n raise ValueError(\n tsutils.error_wrapper(\n \"\"\"\nYou have to have the same number of style strings as time-series to plot.\nYou supplied '{}' for style which has {} style strings,\nbut you have {} time-series.\n\"\"\".format(\n style, len(nstyle), len(tsd.columns)\n )\n )\n )\n colors = []\n markerstyles = []\n linestyles = []\n for st in nstyle:\n colors.append(st[0])\n if len(st) == 1:\n markerstyles.append(\" \")\n linestyles.append(\"-\")\n continue\n if st[1] in plotutils.MARKER_LIST:\n markerstyles.append(st[1])\n try:\n linestyles.append(st[2:])\n except IndexError:\n linestyles.append(\" \")\n else:\n markerstyles.append(\" \")\n linestyles.append(st[1:])\n if linestyles is None:\n linestyles = [\" \"]\n else:\n linestyles = [\" \" if i in [\" \", None] else i for i in linestyles]\n markerstyles = [\" \" if i is None else i for i in markerstyles]\n\n if colors is not None:\n icolors = itertools.cycle(colors)\n else:\n icolors = None\n imarkerstyles = itertools.cycle(markerstyles)\n ilinestyles = itertools.cycle(linestyles)\n\n # Only for bar, barh, bar_stacked, and barh_stacked.\n ibar_hatchstyles = itertools.cycle(bar_hatchstyles)\n\n if (\n logx is True\n or logy is True\n or norm_xaxis is True\n or norm_yaxis is True\n or lognorm_xaxis is True\n or lognorm_yaxis is True\n ):\n warnings.warn(\n \"\"\"\n*\n* The --logx, --logy, --norm_xaxis, --norm_yaxis, --lognorm_xaxis, and\n* --lognorm_yaxis options are deprecated.\n*\n* For --logx use --xaxis=\"log\"\n* For --logy use --yaxis=\"log\"\n* For --norm_xaxis use --type=\"norm_xaxis\"\n* For --norm_yaxis use --type=\"norm_yaxis\"\n* For --lognorm_xaxis use --type=\"lognorm_xaxis\"\n* For --lognorm_yaxis use --type=\"lognorm_yaxis\"\n*\n\"\"\"\n )\n\n if xaxis == \"log\":\n logx = True\n if yaxis == \"log\":\n logy = True\n\n xlim = plotutils.know_your_limits(xlim, axis=xaxis)\n ylim = 
plotutils.know_your_limits(ylim, axis=yaxis)\n\n plot_styles = tsutils.make_list(plot_styles) + [\"no-latex\"]\n style_loc = os.path.join(\n os.path.dirname(__file__), os.pardir, \"SciencePlots_styles\"\n )\n plot_styles = [\n os.path.join(style_loc, i + \".mplstyle\")\n if os.path.exists(os.path.join(style_loc, i + \".mplstyle\"))\n else i\n for i in plot_styles\n ]\n plt.style.use(plot_styles)\n\n figsize = tsutils.make_list(figsize, n=2)\n _, ax = plt.subplots(figsize=figsize)\n\n from pandas.plotting import lag_plot\n\n lag_plot(tsd, lag=lag_plot_lag, ax=ax)\n xtitle = xtitle or \"y(t)\"\n ytitle = ytitle or \"y(t+{})\".format(short_freq or 1)\n\n if hlines_y is not None:\n hlines_y = tsutils.make_list(hlines_y)\n hlines_xmin = tsutils.make_list(hlines_xmin)\n hlines_xmax = tsutils.make_list(hlines_xmax)\n hlines_colors = tsutils.make_list(hlines_colors)\n hlines_linestyles = tsutils.make_list(hlines_linestyles)\n nxlim = ax.get_xlim()\n if hlines_xmin is None:\n hlines_xmin = nxlim[0]\n if hlines_xmax is None:\n hlines_xmax = nxlim[1]\n if vlines_x is not None:\n vlines_x = tsutils.make_list(vlines_x)\n vlines_ymin = tsutils.make_list(vlines_ymin)\n vlines_ymax = tsutils.make_list(vlines_ymax)\n vlines_colors = tsutils.make_list(vlines_colors)\n vlines_linestyles = tsutils.make_list(vlines_linestyles)\n nylim = ax.get_ylim()\n if vlines_ymin is None:\n vlines_ymin = nylim[0]\n if vlines_ymax is None:\n vlines_ymax = nylim[1]\n if type in [\n \"time\",\n \"xy\",\n \"bar\",\n \"bar_stacked\",\n \"histogram\",\n \"norm_xaxis\",\n \"lognorm_xaxis\",\n \"weibull_xaxis\",\n \"norm_yaxis\",\n \"lognorm_yaxis\",\n \"weibull_yaxis\",\n ]:\n if hlines_y is not None:\n if type in [\"norm_yaxis\", \"lognorm_yaxis\", \"weibull_yaxis\"]:\n hlines_y = ppf(tsutils.make_list(hlines_y))\n plt.hlines(\n hlines_y,\n hlines_xmin,\n hlines_xmax,\n colors=hlines_colors,\n linestyles=hlines_linestyles,\n )\n if vlines_x is not None:\n if type in [\"norm_xaxis\", \"lognorm_xaxis\", \"weibull_xaxis\"]:\n vlines_x = ppf(tsutils.make_list(vlines_x))\n plt.vlines(\n vlines_x,\n vlines_ymin,\n vlines_ymax,\n colors=vlines_colors,\n linestyles=vlines_linestyles,\n )\n\n plt.xlabel(xtitle)\n plt.ylabel(ytitle)\n\n if invert_xaxis is True:\n plt.gca().invert_xaxis()\n if invert_yaxis is True:\n plt.gca().invert_yaxis()\n\n plt.grid(grid)\n\n plt.title(title)\n plt.tight_layout()\n if ofilename is not None:\n plt.savefig(ofilename)\n return plt\n\n\nlag_plot.__doc__ = lag_plot_cli.__doc__\n", "# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef overlay_taylor_diagram_circles(axes, cax, option):\n \"\"\"\n Overlays circle contours on a Taylor diagram.\n\n Plots circle contours on a Taylor diagram to indicate root mean square\n (RMS) and standard deviation values.\n\n INPUTS:\n axes : data structure containing axes information for Taylor diagram\n cax : handle for plot axes\n option : data structure containing option values. 
(See\n GET_TAYLOR_DIAGRAM_OPTIONS for more information.)\n option['colrms'] : RMS grid and tick labels color (Default: green)\n option['rincrms'] : Increment spacing for RMS grid\n option['stylerms'] : Linestyle of the RMS grid\n option['tickrms'] : RMS values to plot gridding circles from\n observation point\n option['tickRMSangle'] : Angle for RMS tick labels with the observation\n point (Default: 135 deg.)\n option['widthrms'] : Line width of the RMS grid\n\n option['colstd'] : STD grid and tick labels color (Default: black)\n option['rincstd'] : Increment spacing for STD grid\n option['stylestd'] : Linestyle of the STD grid\n option['tickstd'] : STD values to plot gridding circles from origin\n option['tickstdangle'] : Angle for STD tick labels with the observation\n point (Default: .8)\n option['widthstd'] : Line width of the STD grid\n\n OUTPUTS:\n None.\n\n See also GET_TAYLOR_DIAGRAM_OPTIONS\n\n Author: Peter A. Rochford\n Symplectic, LLC\n www.thesymplectic.com\n [email protected]\n \"\"\"\n th = np.arange(0, 2 * np.pi, np.pi / 150)\n xunit = np.cos(th)\n yunit = np.sin(th)\n\n # now really force points on x/y axes to lie on them exactly\n inds = range(0, len(th), (len(th) - 1) // 4)\n xunit[inds[1:5:2]] = np.zeros(2)\n yunit[inds[0:6:2]] = np.zeros(3)\n\n # DRAW RMS CIRCLES:\n # ANGLE OF THE TICK LABELS\n if option[\"tickrmsangle\"] > 0:\n tickRMSAngle = option[\"tickrmsangle\"]\n else:\n phi = np.arctan2(option[\"tickstd\"][-1], axes[\"dx\"])\n tickRMSAngle = 180 - np.rad2deg(phi)\n\n c82 = np.cos(tickRMSAngle * np.pi / 180)\n s82 = np.sin(tickRMSAngle * np.pi / 180)\n radius = np.sqrt(\n axes[\"dx\"] ** 2 + axes[\"rmax\"] ** 2 - 2 * axes[\"dx\"] * axes[\"rmax\"] * xunit\n )\n\n # Define label format\n labelFormat = \"{\" + option[\"rmslabelformat\"] + \"}\"\n\n for iradius in option[\"tickrms\"]:\n phi = th[np.where(radius >= iradius)]\n phi = phi[0]\n ig = np.where(iradius * np.cos(th) + axes[\"dx\"] <= axes[\"rmax\"] * np.cos(phi))\n hhh = plt.plot(\n xunit[ig] * iradius + axes[\"dx\"],\n yunit[ig] * iradius,\n linestyle=option[\"stylerms\"],\n color=option[\"colrms\"],\n linewidth=option[\"widthrms\"],\n )\n if option[\"showlabelsrms\"] == \"on\":\n xtextpos = (iradius + option[\"rincrms\"] / 20) * c82 + axes[\"dx\"]\n ytextpos = (iradius + option[\"rincrms\"] / 20) * s82\n plt.text(\n xtextpos,\n ytextpos,\n \" \" + labelFormat.format(iradius),\n verticalalignment=\"baseline\",\n color=option[\"colrms\"],\n rotation=tickRMSAngle - 90,\n )\n\n # DRAW STD CIRCLES:\n # draw radial circles\n for i in option[\"tickstd\"]:\n hhh = plt.plot(\n xunit * i,\n yunit * i,\n linestyle=option[\"stylestd\"],\n color=option[\"colstd\"],\n linewidth=option[\"widthstd\"],\n )\n if option[\"showlabelsstd\"] == \"on\":\n if option[\"numberpanels\"] == 2:\n if len(np.where(option[\"tickstd\"] == 0)) == 0:\n plt.text(\n 0,\n -axes[\"rinc\"] / 20,\n \"0\",\n verticalalignment=\"top\",\n horizontalalignment=\"center\",\n color=option[\"colstd\"],\n )\n plt.text(\n i,\n -axes[\"rinc\"] / 20,\n str(i),\n verticalalignment=\"top\",\n horizontalalignment=\"center\",\n color=option[\"colstd\"],\n )\n plt.text(\n -i,\n -axes[\"rinc\"] / 20,\n str(i),\n verticalalignment=\"top\",\n horizontalalignment=\"center\",\n color=option[\"colstd\"],\n )\n else:\n if len(np.where(option[\"tickstd\"] == 0)) == 0:\n plt.text(\n -axes[\"rinc\"] / 20,\n axes[\"rinc\"] / 20,\n \"0\",\n verticalalignment=\"center\",\n horizontalalignment=\"right\",\n color=option[\"colstd\"],\n )\n plt.text(\n -axes[\"rinc\"] / 20,\n 
i,\n str(i),\n verticalalignment=\"center\",\n horizontalalignment=\"right\",\n color=option[\"colstd\"],\n )\n\n hhh[0].set_linestyle(\"-\") # Make outermost STD circle solid\n\n # Draw circle for outer boundary\n i = option[\"axismax\"]\n hhh = plt.plot(\n xunit * i,\n yunit * i,\n linestyle=option[\"stylestd\"],\n color=option[\"colstd\"],\n linewidth=option[\"widthstd\"],\n )\n" ]
[ [ "numpy.mean", "numpy.sum" ], [ "matplotlib.use", "pandas.plotting.lag_plot" ], [ "numpy.sqrt", "numpy.arange", "numpy.cos", "numpy.rad2deg", "numpy.sin", "matplotlib.pyplot.plot", "numpy.arctan2", "matplotlib.pyplot.text", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
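Of the three plottoolbox files above, index_agreement.py is the most self-contained, and the formula in its docstring can be exercised with plain numpy. A sketch restating it, with the package's utils.check_arrays validation skipped, so the inputs are assumed to be equal-length arrays:

import numpy as np

def index_agreement(simulated, observed):
    # d = 1 - sum((o - s)^2) / sum((|s - mean(o)| + |o - mean(o)|)^2)
    o_mean = np.mean(observed)
    return 1.0 - np.sum((observed - simulated) ** 2) / np.sum(
        (np.abs(simulated - o_mean) + np.abs(observed - o_mean)) ** 2
    )

observed = np.array([1.0, 2.0, 3.0, 4.0])
simulated = np.array([1.1, 1.9, 3.2, 3.9])
print(index_agreement(simulated, observed))  # ~0.996; 1.0 is a perfect match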
dobraczka/pystow
[ "d7d66bf87dc3eeb266f6020621649ca8b68be6a4" ]
[ "tests/test_utils.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"Tests for utilities.\"\"\"\n\nimport hashlib\nimport os\nimport tempfile\nimport unittest\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nfrom pystow.utils import (\n HexDigestError,\n download,\n getenv_path,\n mkdir,\n mock_envvar,\n n,\n name_from_url,\n read_tarfile_csv,\n read_zip_np,\n read_zipfile_csv,\n write_tarfile_csv,\n write_zipfile_csv,\n write_zipfile_np,\n)\n\nHERE = Path(__file__).resolve().parent\nTEST_TXT = HERE.joinpath(\"resources\", \"test.txt\")\n\n\nclass TestUtils(unittest.TestCase):\n \"\"\"Test utility functions.\"\"\"\n\n def test_name_from_url(self):\n \"\"\"Test :func:`name_from_url`.\"\"\"\n data = [\n (\"test.tsv\", \"https://example.com/test.tsv\"),\n (\"test.tsv\", \"https://example.com/deeper/test.tsv\"),\n (\"test.tsv.gz\", \"https://example.com/deeper/test.tsv.gz\"),\n ]\n for name, url in data:\n with self.subTest(name=name, url=url):\n self.assertEqual(name, name_from_url(url))\n\n def test_mkdir(self):\n \"\"\"Test for ensuring a directory.\"\"\"\n with tempfile.TemporaryDirectory() as directory:\n directory = Path(directory)\n subdirectory = directory / \"sd1\"\n self.assertFalse(subdirectory.exists())\n\n mkdir(subdirectory, ensure_exists=False)\n self.assertFalse(subdirectory.exists())\n\n mkdir(subdirectory, ensure_exists=True)\n self.assertTrue(subdirectory.exists())\n\n def test_mock_envvar(self):\n \"\"\"Test that environment variables can be mocked properly.\"\"\"\n name, value = n(), n()\n\n self.assertNotIn(name, os.environ)\n with mock_envvar(name, value):\n self.assertIn(name, os.environ)\n self.assertEqual(value, os.getenv(name))\n self.assertNotIn(name, os.environ)\n\n def test_getenv_path(self):\n \"\"\"Test that :func:`getenv_path` works properly.\"\"\"\n envvar = n()\n\n with tempfile.TemporaryDirectory() as directory:\n directory = Path(directory)\n value = directory / n()\n default = directory / n()\n\n self.assertEqual(default, getenv_path(envvar, default))\n with mock_envvar(envvar, value.as_posix()):\n self.assertEqual(value, getenv_path(envvar, default))\n # Check that it goes back\n self.assertEqual(default, getenv_path(envvar, default))\n\n def test_compressed_io(self):\n \"\"\"Test that the read/write to compressed folder functions work.\"\"\"\n rows = [[1, 2], [3, 4], [5, 6]]\n columns = [\"A\", \"B\"]\n df = pd.DataFrame(rows, columns=columns)\n inner_path = \"okay.tsv\"\n\n data = [\n (\"test.zip\", write_zipfile_csv, read_zipfile_csv),\n (\"test.tar.gz\", write_tarfile_csv, read_tarfile_csv),\n ]\n for name, writer, reader in data:\n with self.subTest(name=name), tempfile.TemporaryDirectory() as directory:\n directory = Path(directory)\n path = directory / name\n self.assertFalse(path.exists())\n writer(df, path=path, inner_path=inner_path)\n self.assertTrue(path.exists())\n new_df = reader(path=path, inner_path=inner_path)\n self.assertEqual(list(df.columns), list(new_df.columns))\n self.assertEqual(df.values.tolist(), new_df.values.tolist())\n\n def test_numpy_io(self):\n \"\"\"Test IO with numpy.\"\"\"\n arr = np.array([[0, 1], [2, 3]])\n inner_path = \"okay.npz\"\n with tempfile.TemporaryDirectory() as directory:\n directory = Path(directory)\n path = directory / \"test.zip\"\n write_zipfile_np(arr, inner_path=inner_path, path=path)\n reloaded_arr = read_zip_np(path=path, inner_path=inner_path)\n self.assertTrue(np.array_equal(arr, reloaded_arr))\n\n\nclass TestHashing(unittest.TestCase):\n \"\"\"Tests for hexdigest checking.\"\"\"\n\n def setUp(self) -> 
None:\n \"\"\"Set up a test.\"\"\"\n self.directory = tempfile.TemporaryDirectory()\n self.path = Path(self.directory.name).joinpath(\"test.tsv\")\n\n md5 = hashlib.md5() # noqa:S303\n with TEST_TXT.open(\"rb\") as file:\n md5.update(file.read())\n self.expected_md5 = md5.hexdigest()\n self.mismatching_md5_hexdigest = \"yolo\"\n self.assertNotEqual(self.mismatching_md5_hexdigest, self.expected_md5)\n\n def tearDown(self) -> None:\n \"\"\"Tear down a test.\"\"\"\n self.directory.cleanup()\n\n def test_hash_success(self):\n \"\"\"Test checking actually works.\"\"\"\n self.assertFalse(self.path.exists())\n download(\n url=TEST_TXT.as_uri(),\n path=self.path,\n hexdigests={\n \"md5\": self.expected_md5,\n },\n )\n\n def test_hash_error(self):\n \"\"\"Test hash error on download.\"\"\"\n self.assertFalse(self.path.exists())\n with self.assertRaises(HexDigestError):\n download(\n url=TEST_TXT.as_uri(),\n path=self.path,\n hexdigests={\n \"md5\": self.mismatching_md5_hexdigest,\n },\n )\n\n def test_override_hash_error(self):\n \"\"\"Test that an existing file failing the hash check raises when force=False.\"\"\"\n self.path.write_text(\"test file content\")\n\n self.assertTrue(self.path.exists())\n with self.assertRaises(HexDigestError):\n download(\n url=TEST_TXT.as_uri(),\n path=self.path,\n hexdigests={\n \"md5\": self.expected_md5,\n },\n force=False,\n )\n\n def test_force(self):\n \"\"\"Test overwriting wrong file.\"\"\"\n # now if force=True it should not bother with the hash check\n self.path.write_text(\"test file content\")\n\n self.assertTrue(self.path.exists())\n download(\n url=TEST_TXT.as_uri(),\n path=self.path,\n hexdigests={\n \"md5\": self.expected_md5,\n },\n force=True,\n )\n" ]
[ [ "numpy.array_equal", "numpy.array", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
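The round trip asserted in test_compressed_io above doubles as a usage example for the pystow writer/reader pair; a sketch using the same keyword signatures (throwaway temp paths, illustrative frame):

import tempfile
from pathlib import Path

import pandas as pd

from pystow.utils import read_zipfile_csv, write_zipfile_csv

df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
with tempfile.TemporaryDirectory() as directory:
    path = Path(directory) / "test.zip"
    write_zipfile_csv(df, path=path, inner_path="okay.tsv")
    new_df = read_zipfile_csv(path=path, inner_path="okay.tsv")
    assert list(df.columns) == list(new_df.columns)
    assert df.values.tolist() == new_df.values.tolist()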
exchhattu/MolPro-pyTorch
[ "8dfeb0be4b855741fbc66396a27d7ae0607e161d" ]
[ "Data.py" ]
[ "'''\nWritten by: Rojan Shrestha PhD\nMon Nov 18 17:35:38 2019\n'''\n\nimport sys, os, errno\n\nimport numpy as np\n\nimport flowio # pip install FlowIO\n\nclass FCdata:\n \n def __init__(self, path_to_dir, path_to_label_data, path_to_marker):\n \"\"\"\n \n Params:\n st_path_to_file: \n st_path_to_label: \n st_path_to_marker: \n \"\"\"\n self._ma_data = {}\n\n self._Xtrains = []\n self._Xvalids = []\n self._Xtest = []\n self._Ytrains = []\n self._Yvalids = []\n self._Ytest = []\n\n # get ...\n oj_idata = self.iData(path_to_dir, path_to_label_data, path_to_marker)\n print(\"coding: 23 \", oj_idata._ts_samples)\n print(\"coding: 12 \", oj_idata._ts_phenotypes) \n # for st_path, st_label in oj_idata._ma_labels.items():\n # print(\"Coding: \", st_path, st_label)\n # ar_events, ts_channels = oj_idata.read_flowdata(st_path, \n # markers = oj_idata._ts_markers, \n # transform=None, \n # auto_comp=False)\n # self._ma_data[st_path] = ar_events\n\n\n def load_data(self): \n \"\"\"\n \n \"\"\"\n in_num_sample = len(self._ma_labels)\n in_train_sample = int(0.70*in_num_sample)\n in_valid_sample = int(0.15*in_num_sample)\n in_test_sample = int(0.15*in_num_sample)\n\n ar_idx = np.random.permutation(in_num_sample)\n ar_keys = self._ma_labels.keys()\n\n ar_keys[ar_idx[:in_train_sample]]\n ar_idx[in_train_sample:in_train_sample+in_valid_sample]\n ar_idx[-in_test_sample:]\n self._Xtrains = []\n self._Xvalids = []\n self._Xtest = []\n self._Ytrains = []\n self._Yvalids = []\n self._Ytest = []\n\n # return ...\n\n def combine_samples(self, data_list, sample_id):\n \"\"\"\n Aims: merge multiple samples together, which is identified by their sample id.\n index of data_list and sample_id should be synchronized.\n Params: \n data_list - list of sample data\n sample_id - list of sample ids \n \"\"\"\n accum_x, accum_y = [], []\n for x, y in zip(data_list, sample_id):\n accum_x.append(x)\n accum_y.append(y * np.ones(x.shape[0], dtype=int))\n return np.vstack(accum_x), np.hstack(accum_y)\n\n def generate_subsets(self, X, pheno_map, ts_sample_ids, nsubsets=1000, \n ncell=200, per_sample=False, k_init=False):\n \"\"\"\n Aims: generates the data ready for pytorch model. This data generation\n is very problem specific. Each patient has nsubsets data and \n each contains ncell.\n\n Params:\n\n \"\"\"\n S = dict()\n n_unique_sample = len(np.unique(ts_sample_ids))\n\n # create N subset samples for each patient. each subset contains \n # N randomly selected cells \n for n_sample_id in range(n_unique_sample):\n X_i = X[np.where(ts_sample_ids == n_sample_id)]\n S[n_sample_id] = per_sample_subsets(X_i, nsubsets, ncell, k_init)\n # contains 3D data\n\n # interesting going here - onward data will not keep track of patient\n # information instead there will be phenotype. 
Since S.values() is\n # three dimensional array, patient specific data is not mingled with\n # others\n data_list, y_list = [], []\n for y_i, x_i in S.items(): # y_i: patient ids and x_i: their corresponding cells \n data_list.append(x_i)\n y_list.append(pheno_map[y_i] * np.ones(x_i.shape[0], dtype=int))\n\n import sklearn.utils as sku # used below but missing from the module imports\n Xt = np.vstack(data_list)\n yt = np.hstack(y_list)\n Xt, yt = sku.shuffle(Xt, yt)\n return Xt, yt\n\n def per_sample_subsets(self, X, nsubsets, ncell_per_subset, k_init=False):\n \"\"\"\n Aims: prepare the dimension ready to input the deep learning model\n\n Params:\n X - 2D array (cells x markers) for a single sample\n nsubsets - number of subsets to draw\n ncell_per_subset - number of cells per subset\n k_init - if True, draw 2000 cells first, then k-means downsample\n \"\"\"\n # NOTE: random_subsample and kmeans_subsample are helper functions that\n # are not defined or imported anywhere in this file.\n nmark = X.shape[1]\n shape = (nsubsets, nmark, ncell_per_subset)\n Xres = np.zeros(shape)\n\n if not k_init:\n for i in range(nsubsets):\n X_i = random_subsample(X, ncell_per_subset)\n Xres[i] = X_i.T\n else:\n for i in range(nsubsets):\n X_i = random_subsample(X, 2000)\n X_i = kmeans_subsample(X_i, ncell_per_subset, random_state=i)\n Xres[i] = X_i.T\n return Xres\n\n\n class iData:\n \n def __init__(self, path_to_dir, path_to_label, path_to_marker, cofactor=5):\n\n self._ma_labels = dict() \n self._ts_markers = []\n\n self.read_labels(path_to_label) # label either positive or neutral\n self.read_markers(path_to_marker) # marker of each cell\n\n self._ts_samples = []\n self._ts_phenotypes = []\n\n # read all files with suffix .fcs from the given directory.\n for fname, flabel in self._ma_labels.items():\n full_path = os.path.join(path_to_dir, fname)\n ar_events, ts_channels = self.read_flowdata(full_path, transform=None, auto_comp=False)\n\n ts_marker_idx = [ts_channels.index(name) for name in self._ts_markers]\n x = ar_events[:, ts_marker_idx]\n x = np.arcsinh(1./cofactor * x)\n self._ts_samples.append(x)\n self._ts_phenotypes.append(flabel)\n\n def read_labels(self, path_to_label):\n \"\"\"\n Read the label of each mass cytometry file and store into dictionary\n\n Params:\n path_to_label: path to label file\n \"\"\"\n\n if os.path.exists(path_to_label):\n with open(path_to_label, \"r\") as oj_path:\n ts_fcm_files = oj_path.read().split(\"\\n\")\n for st_fcm_file in ts_fcm_files:\n if not st_fcm_file: continue\n ts_parts = st_fcm_file.split(\",\")\n if ts_parts[0] == 'fcs_filename' and ts_parts[1] == 'label': continue\n self._ma_labels[ts_parts[0]] = ts_parts[1]\n else: \n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path_to_label)\n\n\n def read_markers(self, path_to_marker):\n \"\"\"\n Read markers and store into list \n\n Params:\n path_to_marker: path to marker file\n \"\"\"\n if os.path.exists(path_to_marker):\n with open(path_to_marker, \"r\") as oj_path:\n ts_markers = oj_path.read().split(\"\\n\")[0].split(\",\")\n self._ts_markers = [st_marker for st_marker in ts_markers if st_marker]\n else:\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path_to_marker)\n\n \n def read_flowdata(self, path_to_file, *args, **kwargs):\n \"\"\"\n Aims:\n\n Params:\n path_to_file: path to file fcs \n markers: list of selected markers \n\n Returns: \n \"\"\"\n # st_fupath = os.path.join(path_to_dir, path_to_file) \n print(\"Coding:\", path_to_file)\n oj_f = flowio.FlowData(path_to_file)\n ar_events = np.reshape(oj_f.events, (-1, oj_f.channel_count))\n \n ts_channels = []\n for i in range(1, oj_f.channel_count+1):\n key = str(i)\n if 'PnS' in oj_f.channels[key] and oj_f.channels[key]['PnS'] != u' ':\n ts_channels.append(oj_f.channels[key]['PnS'])\n elif 'PnN' in oj_f.channels[key] and oj_f.channels[key]['PnN'] != u' ':\n ts_channels.append(oj_f.channels[key]['PnN'])\n else:\n 
ts_channels.append('None')\n\n return ar_events, ts_channels\n \n ### def load_data(path_to_dir)\n ### \"\"\"\n ### Aims: read the files from given directory\n \n ### Params:\n ### path_to_dir: path to directory where files are located\n ### \"\"\"\n \n ### ts_files = os.listdir(path_to_dir)\n ### if not ts_files:\n ### print(\"[FATAL]: no files in %s\" %path_to_dir)\n ### sys.exit(0)\n \n ### for st_file in ts_files:\n ### if st_file.endswith(\".fcs\"):\n\ndef test():\n path_to_dir = \"./data/gated_NK/\" \n path_to_label = \"./data/NK_fcs_samples_with_labels.csv\"\n path_to_marker = \"./data/nk_marker.csv\"\n o_fc_data = FCdata(path_to_dir, path_to_label, path_to_marker) \n\n\n\nif __name__ == \"__main__\":\n # guard the smoke test so importing Data.py has no side effects\n test()\n\n\n" ]
[ [ "numpy.hstack", "numpy.arcsinh", "numpy.unique", "numpy.reshape", "numpy.ones", "numpy.random.permutation", "numpy.zeros", "numpy.where", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
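combine_samples is the one fully self-contained piece of Data.py above, and it fixes the labeling convention used before subsetting: stack each sample's event matrix and tag every row with its sample id. A sketch run on toy arrays (the shapes are illustrative, not from the record):

import numpy as np

def combine_samples(data_list, sample_id):
    # same body as FCdata.combine_samples above, minus the class
    accum_x, accum_y = [], []
    for x, y in zip(data_list, sample_id):
        accum_x.append(x)
        accum_y.append(y * np.ones(x.shape[0], dtype=int))
    return np.vstack(accum_x), np.hstack(accum_y)

a = np.random.rand(3, 2)  # 3 cells x 2 markers from sample 0
b = np.random.rand(5, 2)  # 5 cells x 2 markers from sample 1
X, ids = combine_samples([a, b], [0, 1])
print(X.shape)  # (8, 2)
print(ids)      # [0 0 0 1 1 1 1 1]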
RasmusVestiH/RV_cds_language2
[ "61472e7cb385c6ca578dce2f4301fb27666e058b" ]
[ "as5env/lib/python3.6/site-packages/spacy/training/initialize.py" ]
[ "from typing import Union, Dict, Optional, Any, IO, TYPE_CHECKING\nfrom thinc.api import Config, fix_random_seed, set_gpu_allocator\nfrom thinc.api import ConfigValidationError\nfrom pathlib import Path\nimport srsly\nimport numpy\nimport tarfile\nimport gzip\nimport zipfile\nimport tqdm\n\nfrom .pretrain import get_tok2vec_ref\nfrom ..lookups import Lookups\nfrom ..vectors import Vectors\nfrom ..errors import Errors, Warnings\nfrom ..schemas import ConfigSchemaTraining\nfrom ..util import registry, load_model_from_config, resolve_dot_names, logger\nfrom ..util import load_model, ensure_path, get_sourced_components\nfrom ..util import OOV_RANK, DEFAULT_OOV_PROB\n\nif TYPE_CHECKING:\n from ..language import Language # noqa: F401\n\n\ndef init_nlp(config: Config, *, use_gpu: int = -1) -> \"Language\":\n raw_config = config\n config = raw_config.interpolate()\n if \"seed\" not in config[\"training\"]:\n raise ValueError(Errors.E1015.format(value=\"[training] seed\"))\n if \"gpu_allocator\" not in config[\"training\"]:\n raise ValueError(Errors.E1015.format(value=\"[training] gpu_allocator\"))\n if config[\"training\"][\"seed\"] is not None:\n fix_random_seed(config[\"training\"][\"seed\"])\n allocator = config[\"training\"][\"gpu_allocator\"]\n if use_gpu >= 0 and allocator:\n set_gpu_allocator(allocator)\n # Use original config here before it's resolved to functions\n sourced = get_sourced_components(config)\n nlp = load_model_from_config(raw_config, auto_fill=True)\n logger.info(\"Set up nlp object from config\")\n config = nlp.config.interpolate()\n # Resolve all training-relevant sections using the filled nlp config\n T = registry.resolve(config[\"training\"], schema=ConfigSchemaTraining)\n dot_names = [T[\"train_corpus\"], T[\"dev_corpus\"]]\n if not isinstance(T[\"train_corpus\"], str):\n raise ConfigValidationError(\n desc=Errors.E897.format(\n field=\"training.train_corpus\", type=type(T[\"train_corpus\"])\n )\n )\n if not isinstance(T[\"dev_corpus\"], str):\n raise ConfigValidationError(\n desc=Errors.E897.format(\n field=\"training.dev_corpus\", type=type(T[\"dev_corpus\"])\n )\n )\n train_corpus, dev_corpus = resolve_dot_names(config, dot_names)\n optimizer = T[\"optimizer\"]\n # Components that shouldn't be updated during training\n frozen_components = T[\"frozen_components\"]\n # Sourced components that require resume_training\n resume_components = [p for p in sourced if p not in frozen_components]\n logger.info(f\"Pipeline: {nlp.pipe_names}\")\n if resume_components:\n with nlp.select_pipes(enable=resume_components):\n logger.info(f\"Resuming training for: {resume_components}\")\n nlp.resume_training(sgd=optimizer)\n # Make sure that listeners are defined before initializing further\n nlp._link_components()\n with nlp.select_pipes(disable=[*frozen_components, *resume_components]):\n nlp.initialize(lambda: train_corpus(nlp), sgd=optimizer)\n logger.info(f\"Initialized pipeline components: {nlp.pipe_names}\")\n # Detect components with listeners that are not frozen consistently\n for name, proc in nlp.pipeline:\n if getattr(proc, \"listening_components\", None): # e.g. 
tok2vec/transformer\n for listener in proc.listening_components:\n if listener in frozen_components and name not in frozen_components:\n logger.warning(Warnings.W087.format(name=name, listener=listener))\n # We always check this regardless, in case user freezes tok2vec\n if listener not in frozen_components and name in frozen_components:\n logger.warning(Warnings.W086.format(name=name, listener=listener))\n return nlp\n\n\ndef init_vocab(\n nlp: \"Language\",\n *,\n data: Optional[Path] = None,\n lookups: Optional[Lookups] = None,\n vectors: Optional[str] = None,\n) -> \"Language\":\n if lookups:\n nlp.vocab.lookups = lookups\n logger.info(f\"Added vocab lookups: {', '.join(lookups.tables)}\")\n data_path = ensure_path(data)\n if data_path is not None:\n lex_attrs = srsly.read_jsonl(data_path)\n for lexeme in nlp.vocab:\n lexeme.rank = OOV_RANK\n for attrs in lex_attrs:\n if \"settings\" in attrs:\n continue\n lexeme = nlp.vocab[attrs[\"orth\"]]\n lexeme.set_attrs(**attrs)\n if len(nlp.vocab):\n oov_prob = min(lex.prob for lex in nlp.vocab) - 1\n else:\n oov_prob = DEFAULT_OOV_PROB\n nlp.vocab.cfg.update({\"oov_prob\": oov_prob})\n logger.info(f\"Added {len(nlp.vocab)} lexical entries to the vocab\")\n logger.info(\"Created vocabulary\")\n if vectors is not None:\n load_vectors_into_model(nlp, vectors)\n logger.info(f\"Added vectors: {vectors}\")\n logger.info(\"Finished initializing nlp object\")\n\n\ndef load_vectors_into_model(\n nlp: \"Language\", name: Union[str, Path], *, add_strings: bool = True\n) -> None:\n \"\"\"Load word vectors from an installed model or path into a model instance.\"\"\"\n try:\n vectors_nlp = load_model(name)\n except ConfigValidationError as e:\n title = f\"Config validation error for vectors {name}\"\n desc = (\n \"This typically means that there's a problem in the config.cfg included \"\n \"with the packaged vectors. Make sure that the vectors package you're \"\n \"loading is compatible with the current version of spaCy.\"\n )\n err = ConfigValidationError.from_error(e, title=title, desc=desc)\n raise err from None\n nlp.vocab.vectors = vectors_nlp.vocab.vectors\n if add_strings:\n # I guess we should add the strings from the vectors_nlp model?\n # E.g. if someone does a similarity query, they might expect the strings.\n for key in nlp.vocab.vectors.key2row:\n if key in vectors_nlp.vocab.strings:\n nlp.vocab.strings.add(vectors_nlp.vocab.strings[key])\n\n\ndef init_tok2vec(\n nlp: \"Language\", pretrain_config: Dict[str, Any], init_config: Dict[str, Any]\n) -> bool:\n # Load pretrained tok2vec weights - cf. 
CLI command 'pretrain'\n P = pretrain_config\n I = init_config\n weights_data = None\n init_tok2vec = ensure_path(I[\"init_tok2vec\"])\n if init_tok2vec is not None:\n if not init_tok2vec.exists():\n err = f\"can't find pretrained tok2vec: {init_tok2vec}\"\n errors = [{\"loc\": [\"initialize\", \"init_tok2vec\"], \"msg\": err}]\n raise ConfigValidationError(config=nlp.config, errors=errors)\n with init_tok2vec.open(\"rb\") as file_:\n weights_data = file_.read()\n if weights_data is not None:\n layer = get_tok2vec_ref(nlp, P)\n layer.from_bytes(weights_data)\n logger.info(f\"Loaded pretrained weights from {init_tok2vec}\")\n return True\n return False\n\n\ndef convert_vectors(\n nlp: \"Language\",\n vectors_loc: Optional[Path],\n *,\n truncate: int,\n prune: int,\n name: Optional[str] = None,\n) -> None:\n vectors_loc = ensure_path(vectors_loc)\n if vectors_loc and vectors_loc.parts[-1].endswith(\".npz\"):\n nlp.vocab.vectors = Vectors(data=numpy.load(vectors_loc.open(\"rb\")))\n for lex in nlp.vocab:\n if lex.rank and lex.rank != OOV_RANK:\n nlp.vocab.vectors.add(lex.orth, row=lex.rank)\n else:\n if vectors_loc:\n logger.info(f\"Reading vectors from {vectors_loc}\")\n vectors_data, vector_keys = read_vectors(vectors_loc, truncate)\n logger.info(f\"Loaded vectors from {vectors_loc}\")\n else:\n vectors_data, vector_keys = (None, None)\n if vector_keys is not None:\n for word in vector_keys:\n if word not in nlp.vocab:\n nlp.vocab[word]\n if vectors_data is not None:\n nlp.vocab.vectors = Vectors(data=vectors_data, keys=vector_keys)\n if name is None:\n # TODO: Is this correct? Does this matter?\n nlp.vocab.vectors.name = f\"{nlp.meta['lang']}_{nlp.meta['name']}.vectors\"\n else:\n nlp.vocab.vectors.name = name\n nlp.meta[\"vectors\"][\"name\"] = nlp.vocab.vectors.name\n if prune >= 1:\n nlp.vocab.prune_vectors(prune)\n\n\ndef read_vectors(vectors_loc: Path, truncate_vectors: int):\n f = ensure_shape(vectors_loc)\n shape = tuple(int(size) for size in next(f).split())\n if truncate_vectors >= 1:\n shape = (truncate_vectors, shape[1])\n vectors_data = numpy.zeros(shape=shape, dtype=\"f\")\n vectors_keys = []\n for i, line in enumerate(tqdm.tqdm(f)):\n line = line.rstrip()\n pieces = line.rsplit(\" \", vectors_data.shape[1])\n word = pieces.pop(0)\n if len(pieces) != vectors_data.shape[1]:\n raise ValueError(Errors.E094.format(line_num=i, loc=vectors_loc))\n vectors_data[i] = numpy.asarray(pieces, dtype=\"f\")\n vectors_keys.append(word)\n if i == truncate_vectors - 1:\n break\n return vectors_data, vectors_keys\n\n\ndef open_file(loc: Union[str, Path]) -> IO:\n \"\"\"Handle .gz, .tar.gz or unzipped files\"\"\"\n loc = ensure_path(loc)\n if tarfile.is_tarfile(str(loc)):\n return tarfile.open(str(loc), \"r:gz\")\n elif loc.parts[-1].endswith(\"gz\"):\n return (line.decode(\"utf8\") for line in gzip.open(str(loc), \"r\"))\n elif loc.parts[-1].endswith(\"zip\"):\n zip_file = zipfile.ZipFile(str(loc))\n names = zip_file.namelist()\n file_ = zip_file.open(names[0])\n return (line.decode(\"utf8\") for line in file_)\n else:\n return loc.open(\"r\", encoding=\"utf8\")\n\n\ndef ensure_shape(vectors_loc):\n \"\"\"Ensure that the first line of the data is the vectors shape.\n If it's not, we read in the data and output the shape as the first result,\n so that the reader doesn't have to deal with the problem.\n \"\"\"\n lines = open_file(vectors_loc)\n first_line = next(lines)\n try:\n shape = tuple(int(size) for size in first_line.split())\n except ValueError:\n shape = None\n if shape is not None:\n # 
All good, give the data\n        yield first_line\n        yield from lines\n    else:\n        # Figure out the shape, make it the first value, and then give the\n        # rest of the data.\n        width = len(first_line.split()) - 1\n        length = 1\n        for _ in lines:\n            length += 1\n        yield f"{length} {width}"\n        # Read the lines in again from the file. This is to avoid having to\n        # store all the results in a list in memory.\n        lines2 = open_file(vectors_loc)\n        yield from lines2\n" ]
[ [ "numpy.asarray", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FabricioSMarin/XRFtomo
[ "f5c9f6411bea9a85082d943af873e83f81431e5a" ]
[ "xrftomo/widgets/reconstruction.py" ]
[ "# #########################################################################\n# Copyright © 2020, UChicago Argonne, LLC. All Rights Reserved. #\n# #\n# Software Name: XRFtomo #\n# #\n# By: Argonne National Laboratory #\n# #\n# OPEN SOURCE LICENSE #\n# #\n# Redistribution and use in source and binary forms, with or without #\n# modification, are permitted provided that the following conditions #\n# are met: #\n# #\n# 1. Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# 2. Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# 3. Neither the name of the copyright holder nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. #\n# #\n# DISCLAIMER #\n# #\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#\n###########################################################################\n\n\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtCore import pyqtSignal\nimport xrftomo\nimport pyqtgraph\nimport numpy as np\n\nclass ReconstructionWidget(QtWidgets.QWidget):\n elementChangedSig = pyqtSignal(int, name='elementChangedSig')\n sldRangeChanged = pyqtSignal(int, np.ndarray, np.ndarray, name='sldRangeChanged')\n reconChangedSig = pyqtSignal(np.ndarray, name='reconChangedSig')\n reconArrChangedSig = pyqtSignal(np.ndarray, name='reconArrChangedSig')\n\n def __init__(self):\n super(ReconstructionWidget, self).__init__()\n self.initUI()\n\n def initUI(self):\n self.ViewControl = xrftomo.ReconstructionControlsWidget()\n self.ReconView = xrftomo.ReconView(self)\n self.actions = xrftomo.ReconstructionActions()\n self.actions2 = xrftomo.ImageProcessActions()\n self.writer = xrftomo.SaveOptions()\n\n self.file_name_title = QtWidgets.QLabel(\"_\")\n lbl1 = QtWidgets.QLabel(\"x pos:\")\n self.lbl2 = QtWidgets.QLabel(\"\")\n lbl3 = QtWidgets.QLabel(\"y pos:\")\n self.lbl4 = QtWidgets.QLabel(\"\")\n lbl5 = QtWidgets.QLabel(\"Slice\")\n lbl6 = QtWidgets.QLabel(\"value:\")\n self.lbl7 = QtWidgets.QLabel(\"\")\n\n self.ReconView.mouseMoveSig.connect(self.updatePanel)\n #get pixel value from Histogram widget's projview \n\n self.sld = QtWidgets.QSlider(QtCore.Qt.Horizontal, self)\n self.lcd = QtWidgets.QLCDNumber(self)\n self.hist = pyqtgraph.HistogramLUTWidget()\n self.hist.setMinimumSize(120,120)\n self.hist.setMaximumWidth(120)\n self.hist.setImageItem(self.ReconView.projView)\n\n self.ViewControl.combo1.currentIndexChanged.connect(self.elementChanged)\n self.ViewControl.reconGroup.currentIndexChanged.connect(self.recon_combobox_changed)\n self.ViewControl.btn.clicked.connect(self.reconstruct_params)\n self.ViewControl.equalizeBtn.clicked.connect(self.equalize_params)\n self.ViewControl.rmHotspotBtn.clicked.connect(self.rm_hotspot_params)\n self.ViewControl.setThreshBtn.clicked.connect(self.set_thresh_params)\n\n\n self.ViewControl.btn2.clicked.connect(self.reconstruct_all_params)\n self.ViewControl.recon2npy.clicked.connect(self.reconstruct_all_npy_params)\n self.ViewControl.mulBtn.clicked.connect(self.call_reconMultiply)\n self.ViewControl.divBtn.clicked.connect(self.call_reconDivide)\n self.ViewControl.end_indx.editingFinished.connect(self.update_y_range)\n self.ViewControl.start_indx.editingFinished.connect(self.update_y_range)\n self.ViewControl.mid_indx.editingFinished.connect(self.update_middle_index)\n self.ViewControl.recon_stats.clicked.connect(self.toggle_middle_index)\n self.sld.valueChanged.connect(self.update_recon_image)\n\n self.x_shifts = None\n self.y_shifts = None\n self.centers = None\n self.recon = None\n self.recon_array = None\n self.data = None\n self.data_original = None\n\n hb0 = QtWidgets.QHBoxLayout()\n hb0.addWidget(lbl1)\n hb0.addWidget(self.lbl2)\n hb0.addWidget(lbl3)\n hb0.addWidget(self.lbl4)\n hb0.addWidget(lbl6)\n hb0.addWidget(self.lbl7)\n\n hb1 = QtWidgets.QHBoxLayout()\n hb1.addWidget(lbl5)\n hb1.addWidget(self.lcd)\n hb1.addWidget(self.sld)\n\n vb1 = QtWidgets.QVBoxLayout()\n vb1.addWidget(self.file_name_title)\n vb1.addLayout(hb0)\n vb1.addWidget(self.ReconView)\n vb1.addLayout(hb1)\n\n hb2 = QtWidgets.QHBoxLayout()\n hb2.addWidget(self.ViewControl)\n hb2.addLayout(vb1)\n hb2.addWidget(self.hist, 10)\n\n self.setLayout(hb2)\n\n def updatePanel(self,x,y):\n self.lbl2.setText(str(x))\n self.lbl4.setText(str(y))\n try:\n pixel_val = 
round(self.ReconView.projView.image[abs(y)-1,x],4)\n            self.lbl7.setText(str(pixel_val))\n        except:\n            self.lbl7.setText("")\n\n    def showReconstruct(self):\n        '''\n        load the reconstruction window\n        '''\n        self.write = xrftomo.SaveOptions()\n        self.actions.x_shifts = self.x_shifts\n        self.actions.y_shifts = self.y_shifts\n        self.actions.centers = self.centers\n        self.y_range = self.data.shape[2]\n\n        self.ViewControl.combo1.clear()\n        self.ViewControl.method.clear()\n        self.ViewControl.reconGroup.clear()\n        self.ViewControl.reconGroup.disconnect()\n        methodname = [\"mlem\", \"gridrec\", \"art\", \"pml_hybrid\", \"pml_quad\", \"fbp\", \"sirt\", \"tv\"]\n        for j in self.elements:\n            self.ViewControl.combo1.addItem(j)\n        for k in range(len(methodname)):\n            self.ViewControl.method.addItem(methodname[k])\n        for l in self.elements:\n            self.ViewControl.reconGroup.addItem(l)\n        self.recon_array = np.zeros((len(self.elements),self.y_range,self.data.shape[3],self.data.shape[3]))\n\n        self.ViewControl.reconGroup.currentIndexChanged.connect(self.recon_combobox_changed)\n        self.elementChanged()\n\n        #TODO: recon_array will need to update with any changes to data dimensions as well as re-initialization\n        # self.ViewControl.centerTextBox.setText(str(self.centers[2]))\n        self.ViewControl.mulBtn.setEnabled(False)\n        self.ViewControl.divBtn.setEnabled(False)\n        self.ViewControl.end_indx.setText(str(self.data.shape[2]))\n        self.ViewControl.mid_indx.setText(str(self.data.shape[2]//2))\n\n        self.sld.setRange(0, self.y_range - 1)\n        self.lcd.display(0)\n\n    def elementChanged(self):\n        element = self.ViewControl.combo1.currentIndex()\n        self.updateElementSlot(element)\n        self.elementChangedSig.emit(element)\n\n    def updateElementSlot(self, element):\n        self.ViewControl.combo1.setCurrentIndex(element)\n\n    def call_reconMultiply(self):\n        '''\n        multiply reconstruction by 10\n        '''\n        self.recon = self.actions.reconMultiply(self.recon)\n        self.update_recon_image()\n\n    def call_reconDivide(self):\n        '''\n        divide reconstruction by 10\n        '''\n        self.recon = self.actions.reconDivide(self.recon)\n        self.update_recon_image()\n\n    def reconstruct_params(self):\n        element = self.ViewControl.combo1.currentIndex()\n        center = np.array(float(self.data.shape[3]), dtype=np.float32)/2\n        method = self.ViewControl.method.currentIndex()\n        beta = float(self.ViewControl.beta.text())\n        delta = float(self.ViewControl.delta.text())\n        iters = int(self.ViewControl.iters.text())\n        thetas = self.thetas\n        end_indx = int(self.data.shape[2] - eval(self.ViewControl.start_indx.text()))\n        start_indx = int(self.data.shape[2] - eval(self.ViewControl.end_indx.text()))\n        mid_indx = int(self.data.shape[2] - eval(self.ViewControl.mid_indx.text())) - start_indx - 1\n\n        data = self.data[:,:,start_indx:end_indx,:]\n        show_stats = self.ViewControl.recon_stats.isChecked()\n        num_xsections = data.shape[2]\n\n        if self.ViewControl.recon_save.isChecked():\n            try:\n                # QFileDialog lives in QtWidgets (PyQt5's QtGui has no QFileDialog)\n                savedir = QtWidgets.QFileDialog.getSaveFileName()[0]\n                # savedir = '/Users/fabriciomarin/Documents/scans/Lin_XRF_tomo/Lin_3D2/testing/ptycho'\n\n                if savedir == \"\":\n                    raise IOError\n                if savedir is None:\n                    return\n            except IOError:\n                print(\"type the header name\")\n            except:\n                print(\"Something went horribly wrong.\")\n\n            # reconstruct one cross section at a time and save after each loop/completion.
\n recons = np.zeros((data.shape[2],data.shape[3], data.shape[3]))\n xsection = np.zeros((1,data.shape[1],1, data.shape[3]))\n start_idx = int(eval(self.ViewControl.start_indx.text()))\n for i in range(num_xsections):\n j = num_xsections-i-1\n xsection[0,:,0] = data[element,:,j]\n recon = self.actions.reconstruct(xsection, 0, center, method, beta, delta, iters, thetas, 0, False)\n recons[i] = recon\n self.writer.save_reconstruction(recon, savedir, start_idx+i)\n self.recon = np.array(recons)\n else:\n self.recon = self.actions.reconstruct(data, element, center, method, beta, delta, iters, thetas, mid_indx, show_stats)\n \n self.ViewControl.mulBtn.setEnabled(True)\n self.ViewControl.divBtn.setEnabled(True)\n self.update_recon_image()\n self.update_recon_array(self.recon)\n self.reconChangedSig.emit(self.recon)\n self.reconArrChangedSig.emit(self.recon_array)\n return\n\n def reconstruct_all_params(self):\n #figure out how to get a list of all selected elements\n num_elements = self.ViewControl.combo1.count()\n element_names = [self.ViewControl.combo1.itemText(i) for i in range(num_elements)]\n # box_checked = self.ViewControl.cbox.isChecked()\n center = np.array(float(self.data.shape[3]), dtype=np.float32)/2\n method = self.ViewControl.method.currentIndex()\n beta = float(self.ViewControl.beta.text())\n delta = float(self.ViewControl.delta.text())\n iters = int(self.ViewControl.iters.text())\n thetas = self.thetas\n end_indx = int(self.data.shape[2] - eval(self.ViewControl.start_indx.text()))\n start_indx = int(self.data.shape[2] - eval(self.ViewControl.end_indx.text()))\n mid_indx = int(self.data.shape[2] - eval(self.ViewControl.mid_indx.text()))\n data = self.data[:,:,start_indx:end_indx,:]\n\n self.recon = self.actions.reconstructAll(data, element_names, center, method, beta, delta, iters, thetas)\n self.ViewControl.mulBtn.setEnabled(True)\n self.ViewControl.divBtn.setEnabled(True)\n self.update_recon_image()\n self.reconChangedSig.emit(self.recon)\n return\n\n def reconstruct_all_npy_params(self):\n #figure out how to get a list of all selected elements\n num_elements = self.ViewControl.combo1.count()\n element_names = [self.ViewControl.combo1.itemText(i) for i in range(num_elements)]\n # box_checked = self.ViewControl.cbox.isChecked()\n center = np.array(float(self.data.shape[3]), dtype=np.float32)/2\n method = self.ViewControl.method.currentIndex()\n beta = float(self.ViewControl.beta.text())\n delta = float(self.ViewControl.delta.text())\n iters = int(self.ViewControl.iters.text())\n thetas = self.thetas\n end_indx = int(self.data.shape[2] - eval(self.ViewControl.start_indx.text()))\n start_indx = int(self.data.shape[2] - eval(self.ViewControl.end_indx.text()))\n mid_indx = int(self.data.shape[2] - eval(self.ViewControl.mid_indx.text()))\n data = self.data[:,:,start_indx:end_indx,:]\n\n #reconArray [element,stack,y,x]\n num_elements = data.shape[0]\n num_slices = data.shape[2]\n slice_dim = data.shape[3]\n\n for i in range(num_elements):\n self.recon = self.actions.reconstruct(data, i, center, method, beta, delta, iters, thetas, mid_indx, False)\n self.recon_array[i] = self.recon\n self.update_recon_image()\n self.reconChangedSig.emit(self.recon)\n self.writer.save_recon_array_2npy(self.recon_array, savedir=None, index=-1)\n return\n\n def ySizeChanged(self, ySize):\n self.ViewControl.start_indx.setText('0')\n self.ViewControl.end_indx.setText(str(ySize))\n self.ViewControl.mid_indx.setText(str(ySize//2))\n self.sld.setValue(0)\n self.sld.setMaximum(ySize)\n self.recon_array = 
np.zeros((len(self.elements),ySize,self.data.shape[3],self.data.shape[3]))\n        #check for xSize too.\n        return\n\n    def xSizeChanged(self, xSize):\n        self.recon_array = np.zeros((len(self.elements),self.data.shape[2],xSize,xSize))\n        return\n\n    def update_y_range(self):\n        start_indx = int(self.ViewControl.start_indx.text())\n        end_indx = int(self.ViewControl.end_indx.text())\n        if end_indx > self.data.shape[2]:\n            end_indx = self.data.shape[2]\n            self.ViewControl.end_indx.setText(str(end_indx))\n        if end_indx <= 0:\n            end_indx = self.data.shape[2]\n            self.ViewControl.end_indx.setText(str(end_indx))\n        if start_indx >= end_indx:\n            self.ViewControl.start_indx.setText(str(end_indx-1))\n        if start_indx < 0:\n            self.ViewControl.start_indx.setText(str(0))\n        self.update_middle_index()\n\n        self.sld.setRange(0, end_indx-start_indx - 1)\n        self.sld.setValue(0)\n        self.lcd.display(0)\n\n    def update_middle_index(self):\n        start_indx = int(self.ViewControl.start_indx.text())\n        end_indx = int(self.ViewControl.end_indx.text())\n        mid_indx = int(self.ViewControl.mid_indx.text())\n        if mid_indx == -1:\n            mid_indx = end_indx//2\n        if mid_indx > end_indx:\n            mid_indx = end_indx\n            self.ViewControl.mid_indx.setText(str(mid_indx))\n        if mid_indx < start_indx:\n            mid_indx = start_indx\n            self.ViewControl.mid_indx.setText(str(mid_indx))\n\n    def toggle_middle_index(self):\n        if self.ViewControl.recon_stats.isChecked():\n            self.ViewControl.mid_indx.setEnabled(True)\n        else:\n            self.ViewControl.mid_indx.setEnabled(False)\n\n    def equalize_params(self):\n        # assign the result back so update_recon_image() shows the processed volume\n        self.recon = self.actions.equalize_recon(self.recon)\n        self.update_recon_image()\n\n    def rm_hotspot_params(self):\n        self.recon = self.actions.remove_hotspots(self.recon)\n        self.update_recon_image()\n\n    def set_thresh_params(self):\n        threshold = float(self.ViewControl.lThresh.text())\n        self.recon = self.actions.setThreshold(threshold, self.recon)\n        self.update_recon_image()\n\n    def update_recon_array(self, recon):\n        indx = self.ViewControl.combo1.currentIndex()\n        # recon could be a partial reconstruction, account for this by indexing the Y range as well\n        ymin = int(eval(self.ViewControl.start_indx.text()))\n        ymax = int(eval(self.ViewControl.end_indx.text()))\n\n        self.recon_array[indx,ymin:ymax,:] = recon\n\n    def recon_combobox_changed(self):\n        indx = self.ViewControl.reconGroup.currentIndex()\n        self.recon = self.recon_array[indx]\n        self.update_recon_image()\n\n    def update_recon_image(self):\n        index = self.sld.value()\n        self.lcd.display(index)\n\n        try:\n            self.ViewControl.maxText.setText(str(self.recon[index, :, :].max()))\n            self.ViewControl.minText.setText(str(self.recon[index, :, :].min()))\n            self.ReconView.projView.setImage(self.recon[index, :, :])\n        except:\n            print(\"run reconstruction first\")\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VasilisNtousis/Hilti_Project_Cyber_Aware
[ "175f9343462175de5d0f24eacf285bd27f12b399" ]
[ "src/create_es_df_to_csv.py" ]
[ "import pandas as pd \n\ndef awareness_dataset():\n sec_df = pd.read_excel('IT Security Awareness Global report.xlsx')\n\n sec_df = sec_df.drop(columns=['Username','FunctionalAreaName','CostCenterName',\n 'Time spent on test','Community name','Content name',\n 'Course specified approver','TradeAssignment(Attribute8)',\n 'SalesOrganizationName','SalesOrganizationCode','FunctionalAreaCode',\n 'FirstName','LastName',\"Creator approver\",\"Manager approver\",\"Specified Approver\",\n \"Approval date\",'Suspend data'])\n\n sec_df[[\"Last login\", \"Last activity\",\"Registration date\", \"Created\",\"Date course completed\"]].apply(\n pd.to_datetime,format=\"%Y-%m-%dT%H:%M:%S\",errors='coerce')\n \n sec_df[\"Last login\"]=sec_df[\"Last login\"].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n sec_df[\"Last activity\"] = sec_df[\"Last activity\"].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n sec_df[\"Registration date\"] = sec_df[\"Registration date\"].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n sec_df[\"Date course completed\"] = sec_df[\"Date course completed\"].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n\n sec_df.to_csv('hilti.csv',index=False)\n\ndef phising_dataset():\n xls = pd.ExcelFile('../DataPhishingReport.xlsx')\n df1 = pd.read_excel(xls,'Attachments')\n df2 = pd.read_excel(xls,'Blank')\n df3 = pd.read_excel(xls,'Spam')\n df4 = pd.read_excel(xls,\"RealPhishing\")\n df5 = pd.read_excel(xls,\"Internal Phishing\")\n\n df1.insert(3,'Filename','Attachments')\n df2.insert(3,'Filename','Blank')\n df3.insert(3,'Filename','Spam')\n df4.insert(3,'Filename','RealPhising')\n df5.insert(3,'Filename','Internal Phising')\n\n\n df4 = df4.drop(['Sender','TicketNr','More than 1 recipient'],axis=1)\n df4= df4.reindex(['Subject',\"Date\",'UserID','Filename'],axis=1)\n df4.columns = ['Subject','Received','Sender','Filename']\n\n df_list = [df1,df2,df3,df4,df5]\n\n dataset = pd.concat(df_list)\n dataset = dataset.reset_index()\n dataset['Received'] = pd.to_datetime(dataset['Received'], errors='coerce')\n dataset['Received']=dataset['Received'].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n dataset = dataset.drop(['index'],axis=1)\n dataset.to_csv('dataPhising.csv',index=False)\n\nif __name__ == \"__main__\":\n awareness_dataset()\n phising_dataset()" ]
[ [ "pandas.concat", "pandas.read_excel", "pandas.to_datetime", "pandas.ExcelFile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
xinpl/Polaris
[ "2a9717980ab03393f7f5ba646bc58c8a3fbd1b9b" ]
[ "nnExplain/kerasUtils.py" ]
[ "from .NNModel import Layer, LayerKind\nfrom .utils import verbose\nfrom keras.models import load_model\nfrom keras.layers import Dense, Activation, Dropout, InputLayer\nimport tensorflow as tf\n\n\ndef createModelFromKerasModel(m, input_length):\n X = tf.placeholder(tf.float32, shape=[None, input_length])\n layers = m.layers\n layer_list = []\n last_output = X\n for l in layers:\n if isinstance(l,InputLayer):\n continue\n if isinstance(l,Dense):\n weights = l.get_weights()[0]\n biases = l.get_weights()[1]\n last_output = tf.matmul(last_output, tf.constant(weights))\n last_output = tf.add(last_output, tf.constant(biases))\n layer_list.append(Layer(LayerKind.dense, last_output, weights, biases))\n activation = l.get_config()['activation']\n if activation == 'relu':\n last_output = tf.nn.relu(last_output)\n layer_list.append(Layer(LayerKind.relu, last_output, None, None))\n elif activation == 'softmax':\n verbose(\"Warning: treating softmax as the output!\",0)\n elif isinstance(l, Dropout):\n continue\n else:\n raise ValueError(\"Cannot handle layer {}!\".format(l))\n return (m, layer_list, X, last_output)\n\n\ndef createModelFromKerasSave(path, input_length):\n m = load_model(path)\n return createModelFromKerasModel(m, input_length)\n" ]
[ [ "tensorflow.nn.relu", "tensorflow.constant", "tensorflow.placeholder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
ToWeRT1An/tensorpack
[ "f343e65b3c92fdf92cda7a90e8d7fd9df622b1b1" ]
[ "tensorpack/tfutils/summary.py" ]
[ "# -*- coding: utf-8 -*-\n# File: summary.py\n\n\nimport re\nfrom contextlib import contextmanager\nimport six\nfrom six.moves import range\nfrom tensorflow.python.training import moving_averages\n\nfrom ..compat import tfv1 as tf\nfrom ..utils import logger\nfrom ..utils.argtools import graph_memoized\nfrom ..utils.naming import MOVING_SUMMARY_OPS_KEY\nfrom .scope_utils import cached_name_scope\nfrom .symbolic_functions import rms\nfrom .tower import get_current_tower_context\n\n__all__ = ['add_tensor_summary', 'add_param_summary',\n 'add_activation_summary', 'add_moving_summary',\n ]\n\n\n# some scope stuff to use internally...\n@graph_memoized\ndef _get_cached_vs(name):\n with tf.variable_scope(name) as scope:\n return scope\n\n\n@contextmanager\ndef _enter_vs_reuse_ns(name):\n vs = _get_cached_vs(name)\n # XXX Not good to enter the cached vs directly, because this will clean-up custom getter\n # with tf.variable_scope(name, reuse=tf.AUTO_REUSE): # available in 1.4 only\n with tf.variable_scope(vs):\n with tf.name_scope(vs.original_name_scope):\n yield vs\n\n\ndef create_scalar_summary(name, v):\n \"\"\"\n Args:\n name (str):\n v (float): scalar value\n Returns:\n tf.Summary: a tf.Summary object with name and simple scalar value v.\n \"\"\"\n assert isinstance(name, six.string_types), type(name)\n v = float(v)\n s = tf.Summary()\n s.value.add(tag=name, simple_value=v)\n return s\n\n\ndef create_image_summary(name, val):\n \"\"\"\n Args:\n name(str):\n val(np.ndarray): 4D tensor of NHWC. assume RGB if C==3.\n Can be either float or uint8. Range has to be [0,255].\n\n Returns:\n tf.Summary:\n \"\"\"\n assert isinstance(name, six.string_types), type(name)\n n, h, w, c = val.shape\n val = val.astype('uint8')\n s = tf.Summary()\n imparams = [cv2.IMWRITE_PNG_COMPRESSION, 9]\n for k in range(n):\n arr = val[k]\n # CV2 will only write correctly in BGR chanel order\n if c == 3:\n arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)\n elif c == 4:\n arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)\n tag = name if n == 1 else '{}/{}'.format(name, k)\n retval, img_str = cv2.imencode('.png', arr, imparams)\n if not retval:\n # Encoding has failed.\n continue\n img_str = img_str.tostring()\n\n img = tf.Summary.Image()\n img.height = h\n img.width = w\n # 1 - grayscale 3 - RGB 4 - RGBA\n img.colorspace = c\n img.encoded_image_string = img_str\n s.value.add(tag=tag, image=img)\n return s\n\n\ndef add_tensor_summary(x, types, name=None, collections=None,\n main_tower_only=True):\n \"\"\"\n Summarize a tensor by different methods.\n\n Args:\n x (tf.Tensor): a tensor to summarize\n types (list[str]): summary types, can be scalar/histogram/sparsity/mean/rms\n name (str): summary name. Defaults to be the op name.\n collections (list[str]): collections of the summary ops.\n main_tower_only (bool): Only run under main training tower. If\n set to True, calling this function under other TowerContext\n has no effect.\n\n Example:\n\n .. 
code-block:: python\n\n        with tf.name_scope('mysummaries'):  # to not mess up tensorboard\n            add_tensor_summary(\n                tensor, ['histogram', 'rms', 'sparsity'], name='mytensor')\n    \"\"\"\n    types = set(types)\n    if name is None:\n        name = x.op.name\n    ctx = get_current_tower_context()\n    if main_tower_only and ctx is not None and not ctx.is_main_training_tower:\n        return\n\n    SUMMARY_TYPES_DIC = {\n        'scalar': lambda: tf.summary.scalar(name + '-summary', x, collections=collections),\n        'histogram': lambda: tf.summary.histogram(name + '-histogram', x, collections=collections),\n        'sparsity': lambda: tf.summary.scalar(\n            name + '-sparsity', tf.nn.zero_fraction(x),\n            collections=collections),\n        'mean': lambda: tf.summary.scalar(\n            name + '-mean', tf.reduce_mean(x),\n            collections=collections),\n        'rms': lambda: tf.summary.scalar(\n            name + '-rms', rms(x), collections=collections)\n    }\n    for typ in types:\n        SUMMARY_TYPES_DIC[typ]()\n\n\ndef add_activation_summary(x, types=None, name=None, collections=None):\n    \"\"\"\n    Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.\n    This function is a no-op if not calling from main training tower.\n\n    Args:\n        x (tf.Tensor): the tensor to summarize.\n        types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.\n        name (str): if None, use x.name.\n        collections (list[str]): collections of the summary ops.\n    \"\"\"\n    ndim = x.get_shape().ndims\n    if ndim < 2:\n        logger.warn(\"Cannot summarize scalar activation {}\".format(x.name))\n        return\n    if types is None:\n        types = ['sparsity', 'rms', 'histogram']\n    with cached_name_scope('activation-summary'):\n        add_tensor_summary(x, types, name=name, collections=collections)\n\n\ndef add_param_summary(*summary_lists, **kwargs):\n    \"\"\"\n    Add summary ops for all trainable variables matching the regex, under a\n    reused 'param-summary' name scope.\n    This function is a no-op if not calling from main training tower.\n\n    Args:\n        summary_lists (list): each is (regex, [list of summary type]).\n            Summary type is defined in :func:`add_tensor_summary`.\n        collections (list[str]): collections of the summary ops.\n\n    Example:\n\n    .. code-block:: python\n\n        add_param_summary(\n            ('.*/W', ['histogram', 'rms']),\n            ('.*/gamma', ['scalar']),\n        )\n    \"\"\"\n    collections = kwargs.pop('collections', None)\n    assert len(kwargs) == 0, \"Unknown kwargs: \" + str(kwargs)\n    ctx = get_current_tower_context()\n    if ctx is not None and not ctx.is_main_training_tower:\n        return\n\n    # restrict to trainable variables, matching the docstring\n    # (tf.all_variables() is gone from the v1 API and returned every variable anyway)\n    params = tf.trainable_variables()\n\n    with cached_name_scope('param-summary'):\n        for p in params:\n            name = p.op.name\n            for rgx, actions in summary_lists:\n                if not rgx.endswith('$'):\n                    rgx = rgx + '$'\n                if re.match(rgx, name):\n                    add_tensor_summary(p, actions, name=name, collections=collections)\n\n\ndef add_moving_summary(*args, **kwargs):\n    \"\"\"\n    Summarize the moving average for scalar tensors.\n    This function is a no-op if not calling from main training tower.\n    See tutorial at https://tensorpack.readthedocs.io/tutorial/summary.html\n\n    Args:\n        args: scalar tensors to summarize\n        decay (float): the decay rate. Defaults to 0.95.\n        collection (str or None): the name of the collection to add EMA-maintaining ops.\n            The default will work together with the default\n            :class:`MovingAverageSummary` callback.\n        summary_collections ([str]): the names of collections to add the\n            summary op. 
Default is TF's default (`tf.GraphKeys.SUMMARIES`).\n\n Returns:\n [tf.Tensor]: list of tensors returned by assign_moving_average,\n which can be used to maintain the EMA.\n \"\"\"\n decay = kwargs.pop('decay', 0.95)\n coll = kwargs.pop('collection', MOVING_SUMMARY_OPS_KEY)\n summ_coll = kwargs.pop('summary_collections', None)\n assert len(kwargs) == 0, \"Unknown arguments: \" + str(kwargs)\n\n ctx = get_current_tower_context()\n # allow ctx to be none\n if ctx is not None and not ctx.is_main_training_tower:\n return []\n\n graph = tf.get_default_graph()\n try:\n control_flow_ctx = graph._get_control_flow_context()\n # XLA does not support summaries anyway\n # However, this function will generate unnecessary dependency edges,\n # which makes the tower function harder to compile under XLA, so we skip it\n if control_flow_ctx is not None and control_flow_ctx.IsXLAContext():\n return\n except Exception:\n pass\n\n if tf.get_variable_scope().reuse is True:\n logger.warn(\"add_moving_summary() called under reuse=True scope, ignored.\")\n return []\n\n for x in args:\n assert isinstance(x, (tf.Tensor, tf.Variable)), x\n assert x.get_shape().ndims == 0, \\\n \"add_moving_summary() only accepts scalar tensor! Got one with {}\".format(x.get_shape())\n\n ema_ops = []\n for c in args:\n name = re.sub('tower[0-9]+/', '', c.op.name)\n with tf.name_scope(None):\n if not c.dtype.is_floating:\n c = tf.cast(c, tf.float32)\n # assign_moving_average creates variables with op names, therefore clear ns first.\n with _enter_vs_reuse_ns('EMA') as vs:\n ema_var = tf.get_variable(name, shape=c.shape, dtype=c.dtype,\n initializer=tf.constant_initializer(),\n trainable=False)\n ns = vs.original_name_scope\n with tf.name_scope(ns): # reuse VS&NS so that EMA_1 won't appear\n ema_op = moving_averages.assign_moving_average(\n ema_var, c, decay,\n zero_debias=True, name=name + '_EMA_apply')\n ema_ops.append(ema_op)\n with tf.name_scope(None):\n tf.summary.scalar(\n name + '-summary', ema_op,\n collections=summ_coll) # write the EMA value as a summary\n if coll is not None:\n for op in ema_ops:\n tf.add_to_collection(coll, op)\n return ema_ops\n\n\ntry:\n import cv2\nexcept ImportError:\n from ..utils.develop import create_dummy_func\n create_image_summary = create_dummy_func('create_image_summary', 'cv2') # noqa\n" ]
[ [ "tensorflow.python.training.moving_averages.assign_moving_average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
gaoyuanhezhihao/pyLib
[ "ecc517d12c4125deafa7b7fe09c63afa3349aaac" ]
[ "OneLib/ImageViewer/evaluate_detection.py" ]
[ "import argparse\nimport os\nfrom os.path import split,join,splitext\nimport xml.etree.ElementTree as ET\nimport re\nimport numpy as np\nfrom shapely.geometry import Polygon\nfrom debug_tool import paint_polygons\nfrom matplotlib import pyplot as plt\n\n\n\ndef parse_point(s):\n s.split(',')\n _, p1, p2, _ = re.split(',|\\\\(|\\\\)', s)\n # pt = re.findall('\\d+\\.*\\d*', s)\n return (float(p1), float(p2))\n\ndef parse_line(s):\n split = s.find('--')\n start = parse_point(s[:split])\n end = parse_point(s[split+2:])\n return (start, end)\n\ndef parse_pole(line):\n split = line.find('|')\n left = line[:split]\n right = line[split+1:]\n return (parse_line(left), parse_line(right))\n\ndef parse_gt_pole(s):\n # print(s)\n floats = list(map(float, re.split(',|;', s)))\n points = [(x, y) for x, y in zip(floats[0::2], floats[1::2])]\n points = sorted(points, key=lambda p:p[1])\n top = points[:2]\n bottom = points[2:]\n top = sorted(top)\n bottom = sorted(bottom)\n return ((top[0], bottom[0]), (top[1], bottom[1]))\n\ndef parse_gt(fp):\n tree = ET.parse(fp)\n gt_map = {}\n for c in tree.getroot().getchildren():\n if 'image' == c.tag:\n poles = [parse_gt_pole(p.get('points')) for p in c.getchildren() if 'points' in p.keys()]\n name = split(c.get('name'))[-1]\n name = splitext(name)[0]\n gt_map[name] = poles\n return gt_map\n\ndef area_of_bbox(bbox):\n a = (bbox[1][0] - bbox[0][0]) * (bbox[1][1] - bbox[0][1])\n assert a >= 0\n return a\n\n\ndef bbox_of_pole(pole):\n pts = (pole[0][0], pole[0][1], pole[1][0], pole[1][1])\n x_min = min(pole[0][0][0], pole[0][1][0])\n x_max = max(pole[1][0][0], pole[1][1][0])\n\n y_min = min(pole[0][0][1], pole[0][1][1])\n y_max = max(pole[1][0][1], pole[1][1][1])\n return((x_min, y_min), (x_max, y_max))\n\ndef polygon_of_pole(pole):\n assert pole[0][0][1] < pole[0][1][1], pole\n assert pole[1][0][1] < pole[1][1][1], pole\n points = [pole[0][0], pole[0][1], pole[1][1], pole[1][0]]\n return Polygon(points)\n\n\ndef calculate_iou_of_poles(pole_a, pole_b):\n polygon_a = polygon_of_pole(pole_a)\n polygon_b = polygon_of_pole(pole_b)\n # print(polygon_a)\n # print(polygon_b)\n try:\n intersection = polygon_a.intersection(polygon_b)\n except Exception as e:\n print(e)\n # paint_polygons(polygon_a, polygon_b)\n # plt.show()\n return 0.0\n else:\n # print(intersection)\n return intersection.area/ (polygon_a.area + polygon_b.area - intersection.area)\n\ndef calculate_iou_of_bbox(boxA, boxB):\n # determine the (x, y)-coordinates of the intersection rectangle\n xA = max(boxA[0][0], boxB[0][0])\n yA = max(boxA[0][1], boxB[0][1])\n xB = min(boxA[1][0], boxB[1][0])\n yB = min(boxA[1][1], boxB[1][1])\n\n # compute the area of intersection rectangle\n interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))\n # print(\"intersection area=\", interArea)\n if interArea == 0:\n return 0\n # compute the area of both the prediction and ground-truth\n # rectangles\n # boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))\n # boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(area_of_bbox(boxA)+ area_of_bbox(boxB)- interArea)\n\n # return the intersection over union value\n return iou\n\n\nIOU_THRESHOLD = 0.5\nEPS = 1e-9\n\n\n\ndef compare_with_groundtruth(detected_poles, ground_truth):\n true_detection = []\n not_detected = []\n matched = [False] * len(detected_poles)\n for g in 
ground_truth:\n iou_list = [calculate_iou_of_poles(g, p) for p in detected_poles]\n max_idx = np.argmax(iou_list)\n if iou_list[max_idx] > IOU_THRESHOLD:\n true_detection.append((g, detected_poles[max_idx]))\n matched[max_idx] = True\n else:\n not_detected.append(g)\n false_detection = [p for m, p in zip(matched, detected_poles) if not m]\n return true_detection, false_detection, not_detected\n\nclass DetectionEvaluator:\n\n def __init__(self, gt_fp, detection_directory):\n self.gt_map = parse_gt(gt_fp)\n self.detection_map = {}\n for file_name in os.listdir(detection_directory):\n if not file_name.endswith('.txt'):\n continue\n self.evaluate(join(detection_directory, file_name))\n\n def __getitem__(self, key):\n return self.detection_map[key]\n\n def evaluate(self, detection_file_path):\n sample_name = splitext(split(detection_file_path)[-1])[0]\n with open(detection_file_path, 'r') as f:\n detected_poles = [parse_pole(l) for l in f.readlines()]\n # print(\"detected %d poles in %s\" % (len(detected_poles), file_name))\n true_detection = []\n false_detection = []\n ground_truth = self.gt_map[sample_name]\n not_detected = ground_truth\n if len(detected_poles) != 0:\n true_detection, false_detection, not_detected = compare_with_groundtruth(detected_poles, ground_truth)\n self.detection_map[sample_name] = {'true_detection': true_detection,\n 'false_detection': false_detection,\n 'not_detected': not_detected,\n 'true_positive': len(true_detection),\n 'positive': len(detected_poles),\n 'groundtruth_count':len(ground_truth),\n 'precision': len(true_detection) / (len(detected_poles) + EPS),\n 'recall': len(true_detection) / (len(ground_truth) + EPS)}\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('groundtruth_path')\n parser.add_argument('detection_result_directory')\n args = parser.parse_args()\n eva = DetectionEvaluator(args.groundtruth_path, args.detection_result_directory)\n true_positive = 0\n positive = 0\n groundtruth_count = 0\n for e in eva.detection_map.values():\n true_positive += e['true_positive']\n positive += e['positive']\n groundtruth_count += e['groundtruth_count']\n print('precision=%f, recall=%f' % (true_positive/positive, true_positive/groundtruth_count))\n" ]
[ [ "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Hemant2801/Big-mart-sales-prediction
[ "cac2748ed6a446095d568e005601d941f586e104" ]
[ "Sales prediction model.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Importing all the dependencies\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom xgboost import XGBRegressor\nfrom sklearn.preprocessing import LabelEncoder\n\n\n# # Data collection and analysis\n\n# In[2]:\n\n\ndf = pd.read_csv('C:/Users/Hemant/jupyter_codes/ML Project 1/Big mart sales prediction/train.csv')\n\n\n# In[3]:\n\n\n#print the fisrt 5 rows of the dataset\n'''\nFD = food \nDR = drink\nNC = non consumable\n\n'''\ndf.head()\n\n\n# In[4]:\n\n\n# print the last 5 rows of the dataset\ndf.tail()\n\n\n# In[5]:\n\n\n# shape of the dataset\ndf.shape\n\n\n# In[6]:\n\n\n# getting some info about the dataset\ndf.info()\n\n\n# In[7]:\n\n\n#checking for any missing values\ndf.isnull().sum()\n\n\n# In[8]:\n\n\n# stastical measure of the dataset\ndf.describe()\n\n\n# In[9]:\n\n\n#checking for categorical data in diff object type columns\nobjlist = df.select_dtypes('object').columns\nfor i in objlist:\n print(f'\\n{i}')\n print(df[i].value_counts(), end = '\\n') \n\n\n# Handling the missing values\n# \n# Mean ---> Average value\n# Mode ---> Most repeated value\n\n# In[10]:\n\n\n# mean value of 'Item weight' collumn\nmean_value = df['Item_Weight'].mean()\n\n\n# In[11]:\n\n\n# filling the missing value with mean in 'item weight' column\ndf['Item_Weight'].fillna(mean_value, inplace = True)\n\n\n# In[12]:\n\n\n#checking for missing values\ndf.isnull().sum()\n\n\n# In[13]:\n\n\n# replacing the missing value with mode in 'Outlet Size' column\nmode_value = df.pivot_table(values = 'Outlet_Size', columns = 'Outlet_Type', aggfunc = (lambda x : x.mode()[0]))\n\n\n# In[14]:\n\n\nprint(mode_value)\n\n\n# In[15]:\n\n\nmissing_values = df['Outlet_Size'].isnull()\n\n\n# In[16]:\n\n\ndf.loc[missing_values, 'Outlet_Size'] = df.loc[missing_values, 'Outlet_Type'].apply(lambda x : mode_value[x])\n\n\n# In[17]:\n\n\n#checking for missing values\ndf.isnull().sum()\n\n\n# Data analysis\n\n# In[18]:\n\n\n# stastical measure of the data\ndf.describe()\n\n\n# Numerical features\n\n# In[19]:\n\n\nsns.set_style(style = 'darkgrid')\n\n\n# In[20]:\n\n\n#item weight distribution\nplt.figure(figsize = (6,6))\nsns.displot(df['Item_Weight'], kde= True)\nplt.show()\n\n\n# In[21]:\n\n\n#item visibility distribution\nplt.figure(figsize = (6,6))\nsns.displot(df['Item_Visibility'], kde= True)\nplt.show()\n\n\n# In[22]:\n\n\n#item MRP distribution\nplt.figure(figsize = (6,6))\nsns.displot(df['Item_MRP'], kde= True)\nplt.show()\n\n\n# In[23]:\n\n\n#Item_Outlet_Sales distribution\nplt.figure(figsize = (6,6))\nsns.displot(df['Item_Outlet_Sales'], kde= True)\nplt.show()\n\n\n# In[24]:\n\n\n#Outlet_Establishment_Year distribution\nplt.figure(figsize = (6,6))\nsns.countplot(x= 'Outlet_Establishment_Year', data = df)\nplt.show()\n\n\n# Categoruical features\n\n# In[25]:\n\n\n#Item_Fat_Content distribution\nplt.figure(figsize = (6,6))\nsns.countplot(x= 'Item_Fat_Content', data = df)\nplt.show()\n\n\n# In[26]:\n\n\n# Item_Type\t distribution\nplt.figure(figsize = (30,6))\nsns.countplot(x= 'Item_Type', data = df)\nplt.show()\n\n\n# In[27]:\n\n\n# Outlet location type distribution\nplt.figure(figsize = (6,6))\nsns.countplot(x = 'Outlet_Location_Type', data = df)\nplt.show()\n\n\n# # Data preprocessing\n\n# In[28]:\n\n\ndf.head()\n\n\n# In[29]:\n\n\ndf['Item_Fat_Content'].value_counts()\n\n\n# In[30]:\n\n\ndf.replace({'Item_Fat_Content' : {'low fat' 
: 'Low Fat', 'LF' : 'Low Fat', 'reg' : 'Regular'}}, inplace = True)\n\n\n# In[31]:\n\n\ndf['Item_Fat_Content'].value_counts()\n\n\n# Label Encoding\n\n# In[32]:\n\n\nencoder = LabelEncoder()\n\nobjlist = df.select_dtypes('object').columns\nfor i in objlist:\n df[i] = encoder.fit_transform(df[i])\n\n\n# In[33]:\n\n\ndf.head()\n\n\n# In[34]:\n\n\ncorrelation = df.corr()\n\n\n# In[43]:\n\n\nplt.figure(figsize = (20,20))\nsns.heatmap(correlation , cbar = True, cmap = 'Blues',square = True, annot = True, fmt = '.1f', annot_kws = {'size' : 8})\n\n\n# Splitting features and targets\n\n# In[36]:\n\n\nX = df.drop(columns = 'Item_Outlet_Sales' ,axis = 1)\nY = df['Item_Outlet_Sales']\n\n\n# # Splitting the data into training and testing data\n\n# In[37]:\n\n\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = .2, random_state = 6)\n\n\n# In[38]:\n\n\nprint(x_train.shape, x_test.shape)\nprint(y_train.shape, y_test.shape)\n\n\n# # Machine learning model\n\n# In[39]:\n\n\nmodel = XGBRegressor()\n\n\n# In[40]:\n\n\nmodel.fit(x_train, y_train)\n\n\n# Model evaluatuion on training data\n\n# In[41]:\n\n\ntrain_prediction = model.predict(x_train)\n\naccuracy_training = metrics.r2_score(y_train, train_prediction)\nprint('R SQUARED ERROR OF TRAINING DATA :', accuracy_training)\n\n\n# Model evaluatuion on testing data\n\n# In[42]:\n\n\ntest_prediction = model.predict(x_test)\n\naccuracy_testing = metrics.r2_score(y_test, test_prediction)\nprint('R SQUARED ERROR OF TESTING DATA :', accuracy_testing)\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.r2_score", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.LabelEncoder", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
peekxc/tallem
[ "949af20c1f50f9b6784ee32463e59123cd64294b", "949af20c1f50f9b6784ee32463e59123cd64294b" ]
[ "notebooks/numba_mds.py", "src/tallem/datasets.py" ]
[ "import os\nimport numpy as np\nimport numpy.typing as npt\nfrom numpy.typing import ArrayLike\nfrom typing import *\nfrom .distance import *\nfrom .utility import *\nfrom scipy.sparse.linalg import eigs as truncated_eig\nfrom scipy.linalg import eigh, eig as dense_eig\nfrom scipy.spatial import KDTree\nfrom scipy.sparse import csc_matrix, csr_matrix\nfrom scipy.sparse.csgraph import minimum_spanning_tree, connected_components\n\nimport numpy as np\nimport numba as nb\nfrom numba import njit, types, float32, float64, int32, int64, prange\nfrom numba.extending import overload\n\n\n@njit('float64[:,:](float64[:,:])', fastmath=True,parallel=False)\ndef average_rows(x):\n\tassert x.ndim == 2\n\tres = np.zeros((1, x.shape[0]),dtype=np.float64)\n\tfor i in prange(x.shape[0]):\n\t\tres += x[i,:]\n\treturn res / x.shape[0]\n\n@njit('float64[:,:](float64[:,:])', fastmath=True,parallel=False)\ndef average_cols(x):\n\tassert x.ndim == 2\n\tres = np.zeros((1, x.shape[1]),dtype=np.float64)\n\tfor i in prange(x.shape[1]):\n\t\tres += x[:,i]\n\treturn res / x.shape[1]\n\n#test.parallel_diagnostics(level=4)\n\n\n@njit('float64[:,:](float64[:,:], int32)', fastmath=False)\ndef cmds_numba_naive(D, d):\n\tn = D.shape[0]\n\tH = np.eye(n) - (1.0/n)*np.ones(shape=(n,n)) # centering matrix\n\tB = -0.5 * H @ D @ H\n\tevals, evecs = np.linalg.eigh(B)\n\tevals, evecs = np.flip(evals)[np.arange(d)], np.fliplr(evecs)[:,np.arange(d)] \n\tw = np.flatnonzero(evals > 0)\n\tY = np.zeros(shape=(n, d))\n\tY[:,w] = evecs[:,w] @ np.diag(np.sqrt(evals[w]))\n\treturn(Y)\n\n## Classical MDS with Numba\n@njit(nb.types.Tuple((float64[:], float64[:,:]))(float64[:,:], int32), fastmath=False)\ndef cmds_numba_E(D, d):\n\t''' Given distance matrix 'D' and dimension 'd', computes the classical MDS '''\n\tD = -0.5*(D - average_rows(D) - average_cols(D).T + np.mean(D))\n\tevals, evecs = np.linalg.eigh(D)\n\tevals, evecs = np.flip(evals)[:d] , np.fliplr(evecs)[:,:d] \n\treturn((evals, evecs))\n\n@njit('float64[:,:](float64[:,:], int32)', fastmath=False)\ndef cmds_numba(D, d):\n\tn = D.shape[0]\n\tevals, evecs = cmds_numba_E(D, d)\n\tw = np.flatnonzero(evals > 0)\n\tY = np.zeros(shape=(n, d))\n\tY[:,w] = np.dot(evecs[:,w], np.diag(np.sqrt(evals[w])))\n\treturn(Y)\n\nfrom tallem.syevr import numba_dsyevr\n\n@njit('float64[:,:](float64[:,:], int32)', fastmath=False)\ndef cmds_numba_fortran(D, d):\n\tn = D.shape[0]\n\tD = -0.5*(D - average_rows(D) - average_cols(D).T + np.mean(D))\n\tevals, evecs, i, e = numba_dsyevr(D, n-d+1, n, 1e-8)\n\tw = np.flatnonzero(evals > 0)\n\tY = np.zeros(shape=(n, d))\n\tY[:,w] = np.dot(evecs[:,w], np.diag(np.sqrt(evals[w])))\n\treturn(Y)\n\n@njit('float64[:,:](float64[:,:], float64[:,:], int32)', fastmath=False)\ndef landmark_cmds_numba(LD, S, d):\n\t''' \n\tBarbones landmark MDS with Numba \n\t\n\tLD := (k x k) landmark distance matrix \n\tS := (k x n) matrix of distances from the n points to the k landmark points, where n > k\n\td := dimension of output coordinitization\n\t'''\n\tn = S.shape[1]\n\tevals, evecs = cmds_numba_E(LD, d)\n\tmean_landmark = average_cols(LD).T\n\tw = np.flatnonzero(evals > 0)\n\tL_pseudo = evecs/np.sqrt(evals[w])\n\tY = np.zeros(shape=(n, d))\n\tY[:,w] = (-0.5*(L_pseudo.T @ (S.T - mean_landmark.T).T)).T \n\treturn(Y)\n\n# lapack.dsyevr(jobz, rng, uplo, N, D, N, vl, vu, il, iu, tol, m, w, Z, ldz, isuppz, work, lwork, iwork, liwork, info)\n@njit('float64[:,:](float64[:,:])', parallel=True)\ndef dist_matrix(X):\n\tn = X.shape[0]\n\tD = np.zeros((n,n))\n\tfor i in np.arange(n):\n\t\tfor j 
in np.arange(n):\n\t\t\tD[i,j] = np.sum((X[i,:]-X[j,:])**2)\n\treturn(D)\n\n# @njit('float64[:,:](float64[:,:], int32[:], int32[:], int32)', parallel=True)\n# def bench_parallel(X, subsets_vec, subsets_len, d):\n# \tresults = []\n# \tfor i in prange(len(subsets_vec)-1):\n# \t\tind = np.arange(subsets_vec[i], subsets_vec[i+1])\n# \t\tD = dist_matrix(X[ind,:])\n# \t\tresults.append(cmds_numba(D, d))\n# \treturn(results)\n\n#from numba import njit, prange\n# @njit(parallel=True)\n# def fit_local_models(f, X, cover):\n# \tindex_set = list(cover.keys())\n# \tsubsets = list(cover.values())\n# \tresult = {}\n# \tfor j in prange(len(cover)):\n# \t\tindex, subset = index_set[j], subsets[j]\n# \t\tresult[index] = f(X[np.array(subset),:])\n# \treturn(result)\n", "import numpy as np\nfrom typing import *\nfrom numpy.typing import ArrayLike\nfrom scipy.spatial import Delaunay\nfrom tallem.utility import ask_package_install, package_exists\n\ndef flywing():\n\t''' Fly wings example (Klingenberg, 2015 | https://en.wikipedia.org/wiki/Procrustes_analysis) '''\n\tarr1 = np.array([[588.0, 443.0], [178.0, 443.0], [56.0, 436.0], [50.0, 376.0], [129.0, 360.0], [15.0, 342.0], [92.0, 293.0], [79.0, 269.0], [276.0, 295.0], [281.0, 331.0], [785.0, 260.0], [754.0, 174.0], [405.0, 233.0], [386.0, 167.0], [466.0, 59.0]])\n\tarr2 = np.array([[477.0, 557.0], [130.129, 374.307], [52.0, 334.0], [67.662, 306.953], [111.916, 323.0], [55.119, 275.854], [107.935, 277.723], [101.899, 259.73], [175.0, 329.0], [171.0, 345.0], [589.0, 527.0], [591.0, 468.0], [299.0, 363.0], [306.0, 317.0], [406.0, 288.0]])\n\treturn([arr1, arr2])\n\ndef gaussian_blob(n_pixels: int, r: float):\n\t'''\n\tGenerates a closure which, given a 2D location *mu=(x,y)*, generates a white blob \n\twith [normalized] radius 0 < r <= 1 in a (n_pixels x n_pixels) image. \n\n\tIf *mu* is in [0,1] x [0,1], the center of the white blob should be visible\n\tIf *mu* has one or both of its coordinates outside of [0,1]x[0,1], the blob may be only partially visible\n\tIf *mu* has both of its coordinates outside of [-r, 1+r]x[-r, 1+r], then the image should be essentially black\n\n\tThe returned closure relies entirely on autograd's numpy wrapper to do the image generation. 
Thus, the resulting\n\tfunction can be differentiated (w.r.t *mu*) using the reverse-mode differentiation process that *autograd* provides.\n\n\tThis function also returns the global normalizing constant needed to normalize the pixel intensities in [0,1],\n\tfor plotting or other purposes.\n\n\tReturn: (blob, c) where\n\t - blob := differentiable closure which, given a vector (x,y), generates the blob image as a flat vector.\n\t - c := maximum value of the intensity of any given pixel for any choice of *mu*.\n\t'''\n\timport autograd.numpy as auto_np\n\tsd = r/3.090232\n\tsigma = sd**2\n\tsigma_inv = 1.0/sigma\n\tdenom = np.sqrt(((2*auto_np.pi)**2) * (sigma**2))\n\tdef blob(mu): # mu can be anywhere; center of image is [0.5, 0.5]\n\t\tloc = auto_np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels)\n\t\tx,y = auto_np.meshgrid(loc, loc)\n\t\tgrid = auto_np.exp(-0.5*(sigma_inv * ((x-mu[0])**2 + (y-mu[1])**2)))/denom\n\t\treturn(auto_np.ravel(grid).flatten())\n\treturn(blob, auto_np.exp(0)/denom)\n\ndef plot_image(P, figsize=(8,8), max_val = \"default\"):\n\tif max_val == \"default\": max_val = np.max(P)\n\timport matplotlib.pyplot as plt\n\tfig = plt.figure(figsize=figsize)\n\tplt.imshow(P, cmap='gray', vmin=0, vmax=max_val)\n\tfig.gca().axes.get_xaxis().set_visible(False)\n\tfig.gca().axes.get_yaxis().set_visible(False)\n\ndef plot_images(P, shape, max_val = \"default\", figsize=(8,8), layout = None):\n\t''' \n\t\tP := numpy array where each row is a grayscale image\n\t\tshape := the shape to reshape each row of P prior to plotting\n\t'''\n\timport matplotlib.pyplot as plt\n\tif max_val == \"default\": \n\t\tmax_val = np.max(P)\n\tif P.ndim == 1:\n\t\tfig = plt.figure(figsize=figsize)\n\t\tplt.imshow(P.reshape(shape), cmap='gray', vmin=0, vmax=max_val)\n\t\tfig.gca().axes.get_xaxis().set_visible(False)\n\t\tfig.gca().axes.get_yaxis().set_visible(False)\n\t\treturn(fig, fig.gca()) # 'ax' was undefined here; return the figure's axes instead\n\telse:\n\t\tassert layout is not None, \"missing layout\"\n\t\tfig, axs = plt.subplots(*layout, figsize=figsize)\n\t\taxs = axs.flatten()\n\t\tfor i, (img, ax) in enumerate(zip(P, axs)):\n\t\t\t#fig.add_subplot(layout[0], layout[1], i+1)\n\t\t\tplt.axis(\"off\")\n\t\t\tax.imshow(P[i,:].reshape(shape), cmap='gray', vmin=0, vmax=max_val, aspect='auto')\n\t\t\tax.axes.xaxis.set_visible(False)\n\t\t\tax.axes.yaxis.set_visible(False)\n\t\tplt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=0.1)\n\t\treturn(fig, axs)\n\ndef scatter2D(P, layout = None, figsize=(8,8), **kwargs):\n\timport matplotlib.pyplot as plt\n\tif isinstance(P, np.ndarray):\n\t\tif \"fig\" in kwargs.keys() and \"ax\" in kwargs.keys():\n\t\t\tfig, ax = kwargs[\"fig\"], kwargs[\"ax\"]\n\t\t\tkwargs.pop('fig', None)\n\t\t\tkwargs.pop('ax', None)\n\t\telse:\n\t\t\tfig = plt.figure(figsize=figsize)\n\t\t\tax = fig.add_subplot()\n\t\tax.scatter(*P.T, **kwargs)\n\t\treturn(fig, ax)\n\telif isinstance(P, Iterable):\n\t\tassert layout is not None, \"missing layout\"\n\t\tassert len(P) == np.prod(layout)\n\t\tif \"fig\" in kwargs.keys() and \"ax\" in kwargs.keys():\n\t\t\tfig, ax = kwargs[\"fig\"], kwargs[\"ax\"]\n\t\t\tkwargs.pop('fig', None)\n\t\telse:\n\t\t\tfig = plt.figure(figsize=figsize)\n\t\tfor i, p in enumerate(P):\n\t\t\tax = fig.add_subplot(layout[0], layout[1], i+1)\n\t\t\tax.scatter(*p.T, **kwargs) \n\t\treturn(fig, ax)\n\ndef scatter3D(P, angles = None, layout = None, figsize=(8,8), **kwargs):\n\timport matplotlib.pyplot as plt\n\tif isinstance(P, np.ndarray):\n\t\timport numbers\n\t\tif angles is not None:\n\t\t\tif 
isinstance(angles, numbers.Integral): \n\t\t\t\tangles = np.linspace(0, 360, angles, endpoint=False)\n\t\t\tassert len(angles) == np.prod(layout)\n\t\t\tif \"fig\" in kwargs.keys() and \"ax\" in kwargs.keys():\n\t\t\t\tfig, ax = kwargs[\"fig\"], kwargs[\"ax\"]\n\t\t\t\tkwargs.pop('fig', None)\n\t\t\t\tkwargs.pop('ax', None)\n\t\t\telse: \n\t\t\t\tfig, ax = plt.subplots(*layout, figsize=figsize)\n\t\t\tfor i, theta in enumerate(angles):\n\t\t\t\tax = fig.add_subplot(layout[0], layout[1], i+1, projection='3d')\n\t\t\t\tax.scatter3D(*P.T, **kwargs) \n\t\t\t\tax.view_init(30, theta)\n\t\telse: \n\t\t\tif \"fig\" in kwargs.keys() and \"ax\" in kwargs.keys():\n\t\t\t\tfig, ax = kwargs[\"fig\"], kwargs[\"ax\"]\n\t\t\t\tkwargs.pop('fig', None)\n\t\t\t\tkwargs.pop('ax', None)\n\t\t\telse: \n\t\t\t\tfig = plt.figure(figsize=figsize)\n\t\t\t\tax = fig.add_subplot(projection='3d')\n\t\t\tax.scatter3D(*P.T, **kwargs)\n\telif isinstance(P, Iterable):\n\t\timport numbers\n\t\tassert layout is not None, \"missing layout\"\n\t\tif angles is None:\n\t\t\tangles = np.repeat(60, len(P))\n\t\telif isinstance(angles, numbers.Integral):\n\t\t\tangles = np.linspace(0, 360, len(P), endpoint=False) # degrees, matching view_init and the ndarray branch above\n\t\tassert len(angles) == np.prod(layout)\n\t\tif \"fig\" in kwargs.keys() and \"ax\" in kwargs.keys():\n\t\t\tfig, ax = kwargs[\"fig\"], kwargs[\"ax\"]\n\t\t\tkwargs.pop('fig', None)\n\t\t\tkwargs.pop('ax', None)\n\t\telse:\n\t\t\tfig, ax = plt.subplots(*layout, figsize=figsize)\n\t\tfor i, p in enumerate(P):\n\t\t\tax = fig.add_subplot(layout[0], layout[1], i+1, projection='3d')\n\t\t\tax.scatter3D(*p.T, **kwargs) \n\t\t\tax.view_init(30, angles[i])\n\tplt.setp(plt.gcf().get_axes(), xticks=[], yticks=[]);\n\treturn(fig, ax)\n\ndef rotating_disk(n_pixels: int, r: float, sigma: float = 1.0):\n\tfrom scipy.ndimage import gaussian_filter\n\timport numpy as np\n\tI = np.zeros(shape=(n_pixels, n_pixels))\n\tp = np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels) # center locations of pixels, in normalized space\n\tz = np.array([r, 0.0]).reshape((2,1))\n\td = np.array([0.5, 0.5]).reshape((2,1))\n\tx,y = np.meshgrid(p, p)\n\tdef disk_image(theta: float):\n\t\tR = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n\t\tc = (R @ z) + d # center of disk in [0,1]^2\n\t\tD = np.flipud(np.sqrt((x - c[0])**2 + (y - c[1])**2))\n\t\tD[D <= r] = -1.0\n\t\tD[D > r] = 0.0\n\t\tD[D == -1.0] = 1.0\n\t\treturn(np.ravel(gaussian_filter(D, sigma=sigma)).flatten()) # honor the sigma argument (was hard-coded to 1.0)\n\treturn(disk_image, 1.0)\n\n# D = np.zeros(np.prod(x.shape))\n# for i, (xi,yi) in enumerate(zip(x.flatten(),y.flatten())):\n# \tp = np.array([xi,yi])\n# \tD[i] = np.dot(p-b, u)# np.linalg.norm(z-v)\n\ndef white_bars(n_pixels: int, r: float, sigma: float = 1.0):\n\t''' \n\tReturns a parameterization that yields a white vertical bar at various orientations in an image. 
\n\n\tFixed parameters: \n\t\tn_pixels := number of pixels to make square image\n\t\tr := constant between [0,1] indicating how wide to make the bar \n\t\tsigma := kernel parameter for gaussian blur\n\tReturns:\n\t\tbar := closure w/ parameters theta in [0, pi] and d in [-1, 1]\n\t\tc := normalizing constant for plotting\n\t'''\n\tfrom scipy.ndimage import gaussian_filter\n\timport numpy as np\n\tw = r\n\tp = np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels) # center locations of pixels, in normalized space\n\tx,y = np.meshgrid(p,p)\n\tc = np.array([0.5, 0.5]) \t# center of image\n\tdef bar(theta: float, d: float):\n\t\tassert np.all(np.bitwise_and(d >= -1.0, d <= 1.0)), \"d must be in the range [-1, 1]\"\n\t\tassert np.all(np.bitwise_and(theta >= 0.0, theta <= np.pi)), \"theta must be in the range [0, pi]\"\n\t\tu = np.array([ 1.0, np.tan(theta) ])\n\t\tu = u / np.linalg.norm(u)\n\t\tc = np.array([0.5, 0.5]) \t# center of image\n\t\td = d * (np.sqrt(2) / 2) # scale where to place center of bar \n\t\tif theta > np.pi/2:\n\t\t\td = -d\n\t\tb = c + d*u \t\t\t\t\t\t\t# center of bar \n\t\tD = np.zeros(np.prod(x.shape))\n\t\tfor i, (xi,yi) in enumerate(zip(x.flatten(),y.flatten())):\n\t\t\tp = np.array([xi,yi])\n\t\t\tD[i] = np.dot(p-b, u)# np.linalg.norm(z-v)\n\t\tI = abs(D.reshape((n_pixels, n_pixels))).T\n\t\tI[I > w] = 1\n\t\tI = 1.0 - I\n\t\treturn(gaussian_filter(I, sigma=sigma))\n\tc = np.max(bar(0.0, 0.0))\n\treturn(bar, c)\n\n\t# u = np.array([ 1.0, np.tan(theta) ])\n\t# u = u / np.linalg.norm(u)\n\t# d = np.array([-di if ti <= np.pi/2 else di for di,ti in zip(d, theta)])*(np.sqrt(2) / 2)\n\t# U = np.c_[np.repeat(1.0, len(theta)), theta]\n\t# U = U / np.linalg.norm(U, axis = 1, keepdims = True)\n\t# B = c + d.reshape((len(d), 1)) * U \t# center of bars\n\t# D = [abs((x - b[0])*u[0] + (y - b[1])*u[1]).T for (u, b) in zip(U, B)]\n\t# # b = c + d*u \t\t\t\t\t\t\t# center of bar \n\t# # D = (x - b[0])*u[0] + (y - b[1])*u[1]\n\t# # I = abs(D.reshape((n_pixels, n_pixels))).T\n\t# images = np.zeros((B.shape[0], n_pixels**2))\n\t# for i, img in enumerate(D):\n\t# \timg[img > w] = 1\n\t# \timg = 1.0 - img\n\t# \timages[i,:] = np.ravel(gaussian_filter(img, sigma=sigma).flatten())\n\t# return(images)\n\n\t# from scipy.ndimage import gaussian_filter\n\t# import numpy as np\n\t# w = r*np.sqrt(2)\n\t# p = np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels) # center locations of pixels, in normalized space\n\t# x,y = np.meshgrid(p,p)\n\t# def bar(y_offset: float, theta: float):\n\t# \tassert y_offset >= 0.0 and y_offset <= 1.0 \n\t# \tassert theta >= 0.0 and theta <= np.pi\n\t# \t# z = np.array([0.5, y_offset]) # intercept\n\t# \t# dist_to_line = np.cos(theta)*(z[1] - y) - np.sin(theta)*(z[0]-x)\n\t# \t# dist_to_line = ((y - y_offset)/np.tan(theta))*np.sin(theta)\n\t# \t# Z = np.array([np.array([xi,yi]) for xi,yi in zip(x.flatten(),y.flatten())])\n\t# \t# fig,ax = scatter2D(Z, c=\"blue\")\n\t# \t# fig,ax = scatter2D(np.array(P), c=\"red\", fig=fig, ax=ax)\n\t# \t# fig,ax = scatter2D(np.array([0.5, 0.5]), c=\"green\", fig=fig, ax=ax)\n\t# \t# fig,ax = scatter2D(np.c_[x.flatten(), x.flatten()*m + b], c=\"purple\", fig=fig, ax=ax)\n\t\t\n\t# \tm, b = np.tan(theta), y_offset\n\t# \t#pt = np.array([1.0, m + b])\n\t# \tz1 = np.array([0.50, b])\n\t# \tz2 = np.array([1.0, 1.0*m + b])\n\t# \tpt = z2 - z1\n\t# \td = []\n\t# \tP = []\n\t# \tfor xi,yi in zip(x.flatten(),y.flatten()):\n\t# \t\tu = pt / np.linalg.norm(pt)\n\t# \t\tv = np.array([xi,yi])\n\t# \t\tz = u*np.dot(v-np.array([0.5, b]), 
u)+np.array([0.5, b])\n\t# \t\td.append(np.linalg.norm(z-v))\n\t# \t\tP.append(z)\n\t# \tdist_to_line = np.array(d)\n\t# \t# fig, ax = scatter2D(np.array(P))\n\n\t# \tI = abs(dist_to_line.reshape((n_pixels, n_pixels)))\n\t# \tI = np.flipud(I) # make origin lower-left, not top-left \n\t# \t# I = (np.sqrt(2)/2)*(I/np.max(I))\n\t# \tI[I > w] = np.sqrt(2)\n\t# \tI = np.sqrt(2) - I ## invert intensity \n\t# \t# I[I < (np.sqrt(2) - w)] = 0.0\n\t# \t# B = I.copy()\n\t# \t# I[I <= w] = -1.0\n\t# \t# I[I > w] = 0.0\n\t# \t# I[I == -1.0] = np.max(B[B <= w]) - B[B <= w] # 1.0 \n\t# \treturn(gaussian_filter(I, sigma=sigma))\n\t# c = np.max(bar(0.0, 0.0))\n\t# return(bar, c)\n\n# def _gaussian_pixel(d, n_pixels):\n# \tfrom scipy.stats import norm\n# \tsigma = d/3.0\n# \tSigma = auto_np.diag([sigma, sigma])\n# \tsigma_inv = auto_np.linalg.inv(Sigma)[0,0]\n# \tdenom = np.sqrt(((2*np.pi)**2) * auto_np.linalg.det(Sigma))\n# \tnormal_constant = norm.pdf(0, loc=0, scale=sigma)\n# \tdef blob(mu): # generates blob at location mu \n# \t\t# mu = mu.reshape((2, 1))\n# \t\t# np.exp(-0.5 * ((x - mu).T @ SigmaI @ (x - mu))).flatten()\n# \t\t#x, y = auto_np.meshgrid(auto_np.arange(n_pixels), auto_np.arange(n_pixels))\n# \t\tloc = auto_np.linspace(0, 1, n_pixels, False) + (1/(2*n_pixels))\n# \t\tx,y = auto_np.meshgrid(loc, loc)\n# \t\tgrid = auto_np.exp(-0.5*(sigma_inv * ((x-mu[0])**2 + (y-mu[1])**2)))/denom\n# \t\t#grid = auto_np.exp(-0.5*((x - mu[0])**2 + (y - mu[1])**2))/denom\n# \t\t#return(auto_np.ravel(grid).flatten())\n# \t\treturn(grid/normal_constant)\n# \treturn(blob)\n# plot_image(gaussian_pixel2(1/32, 11)([-0.5, 0.5]))\n\n\n\n\ndef white_dot(n_pixels: int, r: float, n: Optional[int], method: Optional[str] = \"grid\", mu: Optional[ArrayLike] = None):\n\t''' \n\tGenerates a grayscale image data set where white blobs are placed on a (n_pixels x n_pixels) grid\n\tusing a multivariate normal density whose standard deviation sigma (in both directions) is sigma=d/3.\n\tIf 'n' is specified, then 'n' samples are generated from a larger space s([-d, 1+d]^2) where s(*)\n\tdenotes the scaling of the interval [-d,1+d] by 'n_pixels'. \n\n\tParameters: \n\t\tn_pixels := number of pixels wide/tall to make the resulting images \n\t\tr := relative radius of dot (in (0, 1])\n\t\tn := (optional) number of samples desired \n\t\tmethod := (optional) how to generate samples in the parameter space. Can be either \"grid\" or \"random\".\n\t\tmu := (optional) locations of dot centers to generate the dots at\n\t\n\tReturns: \n\t\tsamples := generated image samples \n\t\tparams\t:= (x,y,i) parameters associated with each sample,\n\t\tf \t\t\t:= closure for generating more samples. See gaussian_blob() for more details. \n\t\tc \t\t\t:= normalizing constant. See gaussian_blob() for more details. 
\n\t'''\n\tassert r > 0 and r <= 1.0, \"r must be in the range 0 < r <= 1.0\"\n\tassert isinstance(n, int) or isinstance(n, tuple), \"n must be integer of tuple of integers\"\n\task_package_install(\"autograd\")\n\timport numpy as np\n\timport autograd.numpy as auto_np\n\n\t## First generate the closure to make the images\n\tblob, c = gaussian_blob(n_pixels, r)\n\n\tif not(mu is None):\n\t\tsamples = np.vstack([blob(auto_np.array([x,y])) for x,y in mu])\n\t\tparams = mu \n\telif method == \"random\":\n\t\t## Generate uniformly random locations (in domain)\n\t\tassert n is not None, \"'n' must be supplied if 'mu' is not.\"\n\t\tn1, n2 = (n, n) if isinstance(n, int) else (n[0], n[1])\n\t\t\n\t\tsamples, params = [], []\n\t\tX, Y = np.random.uniform(size=n1,low=-r,high=1+r), np.random.uniform(size=n1,low=-r,high=1+r)\n\t\tfor x,y in zip(X, Y):\n\t\t\tsamples.append(blob(auto_np.array([x,y])))\n\t\t\tparams.append([x, y, 1.0])\n\t\t\n\t\tNP = blob(auto_np.array([0.5, 0.5]))\n\t\tfor t in np.random.uniform(size=n2, low=0.0, high=1.0):\n\t\t\tsamples.append(t*NP)\n\t\t\tparams.append([0.5, 0.5, 1-t])\n\t\t\n\t\t## Vertically stack \n\t\tsamples, params = np.vstack(samples), np.vstack(params)\n\n\telif method == \"grid\":\n\t\tassert n is not None, \"'n' must be supplied if 'mu' is not.\"\n\t\tif isinstance(n, int):\n\t\t\tn1, n2 = (n, n) \n\t\telse:\n\t\t\tn1, n2 = (n[0], n[1])\n\t\tng = int(np.floor(np.sqrt(n1)))\n\t\tsamples, params = [], []\n\t\tfor x in np.linspace(0.0-r,1.0+r,ng):\n\t\t\tfor y in np.linspace(0.0-r,1.0+r,ng):\n\t\t\t\tsamples.append(blob(auto_np.array([x, y])))\n\t\t\t\tparams.append([x, y, 1.0])\n\t\t\n\t\t## Generate the pole\n\t\tNP = blob(auto_np.array([0.5, 0.5]))\n\t\tfor t in np.linspace(0, 1, n2):\n\t\t\tsamples.append(t*NP)\n\t\t\tparams.append([0.5, 0.5, 1-t])\n\n\t\t## Vertically stack \n\t\tsamples, params = np.vstack(samples), np.vstack(params)\n\n\t## Return the data \n\treturn(samples, params, blob, c)\n\ndef mobius_band(n_polar=66, n_wide=9, scale_band=0.25):\n\t''' \n\tGenerates stratified samples on a Mobius band embedded in R^3 \n\n\tTo get uniform samples, N = (n_polar*n_wide) uniformly spaced coordinates are generated initially \n\tfrom the intrinsic space of M. These points are converted to their extrinsic (3D) coordinates and \n\tare then triangulated using a Delaunay triangulation. Finally, using the Delaunay triangles as stratum, \n\ta stratified sampling scheme is employed by sampling randomly from each triangle using its barycentric \n\tcoordinates. This stratification ensures the samples are both sufficiently random but sufficiently \"uniformly \n\tspaced\" around the band. 
\n\t\n\tReturns: \n\t\t- M := (n x 3) matrix of embedding coordinates \n\t\t- B := (n x 2) matrix of intrinsic coordinates\n\t\t\n\tIn the intrinsic coordinates, B[:,0] is the width parameter and B[:,1] is the angular coordinate\n\t'''\n\n\t## Generate random (deterministic) polar coordinates around Mobius Band\n\tnp.random.seed(0) \n\ts = np.linspace(-scale_band, scale_band, 2*n_wide)\t# width/radius\n\tt = np.linspace(0, 2*np.pi, n_polar) \t\t\t\t\t\t\t# circular coordinate \n\ts, t = np.meshgrid(s, t)\n\n\t## Triangulate to allow stratification\n\tM = np.c_[np.ravel(s), np.ravel(t)]\n\tV = M[Delaunay(M).simplices]\n\n\t## Sample within each strata via random barycentric coordinates \n\tnormalize = lambda x: x / np.sum(x) \n\tY = np.array([np.sum(v * normalize(np.random.uniform(size=(3,1))), axis = 0) for v in V])\n\n\t## Convert to 3d\n\tS, T = Y[:,0], Y[:,1]\n\tphi = 0.5 * T\n\tr = 1 + S * np.cos(phi)\n\tMB = np.c_[r * np.cos(T), r * np.sin(T), S * np.sin(phi)]\n\n\t## Return both 3D embedding + original parameters\n\treturn(MB, Y)\n\n\ndef embed(a: ArrayLike, D: int, method=\"givens\"):\n\t''' Embeds a point cloud into D dimensions using random orthogonal rotations '''\n\tdef givens(i,j,theta,n=2):\n\t\tG = np.eye(n)\n\t\tG[i,i] = np.cos(theta)\n\t\tG[j,j] = np.cos(theta)\n\t\tG[i,j] = -np.sin(theta)\n\t\tG[j,i] = np.sin(theta)\n\t\treturn(G)\n\n\t## Append zero columns up to dimension d\n\td = a.shape[1]\n\ta = np.hstack((a, np.zeros((mobius_sample.shape[0], D - d))))\n\n\t## Rotate into D-dimensions\n\tfrom itertools import combinations\n\tfor (i,j) in combinations(range(D), 2):\n\t\ttheta = np.random.uniform(0, 2*np.pi)\n\t\tG = givens(i,j,theta,n=D)\n\t\ta = a @ G\n\treturn(a)\n\n# # %% Visualize isomap \n# from tallem.isomap import isomap\n# embedded_isomap = isomap(M, d = 3, k=5)\n# ax = pyplot.axes(projection='3d')\n# ax.scatter3D(embedded_isomap[:,0], embedded_isomap[:,1], embedded_isomap[:,2], c=\"red\",s=0.20)\n\n# # %% Sklearn isomap to verify\n# from sklearn.manifold import Isomap\n# embedded_isomap = Isomap(n_components=3, n_neighbors=5).fit_transform(M)\n# ax = pyplot.axes(projection='3d')\n# ax.scatter3D(embedded_isomap[:,0], embedded_isomap[:,1], embedded_isomap[:,2], c=\"red\",s=0.20)\n\n# # %% MDS \n# from tallem.mds import classical_MDS\n# from tallem.distance import dist\n# embedded_cmds = classical_MDS(dist(M, as_matrix=True), k = 3)\n# ax = pyplot.axes(projection='3d')\n# ax.scatter3D(embedded_cmds[:,0], embedded_cmds[:,1], embedded_cmds[:,2], c=\"red\",s=0.20)\n\n# # %% Distortion\n# dist_truth = dist(mobius_sample)\n# print(np.linalg.norm(dist(M) - dist_truth))\n# print(np.linalg.norm(dist(embedded_isomap) - dist_truth))\n# print(np.linalg.norm(dist(embedded_cmds) - dist_truth))\n" ]
[ [ "numpy.sqrt", "numpy.fliplr", "numpy.arange", "numpy.eye", "numpy.flatnonzero", "numpy.ones", "numpy.linalg.eigh", "numpy.mean", "numpy.flip", "numpy.zeros", "numpy.sum" ], [ "numpy.dot", "matplotlib.pyplot.imshow", "numpy.sqrt", "numpy.linspace", "numpy.max", "numpy.eye", "numpy.sin", "matplotlib.pyplot.gcf", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplots_adjust", "numpy.ravel", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.tan", "numpy.meshgrid", "numpy.array", "numpy.sum", "scipy.ndimage.gaussian_filter", "numpy.random.seed", "scipy.spatial.Delaunay", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.linalg.norm", "numpy.bitwise_and", "numpy.prod", "numpy.random.uniform", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jessierliu/ecogVIS
[ "c97e79a20b3af1074a3a5e1f1ad864a580c97e04" ]
[ "ecogvis/functions/htk_to_nwb/chang2nwb.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nConvert ECoG to NWB.\n\n:Author: Ben Dichter, Jessie R. Liu\nModified by Luiz Tauffer on May 30, 2020\n\"\"\"\nfrom __future__ import print_function\nimport os\nfrom datetime import datetime\nfrom os import path\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom hdmf.backends.hdf5 import H5DataIO\nfrom ndx_ecog import ECoGSubject\nfrom ndx_bipolar_scheme import BipolarSchemeTable, EcephysExt\nfrom pynwb.file import DynamicTableRegion\nfrom pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\nfrom pynwb.ecephys import ElectricalSeries, LFP\nimport scipy.io as sio\nfrom scipy.io.wavfile import read as wavread\nfrom tqdm import tqdm\n\nfrom ecogvis.functions.htk_to_nwb.HTK import readHTK\n\n\n# get_manager must come after dynamic imports\nmanager = get_manager()\n\n\ndef get_analog(anin_path, num=1):\n \"\"\"\n Load analog data. Try:\n 1) analog[num].wav\n 2) ANIN[num].htk\n\n Parameters\n ----------\n blockpath: str\n num: int\n\n Returns\n -------\n fs, data\n \"\"\"\n wav_path = path.join(anin_path, 'analog' + str(num) + '.wav')\n if os.path.isfile(wav_path):\n rate, data = wavread(wav_path)\n return float(rate), np.array(data, dtype=float)\n htk_path = path.join(anin_path, 'ANIN' + str(num) + '.htk')\n if os.path.isfile(htk_path):\n htk_out = readHTK(htk_path, scale_s_rate=True)\n return htk_out['sampling_rate'], htk_out['data'].ravel()\n print('no analog path found for ' + str(num))\n return None, None\n\n\ndef readhtks(htk_path, elecs=None, use_tqdm=True):\n # First fix the order of htk files\n all_files = np.array([f for f in Path(htk_path).glob('*.htk')])\n numbers = [f.name.split('.')[0].split('Wav')[1] for f in Path(htk_path).glob('*.htk') if '._' not in str(f)]\n new_numbers = [n[0] + '0' + n[1] if len(n) == 2 else n for n in numbers]\n sorted_index = np.argsort(new_numbers)\n sorted_files = all_files[sorted_index]\n # Load data from files in correct order\n data = []\n if use_tqdm:\n this_iter = tqdm(sorted_files, desc='reading electrodes')\n else:\n this_iter = sorted_files\n for i in this_iter:\n htk = readHTK(i, scale_s_rate=True)\n data.append(htk['data'])\n data = np.stack(data)\n if len(data.shape) == 3:\n data = data.transpose([2, 0, 1])\n\n rate = htk['sampling_rate']\n\n return rate, data.squeeze()\n\n\ndef get_bad_elecs(blockpath):\n bad_channels_file = os.path.join(blockpath, 'Artifacts', 'badChannels.txt')\n\n # I think bad channels is 1-indexed but I'm not sure\n if os.path.isfile(bad_channels_file) and os.stat(\n bad_channels_file).st_size:\n dat = pd.read_csv(bad_channels_file, header=None, delimiter=' ',\n engine='python')\n bad_elecs_inds = dat.values.ravel() - 1\n bad_elecs_inds = bad_elecs_inds[np.isfinite(bad_elecs_inds)]\n else:\n bad_elecs_inds = []\n\n return bad_elecs_inds\n\n\ndef elecs_to_electrode_table(nwbfile, elecspath):\n \"\"\"\n Takes an NWB file and the elecs .mat file path, loads the anatomical and\n location information for each electrode,\n and writes this information to the NWB file.\n\n Parameters:\n -----------\n nwbfile : object\n An NWB file object.\n elecspath : str\n Path to the TDT_elecs_all.mat file for this subject. First, second,\n and third columns of the key 'elecmatrix'\n should be x, y, and z coordinates, respectively. 
For the 'anatomy'\n field, second column should be the full electrode label and the\n fourth column should be the anatomical location name.\n\n Returns:\n --------\n nwb_file : object\n The edited NWB file with the added electrode information.\n \"\"\"\n\n # Get anatomical and location information for electrodes.\n elec_mat = sio.loadmat(elecspath)\n labels = elec_mat['anatomy'][:, 1]\n location = elec_mat['anatomy'][:, 3]\n x = elec_mat['elecmatrix'][:, 0]\n y = elec_mat['elecmatrix'][:, 1]\n z = elec_mat['elecmatrix'][:, 2]\n\n # Get MNI warped electrode coordinates.\n if Path(elecspath.as_posix().split('.')[0] + '_warped.mat').is_file():\n elec_mat_warped = sio.loadmat(str(elecspath).split('.')[0] + '_warped.mat')\n x_warped = elec_mat_warped['elecmatrix'][:, 0]\n y_warped = elec_mat_warped['elecmatrix'][:, 1]\n z_warped = elec_mat_warped['elecmatrix'][:, 2]\n else:\n print('No warped electrode information found...filling with zeros.')\n x_warped = np.zeros_like(x)\n y_warped = np.zeros_like(y)\n z_warped = np.zeros_like(z)\n\n # Define electrode device label names.\n group_labels = []\n for current_group in labels:\n name = current_group[0].rstrip('0123456789')\n # Replace 'NaN' for 'null'\n if name == 'NaN':\n name = 'null'\n group_labels.append(name)\n\n # Get the list of unique electrode device label names\n unique_group_indexes = np.unique(group_labels, return_index=True)[1]\n unique_group_labels = [group_labels[f] for f in sorted(unique_group_indexes)]\n\n # Add additional columns to the electodes table.\n nwbfile.add_electrode_column('label', 'label of electrode')\n nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')\n nwbfile.add_electrode_column('x_warped', 'x warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('y_warped', 'y warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('z_warped', 'z warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('null', 'if not connected to real electrode')\n\n for group_label in unique_group_labels:\n # Get region name and device label for the group.\n if 'Depth' in group_label:\n brain_area = group_label.split('Depth')[0]\n elif 'Strip' in group_label:\n brain_area = group_label.split('Strip')[0]\n elif 'Grid' in group_label:\n brain_area = group_label.split('Grid')[0]\n elif 'Pole' in group_label:\n brain_area = group_label.split('Pole')[0]\n elif 'HeschlsGyrus' in group_label:\n brain_area = 'HeschlsGyrus'\n elif 'null' in group_label:\n brain_area = 'null'\n else:\n brain_area = 'other'\n\n # Create electrode device (same as the group).\n device = nwbfile.create_device(group_label)\n\n # Create electrode group with name, description, device object,\n # and general location.\n electrode_group = nwbfile.create_electrode_group(\n name='{} electrodes'.format(group_label),\n description='{}'.format(group_label),\n device=device,\n location=str(brain_area)\n )\n\n # Loop through the number of electrodes in this electrode group\n elec_nums = np.where(np.array(group_labels) == group_label)[0]\n for elec_num in elec_nums:\n # Add the electrode information to the table.\n elec_location = location[elec_num]\n if len(elec_location) == 0:\n # If no label is recorded for this electrode, set it to null\n elec_location = 'null'\n is_null = True\n else:\n elec_location = elec_location[0]\n is_null = False\n\n nwbfile.add_electrode(\n id=elec_num,\n x=x[elec_num],\n y=y[elec_num],\n z=z[elec_num],\n imp=np.nan,\n x_warped=x_warped[elec_num],\n y_warped=y_warped[elec_num],\n z_warped=z_warped[elec_num],\n 
location=str(elec_location),\n filtering='filtering',\n group=electrode_group,\n label=str(labels[elec_num][0]),\n bad=False,\n null=is_null,\n )\n\n return nwbfile\n\n\ndef chang2nwb(blockpath, out_file_path=None, save_to_file=False, htk_config=None):\n \"\"\"\n Parameters\n ----------\n blockpath: str\n out_file_path: None | str\n if None, output = [blockpath]/[blockname].nwb\n save_to_file : bool\n If True, saves to file. If False, just returns nwbfile object\n htk_config : dict\n Dictionary cotaining HTK conversion paths and options. Example:\n {\n ecephys_path: 'path_to/ecephys_htk_files',\n ecephys_type: 'raw', 'preprocessed' or 'high_gamma',\n analog_path: 'path_to/analog_htk_files',\n anin1: {present: True, name: 'microphone', type: 'acquisition'},\n anin2: {present: True, name: 'speaker1', type: 'stimulus'},\n anin3: {present: False, name: 'speaker2', type: 'stimulus'},\n anin4: {present: False, name: 'custom', type: 'acquisition'},\n metadata: metadata,\n electrodes_file: electrodes_file,\n bipolar_file: bipolar_file\n }\n\n Returns\n -------\n \"\"\"\n\n metadata = {}\n\n if htk_config is None:\n blockpath = Path(blockpath)\n else:\n blockpath = Path(htk_config['ecephys_path'])\n metadata = htk_config['metadata']\n blockname = blockpath.parent.name\n subject_id = blockpath.parent.parent.name[2:]\n\n if out_file_path is None:\n out_file_path = blockpath.resolve().parent / ''.join([blockname, '.nwb'])\n\n # file paths\n ecog_path = blockpath\n anin_path = htk_config['analog_path']\n bad_time_file = path.join(blockpath, 'Artifacts', 'badTimeSegments.mat')\n\n # Create the NWB file object\n nwbfile_dict = {\n 'session_description': blockname,\n 'identifier': blockname,\n 'session_start_time': datetime.now().astimezone(),\n 'institution': 'University of California, San Francisco',\n 'lab': 'Chang Lab'\n }\n if 'NWBFile' in metadata:\n nwbfile_dict.update(metadata['NWBFile'])\n nwbfile = NWBFile(**nwbfile_dict)\n\n # Read electrophysiology data from HTK files\n print('reading htk acquisition...', flush=True)\n ecog_rate, data = readhtks(ecog_path)\n data = data.squeeze()\n print('done', flush=True)\n\n # Get electrodes info from mat file\n if htk_config['electrodes_file'] is not None:\n nwbfile = elecs_to_electrode_table(\n nwbfile=nwbfile,\n elecspath=htk_config['electrodes_file'],\n )\n n_electrodes = nwbfile.electrodes[:].shape[0]\n all_elecs = list(range(n_electrodes))\n elecs_region = nwbfile.create_electrode_table_region(\n region=all_elecs,\n description='ECoG electrodes on brain'\n )\n else:\n ecephys_dict = {\n 'Device': [{'name': 'auto_device'}],\n 'ElectricalSeries': [{'name': 'ECoG', 'description': 'description'}],\n 'ElectrodeGroup': [{'name': 'auto_group', 'description': 'auto_group',\n 'location': 'location', 'device': 'auto_device'}]\n }\n if 'Ecephys' in metadata:\n ecephys_dict.update(metadata['Ecephys'])\n\n # Create devices\n for dev in ecephys_dict['Device']:\n device = nwbfile.create_device(dev['name'])\n\n # Electrode groups\n for el_grp in ecephys_dict['ElectrodeGroup']:\n device = nwbfile.devices[el_grp['device']]\n electrode_group = nwbfile.create_electrode_group(\n name=el_grp['name'],\n description=el_grp['description'],\n location=el_grp['location'],\n device=device\n )\n\n # Electrodes table\n n_electrodes = data.shape[1]\n nwbfile.add_electrode_column('label', 'label of electrode')\n nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')\n nwbfile.add_electrode_column('x_warped', 'x warped onto cvs_avg35_inMNI152')\n 
nwbfile.add_electrode_column('y_warped', 'y warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('z_warped', 'z warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('null', 'if not connected to real electrode')\n bad_elecs_inds = get_bad_elecs(blockpath)\n for elec_counter in range(n_electrodes):\n bad = elec_counter in bad_elecs_inds\n nwbfile.add_electrode(\n id=elec_counter,\n x=np.nan,\n y=np.nan,\n z=np.nan,\n imp=np.nan,\n x_warped=np.nan,\n y_warped=np.nan,\n z_warped=np.nan,\n location='',\n filtering='none',\n group=electrode_group,\n label='',\n bad=bad,\n null=False,\n )\n\n all_elecs = list(range(n_electrodes))\n elecs_region = nwbfile.create_electrode_table_region(\n region=all_elecs,\n description='ECoG electrodes on brain'\n )\n\n # Get Bipolar table from file\n if htk_config['bipolar_file'] is not None:\n df = pd.read_csv(htk_config['bipolar_file'], index_col='id', sep='\\t')\n\n # Create bipolar scheme table\n bipolar_scheme_table = BipolarSchemeTable(\n name='bipolar_scheme_table',\n description='desc'\n )\n\n # Columns for bipolar scheme - all anodes and cathodes within the same\n # bipolar row are considered to have the same group and location\n bipolar_scheme_table.add_column(\n name='group_name',\n description='electrode group name'\n )\n bipolar_scheme_table.add_column(\n name='location',\n description='electrode location'\n )\n\n # Iterate over anode / cathode rows\n for i, r in df.iterrows():\n if isinstance(r['anodes'], str):\n anodes = [int(a) for a in r['anodes'].split(',')]\n else:\n anodes = [int(r['anodes'])]\n if isinstance(r['cathodes'], str):\n cathodes = [int(a) for a in r['cathodes'].split(',')]\n else:\n cathodes = [int(r['cathodes'])]\n bipolar_scheme_table.add_row(\n anodes=anodes,\n cathodes=cathodes,\n group_name=nwbfile.electrodes['group_name'][anodes[0]],\n location=nwbfile.electrodes['location'][anodes[0]]\n )\n\n bipolar_scheme_table.anodes.table = nwbfile.electrodes\n bipolar_scheme_table.cathodes.table = nwbfile.electrodes\n\n # Creates bipolar table region\n elecs_region = DynamicTableRegion(\n name='electrodes',\n data=np.arange(0, df.shape[0]),\n description='desc',\n table=bipolar_scheme_table\n )\n\n ecephys_ext = EcephysExt(name='ecephys_ext')\n ecephys_ext.bipolar_scheme_table = bipolar_scheme_table\n nwbfile.add_lab_meta_data(ecephys_ext)\n\n # Stores HTK electrophysiology data as raw, preprocessed or high gamma\n if htk_config['ecephys_type'] == 'raw':\n ecog_es = ElectricalSeries(name='ECoG',\n data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),\n electrodes=elecs_region,\n rate=ecog_rate,\n description='all Wav data')\n nwbfile.add_acquisition(ecog_es)\n elif htk_config['ecephys_type'] == 'preprocessed':\n lfp = LFP()\n ecog_es = ElectricalSeries(name='preprocessed',\n data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),\n electrodes=elecs_region,\n rate=ecog_rate,\n description='all Wav data')\n lfp.add_electrical_series(ecog_es)\n # Creates the ecephys processing module\n ecephys_module = nwbfile.create_processing_module(\n name='ecephys',\n description='preprocessed electrophysiology data'\n )\n ecephys_module.add_data_interface(lfp)\n elif htk_config['ecephys_type'] == 'high_gamma':\n ecog_es = ElectricalSeries(name='high_gamma',\n data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),\n electrodes=elecs_region,\n rate=ecog_rate,\n description='all Wav data')\n # Creates the ecephys processing module\n ecephys_module = nwbfile.create_processing_module(\n name='ecephys',\n 
description='preprocessed electrophysiology data'\n )\n ecephys_module.add_data_interface(ecog_es)\n\n # Add ANIN 1\n if htk_config['anin1']['present']:\n fs, data = get_analog(anin_path, 1)\n ts = TimeSeries(\n name=htk_config['anin1']['name'],\n data=data,\n unit='NA',\n rate=fs,\n )\n if htk_config['anin1']['type'] == 'acquisition':\n nwbfile.add_acquisition(ts)\n else:\n nwbfile.add_stimulus(ts)\n print('ANIN1 saved with name \"', htk_config['anin1']['name'], '\" in ',\n htk_config['anin1']['type'])\n\n # Add ANIN 2\n if htk_config['anin2']['present']:\n fs, data = get_analog(anin_path, 2)\n ts = TimeSeries(\n name=htk_config['anin2']['name'],\n data=data,\n unit='NA',\n rate=fs,\n )\n if htk_config['anin2']['type'] == 'acquisition':\n nwbfile.add_acquisition(ts)\n else:\n nwbfile.add_stimulus(ts)\n print('ANIN2 saved with name \"', htk_config['anin2']['name'], '\" in ',\n htk_config['anin2']['type'])\n\n # Add ANIN 3\n if htk_config['anin3']['present']:\n fs, data = get_analog(anin_path, 3)\n ts = TimeSeries(\n name=htk_config['anin3']['name'],\n data=data,\n unit='NA',\n rate=fs,\n )\n if htk_config['anin3']['type'] == 'acquisition':\n nwbfile.add_acquisition(ts)\n else:\n nwbfile.add_stimulus(ts)\n print('ANIN3 saved with name \"', htk_config['anin3']['name'], '\" in ',\n htk_config['anin3']['type'])\n\n # Add ANIN 4\n if htk_config['anin4']['present']:\n fs, data = get_analog(anin_path, 4)\n ts = TimeSeries(\n name=htk_config['anin4']['name'],\n data=data,\n unit='NA',\n rate=fs,\n )\n if htk_config['anin4']['type'] == 'acquisition':\n nwbfile.add_acquisition(ts)\n else:\n nwbfile.add_stimulus(ts)\n print('ANIN4 saved with name \"', htk_config['anin4']['name'], '\" in ',\n htk_config['anin4']['type'])\n\n # Add bad time segments\n if os.path.exists(bad_time_file) and os.stat(bad_time_file).st_size:\n bad_time = sio.loadmat(bad_time_file)['badTimeSegments']\n for row in bad_time:\n nwbfile.add_invalid_time_interval(start_time=row[0],\n stop_time=row[1],\n tags=('ECoG artifact',),\n timeseries=ecog_es)\n\n # Subject\n subject_dict = {'subject_id': subject_id}\n if 'Subject' in metadata:\n subject_dict.update(metadata['Subject'])\n subject = ECoGSubject(**subject_dict)\n nwbfile.subject = subject\n\n if save_to_file:\n print('Saving HTK content to NWB file...')\n # Export the NWB file\n with NWBHDF5IO(str(out_file_path), manager=manager, mode='w') as io:\n io.write(nwbfile)\n\n # read check\n with NWBHDF5IO(str(out_file_path), manager=manager, mode='r') as io:\n io.read()\n print('NWB file saved: ', str(out_file_path))\n\n return nwbfile, out_file_path, subject_id, blockname\n" ]
[ [ "pandas.read_csv", "numpy.isfinite", "numpy.unique", "numpy.arange", "scipy.io.loadmat", "numpy.stack", "numpy.zeros_like", "numpy.argsort", "numpy.array", "scipy.io.wavfile.read" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
tmddusgood/NUGU_movie_recommendation-1
[ "0c87638963d4681583f94def038dcd980270cb14" ]
[ "nugu/movie_comment_scrapper/build_model/load_model.py" ]
[ "from gensim.models.word2vec import Word2Vec\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nimport matplotlib\n\nfont_name = matplotlib.font_manager.FontProperties(\n fname=\"C:/Windows/Fonts/gulim.ttc\" # 한글 폰트 위치를 넣어주세요\n ).get_name()\nmatplotlib.rc('font', family=font_name)\n\ndef plot_2d_graph(vocabs, xs, ys): \n plt.figure(figsize=(8,6))\n plt.scatter(xs, ys, marker ='o')\n for i, v in enumerate(vocabs):\n plt.annotate(v, xy=(xs[i], ys[i]))\n plt.show()\n\nmodel = Word2Vec.load('../20191124_0200')\n\n\n\nword_vectors = model.wv\n\nprint(model.wv['김혜수'])\nprint(model.wv.most_similar('욕정'))\nprint(model.wv.most_similar('재미'))\nprint(model.wv.most_similar('재밌'))\nprint(model.most_similar(positive=['김혜수', '레이첼'], negative=['여자', '여배우'], topn=10))\nvocabs = word_vectors.vocab.keys()\nword_vectors_list = [word_vectors[v] for v in vocabs]\n\n# pca = PCA(n_components=2)\n# xys = pca.fit_transform(word_vectors_list)\n# xs = xys[:, 0]\n# ys = xys[:, 1]\n#\n#plot_2d_graph(vocabs, xs, ys)\n\n\n\n\n\n# from sklearn.decomposition import IncrementalPCA # inital reduction\n# from sklearn.manifold import TSNE # final reduction\n# import numpy as np # array handling\n# from gensim.models.word2vec import Word2Vec\n# import matplotlib.pyplot as plt\n# from sklearn.decomposition import PCA\n# import matplotlib\n# from plotly.offline import init_notebook_mode, iplot, plot\n# import plotly.graph_objs as go\n# import random\n#\n# font_name = matplotlib.font_manager.FontProperties(\n# fname=\"C:/Windows/Fonts/gulim.ttc\" # 한글 폰트 위치를 넣어주세요\n# ).get_name()\n# matplotlib.rc('font', family=font_name)\n# model = Word2Vec.load('20191123_2300')\n#\n# def reduce_dimensions(model):\n# num_dimensions = 2 # final num dimensions (2D, 3D, etc)\n#\n# vectors = [] # positions in vector space\n# labels = [] # keep track of words to label our data again later\n# for word in model.wv.vocab:\n# vectors.append(model.wv[word])\n# labels.append(word)\n#\n# # convert both lists into numpy vectors for reduction\n# vectors = np.asarray(vectors)\n# labels = np.asarray(labels)\n#\n# # reduce using t-SNE\n# vectors = np.asarray(vectors)\n# tsne = TSNE(n_components=num_dimensions, random_state=0)\n# vectors = tsne.fit_transform(vectors)\n#\n# x_vals = [v[0] for v in vectors]\n# y_vals = [v[1] for v in vectors]\n# return x_vals, y_vals, labels\n#\n#\n# x_vals, y_vals, labels = reduce_dimensions(model)\n#\n# def plot_with_plotly(x_vals, y_vals, labels, plot_in_notebook=True):\n#\n# trace = go.Scatter(x=x_vals, y=y_vals, mode='text', text=labels)\n# data = [trace]\n#\n# if plot_in_notebook:\n# init_notebook_mode(connected=True)\n# iplot(data, filename='word-embedding-plot')\n# else:\n# plot(data, filename='word-embedding-plot.html')\n#\n#\n# def plot_with_matplotlib(x_vals, y_vals, labels):\n#\n# random.seed(0)\n#\n# plt.figure(figsize=(12, 12))\n# plt.scatter(x_vals, y_vals)\n#\n# #\n# # Label randomly subsampled 25 data points\n# #\n# indices = list(range(len(labels)))\n# selected_indices = random.sample(indices, 25)\n# for i in selected_indices:\n# plt.annotate(labels[i], (x_vals[i], y_vals[i]))\n# plt.show()\n#\n# try:\n# get_ipython()\n# except Exception:\n# plot_function = plot_with_matplotlib\n# else:\n# plot_function = plot_with_plotly\n#\n# plot_function(x_vals, y_vals, labels)" ]
[ [ "matplotlib.pyplot.scatter", "matplotlib.pyplot.annotate", "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.show", "matplotlib.rc", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Repiphany/AoC
[ "d59badb62b82434bccd757e37d6d5c4d0bbf2838" ]
[ "2018/day_10/main.py" ]
[ "#!/usr/bin/env python3\n\nimport re\nimport numpy as np\nimport scipy.optimize\nimport sys\n\ndef moment(positions):\n center_of_mass = np.average(positions, axis = 0)\n return np.sum((positions - center_of_mass)**2)\n\ndef part_1(positions, velocities):\n f = lambda i : moment(positions + i*velocities)\n res = scipy.optimize.minimize(f, x0 = 0)\n pos_final = positions + int(res.x)*velocities\n x_min, y_min = np.min(pos_final, axis = 0).astype(int)\n x_max, y_max = np.max(pos_final, axis = 0).astype(int)\n for y in range(y_min, y_max + 1):\n for x in range(x_min, x_max + 1):\n if np.any(np.all((x, y) == pos_final, axis = 1)):\n sys.stdout.write('#')\n else:\n sys.stdout.write('.')\n sys.stdout.write('\\n')\n print(int(res.x))\n\nif __name__ == '__main__':\n positions, velocities = [], []\n with open('input', 'r') as f:\n for line in f:\n x, y, vx, vy = [int(i) for i in re.findall(r'-?\\d+', line)]\n positions.append((x, y))\n velocities.append((vx, vy))\n positions = np.asarray(positions, dtype = float)\n velocities = np.asarray(velocities, dtype = float)\n\n part_1(positions, velocities)\n\n" ]
[ [ "numpy.min", "numpy.asarray", "numpy.all", "numpy.max", "numpy.average", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lunarnautics/GamestonkTerminal
[ "a1564289c1f4071861240407a069ee57ecad8b84" ]
[ "gamestonk_terminal/common/residuals_analysis/residuals_api.py" ]
[ "\"\"\" Residuals API \"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nfrom typing import List\nfrom collections import OrderedDict\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\nimport seaborn as sns\nfrom scipy import stats\nimport statsmodels.api as sm\n\n# pylint: disable=R0402\nimport matplotlib.gridspec as gridspec\nfrom statsmodels.graphics.gofplots import qqplot\nfrom statsmodels.tsa.stattools import adfuller, kpss, bds\nfrom statsmodels.stats.diagnostic import het_arch\nfrom gamestonk_terminal.helper_funcs import (\n parse_known_args_and_warn,\n plot_autoscale,\n check_positive,\n)\nfrom gamestonk_terminal.config_plot import PLOT_DPI\nfrom gamestonk_terminal import feature_flags as gtff\n\n\nregister_matplotlib_converters()\n\n\ndef fit(\n other_args: List[str],\n ticker: str,\n stock: pd.Series,\n model_name: str,\n model: pd.Series,\n):\n \"\"\"Plot model fitting\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n stock : pd.Series\n Stock data\n model_name : str\n Model fitting name in use\n model : pd.Series\n Model fit data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"fit\",\n description=\"\"\"\n Plot model fitting\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)\n\n plt.plot(stock)\n plt.plot(model)\n plt.title(f\"{model_name} model fit on {ticker}\")\n plt.xlim(stock.index[0], stock.index[-1])\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.legend([ticker, model_name])\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef res(\n other_args: List[str],\n ticker: str,\n stock: pd.Series,\n model_name: str,\n residuals: List[float],\n):\n \"\"\"Plot residuals\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n stock : pd.Series\n Stock data\n model_name : str\n Model fitting name in use\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"res\",\n description=\"\"\"\n Plot residuals\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)\n\n plt.plot(stock[1:].index, residuals)\n plt.title(f\"Residuals from {model_name} model fit on {ticker}\")\n plt.xlim(stock[1:].index[0], stock.index[-1])\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef hist(\n other_args: List[str],\n ticker: str,\n model_name: str,\n residuals: List[float],\n):\n \"\"\"Histogram and density curve\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n model_name : str\n Model fitting name 
in use\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"hist\",\n description=\"\"\"\n Histogram and density curve\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI, constrained_layout=True)\n\n sns.distplot(\n residuals,\n bins=35,\n color=\"blue\",\n hist=True,\n hist_kws={\"edgecolor\": \"black\"},\n kde=True,\n kde_kws={\"color\": \"black\", \"lw\": 3, \"label\": \"KDE\"},\n rug=True,\n rug_kws={\"edgecolor\": \"orange\"},\n )\n plt.title(f\"Histogram with Density from {model_name} fit on {ticker}\")\n plt.ylabel(\"Density\")\n plt.xlabel(\"Share Price\")\n plt.grid(True)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef plot_qqplot(\n other_args: List[str],\n ticker: str,\n model_name: str,\n residuals: List[float],\n):\n \"\"\"Qqplot time series against a standard normal curve\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n model_name : str\n Model fitting name in use\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"qqplot\",\n description=\"\"\"\n Qqplot time series against a standard normal curve\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI, constrained_layout=True)\n\n qqplot(residuals, stats.distributions.norm, fit=True, line=\"45\", ax=plt.gca())\n plt.title(f\"Q-Q plot residuals from {model_name} on {ticker}\")\n plt.ylabel(\"Sample quantiles\")\n plt.xlabel(\"Theoretical quantiles\")\n plt.grid(True)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef acf(\n other_args: List[str],\n ticker: str,\n model_name: str,\n residuals: List[float],\n):\n \"\"\"Plot (partial) auto-correlation function\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n model_name : str\n Model fitting name in use\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"acf\",\n description=\"\"\"\n Plot (partial) auto-correlation function\n \"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--lags\",\n dest=\"lags\",\n type=check_positive,\n default=40,\n help=\"maximum lags to display in plots\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n fig = plt.figure(\n figsize=plot_autoscale(), dpi=PLOT_DPI, constrained_layout=True\n )\n spec = gridspec.GridSpec(ncols=1, nrows=2, figure=fig)\n\n # Auto-correlation function for original time series\n ax_acf = fig.add_subplot(spec[0, 0])\n sm.graphics.tsa.plot_acf(residuals, lags=ns_parser.lags, ax=ax_acf)\n plt.title(\n f\"Auto-Correlation function applied to Residuals from {model_name} on {ticker}\"\n )\n # Partial auto-correlation function for original time series\n ax_pacf = fig.add_subplot(spec[1, 0])\n sm.graphics.tsa.plot_pacf(residuals, lags=ns_parser.lags, ax=ax_pacf)\n plt.title(\n f\"Partial Auto-Correlation function applied to Residuals from {model_name} on {ticker}\"\n )\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except 
Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef normality(\n other_args: List[str],\n residuals: List[float],\n):\n \"\"\"Normality tests\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"normality\",\n description=\"\"\"\n Normality tests\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n # Kurtosis\n # Measures height and sharpness of the central peak relative to that of a standard bell curve\n k, kpval = stats.kurtosistest(residuals)\n\n # Skewness\n # Measure of the asymmetry of the probability distribution of a random variable about its mean\n s, spval = stats.skewtest(residuals)\n\n # Jarque-Bera goodness of fit test on sample data\n # Tests if the sample data has the skewness and kurtosis matching a normal distribution\n jb, jbpval = stats.jarque_bera(residuals)\n\n # Shapiro\n # The Shapiro-Wilk test tests the null hypothesis that the data was drawn from a normal distribution.\n sh, shpval = stats.shapiro(residuals)\n\n l_statistic = [k, s, jb, sh]\n l_pvalue = [kpval, spval, jbpval, shpval]\n\n print(\n pd.DataFrame(\n [l_statistic, l_pvalue],\n columns=[\"Kurtosis\", \"Skewness\", \"Jarque-Bera\", \"Shapiro-Wilk\"],\n index=[\"Statistic\", \"p-value\"],\n )\n .round(5)\n .to_string()\n )\n\n print(\"\")\n kurtosis_val = stats.kurtosis(residuals, fisher=True)\n print(\"Kurtosis value: %.4f\" % kurtosis_val)\n skew_val = stats.skew(residuals)\n print(\"Skewness value: %.4f\" % skew_val)\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef goodness(\n other_args: List[str],\n residuals: List[float],\n):\n \"\"\"Goodness of fit tests\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"goodness\",\n description=\"\"\"\n Goodness of fit tests\n \"\"\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n # Kolmogorov-Smirnov Test goodness of fit test on sample data\n ks, kspval = stats.kstest(residuals, \"norm\")\n print(\"Kolmogorov-Smirnov Test\")\n print(\"Statistic: %.4f\" % ks)\n print(\"p-value: %.4f\" % kspval)\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef arch(\n other_args: List[str],\n residuals: List[float],\n):\n \"\"\"Autoregressive conditional heteroscedasticity with Engle's test\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"arch\",\n description=\"\"\"\n Autoregressive conditional heteroscedasticity with Engle's test\n \"\"\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n # Engle's Test for Autoregressive Conditional Heteroscedasticity (ARCH)\n lm, lmpval, fval, fpval = het_arch(residuals)\n print(\"Lagrange multiplier test statistic\")\n print(\"Statistic: %.4f\" % lm)\n print(\"p-value: %.4f\" % lmpval)\n print(\"\")\n print(\"fstatistic for F test\")\n print(\"Statistic: %.4f\" % fval)\n print(\"p-value: %.4f\" % fpval)\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef unitroot(\n other_args: List[str],\n 
residuals: List[float],\n):\n \"\"\"Unit root test / stationarity (ADF, KPSS)\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"arch\",\n description=\"\"\"\n Unit root test / stationarity (ADF, KPSS)\n \"\"\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n # The Augmented Dickey-Fuller test\n # Used to test for a unit root in a univariate process in the presence of serial correlation.\n # regression{‘c’,’ct’,’ctt’,’nc’} 'c' - Constant and 't'-trend order to include in regression\n # Note: 'ct' - The data is stationary around a trend\n result = adfuller(residuals, regression=\"c\")\n print(\"Augmented Dickey Fuller Test\")\n print(\"ADF Statistic: %.4f\" % result[0])\n print(\"p-value: %.4f\" % result[1])\n print(\"Used lags: %d\" % result[2])\n print(\"Num obs: %d\" % result[3])\n print(\"Critical Values:\")\n d = OrderedDict(sorted(result[4].items(), key=lambda t: t[1]))\n for key, value in d.items():\n print(f\"\\t{key}: {value:.3f}\")\n print(\"\")\n\n # Kwiatkowski-Phillips-Schmidt-Shin test\n # Test for level or trend stationarity\n # Note: regressionstr{‘c’, ‘ct’}\n # regressionstr{‘c’, ‘ct’} where:\n # ‘c’ : The data is stationary around a constant (default).\n # ‘ct’ : The data is stationary around a trend.\n # lags{None, ‘auto’, ‘legacy’}\n # see: https://www.statsmodels.org/dev/generated/statsmodels.tsa.stattools.kpss.html\n print(\"Kwiatkowski-Phillips-Schmidt-Shin Test\")\n result = kpss(residuals, regression=\"c\", nlags=\"auto\")\n print(\"KPSS Statistic: %.4f\" % result[0])\n print(\"Critical Values:\")\n d = OrderedDict(sorted(result[3].items(), key=lambda t: t[1], reverse=True))\n for key, value in d.items():\n print(f\"\\t{key}: {value:.3f}\")\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef independence(\n other_args: List[str],\n residuals: List[float],\n):\n \"\"\"Tests independent and identically distributed (i.i.d.) time series (BDS)\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"independence\",\n description=\"\"\"\n Tests independent and identically distributed (i.i.d.) time series (BDS)\n \"\"\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n result = bds(residuals, max_dim=6)\n print(\"BDS Test\")\n print(f\"Dim 2: z-static {result[0][0]:.4f} Prob {result[1][0]:.4f}\")\n print(f\"Dim 3: z-static {result[0][1]:.4f} Prob {result[1][1]:.4f}\")\n print(f\"Dim 4: z-static {result[0][2]:.4f} Prob {result[1][2]:.4f}\")\n print(f\"Dim 5: z-static {result[0][3]:.4f} Prob {result[1][3]:.4f}\")\n print(f\"Dim 6: z-static {result[0][4]:.4f} Prob {result[1][4]:.4f}\")\n\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.minorticks_on", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.gca", "pandas.plotting.register_matplotlib_converters", "matplotlib.gridspec.GridSpec", "scipy.stats.kurtosistest", "scipy.stats.skew", "matplotlib.pyplot.title", "scipy.stats.skewtest", "scipy.stats.kurtosis", "matplotlib.pyplot.show", "matplotlib.pyplot.ion", "matplotlib.pyplot.ylabel", "scipy.stats.kstest", "matplotlib.pyplot.xlim", "scipy.stats.shapiro", "matplotlib.pyplot.grid", "scipy.stats.jarque_bera", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
py1sl/neutronicstool
[ "da5e459e47bb67afbef9a9387e054023b6c0f048" ]
[ "format_from_EXCEL.py" ]
[ "\"\"\"\r\nThis tool reads in excel data, formats appropriately and plots graph of beam\r\ncurrent cycles over time.\r\nneeds xlrd package\r\n\"\"\"\r\n\r\nimport re\r\nimport pandas as pd\r\nimport logging\r\nimport utilities\r\n\r\n\r\ndef getdate():\r\n \"\"\"function to select appropriate start and end date for range of\r\n cycles we are interested in\r\n \"\"\"\r\n p = re.compile('[^0-9\\s]+')\r\n while True:\r\n date = input(\"Please input date in the format YYYY M D \\n\")\r\n m = p.search(date) # checks for any non numbers\r\n\r\n if m:\r\n logging.debug('Looks like you have a typo.')\r\n\r\n else:\r\n datelst = date.split() # splits along whitespace into list of strings\r\n datelst = list(map(int, datelst)) # converts list of strings into list of integers\r\n if datelst[1] > 12 or datelst[1] <= 0:\r\n logging.debug('Your month looks a little funny.')\r\n if datelst[2] > 31 or datelst[2] <= 0:\r\n logging.debug('Your day value looks strange.')\r\n else:\r\n logging.debug('I work!')\r\n return(datelst)\r\n # going to have to convert this string of integers into datetime data type\r\n\r\n\r\ndef findrng(date1, date2):\r\n \"\"\"\r\n Takes start and end date, finds the number of days between\r\n them.\r\n \"\"\"\r\n days = pd.date_range(date1, date2, freq='D')\r\n return days\r\n\r\n\r\ndef formatExcel(file):\r\n \"\"\"\r\n Takes data of interest in from excel file and formats to create a pandas\r\n dataframe. Currently acts on whole set of data.\r\n\r\n \"\"\"\r\n cols = \"B,C,I\"\r\n df = pd.read_excel(file, header=None, sheet_name='Data', skiprows=[0,1,2,3,4,5],na_values=['NA'], usecols = cols)\r\n df.columns = [\"Start\", \"Finish\", \"Average µA\"]\r\n df = df.drop(df.index[86:95])\r\n\r\n # Take start and end time for whole dataset\r\n # Date selectivity goes here, enter manually or select from excel file\r\n # check if we are in the correct range\r\n\r\n print(\"Please choose your start date\")\r\n start_date = getdate()\r\n print(start_date)\r\n\r\n print(\"Please choose your end date\")\r\n end_date = getdate()\r\n print(end_date)\r\n\r\n start_plot = pd.Timestamp(start_date[0], start_date[1], start_date[2], 0, 0, 0)\r\n end_plot = pd.Timestamp(end_date[0], end_date[1], end_date[2], 0, 0, 0)\r\n\r\n # Find range in days between start and end points\r\n rng = pd.date_range(start_plot, end_plot, freq='D')\r\n\r\n # Make empty dataset\r\n df0 = pd.DataFrame(index=rng, columns=[\"Average µA\"])\r\n df0 = df0.fillna(0)\r\n \r\n df['Dates'] = df.apply(lambda x: findrng(x['Start'], x['Finish']), axis=1)\r\n \"\"\"Uses findrng function on 'Start' and 'Finish' columns, creates a dataframe\r\n 'Dates' containing a set of days spanning each cycle run.\r\n \"\"\"\r\n\r\n df2 = pd.DataFrame()\r\n\r\n \"\"\"\"This loop takes each of the days in df['Dates'], matches it to its\r\n correct current value and appends that to our final dataframe df2.\r\n \"\"\"\r\n n = 0\r\n for j in df.iloc[:, 3]:\r\n n += 1\r\n for i in df.iloc[n-1][3]:\r\n df2 = df2.append({'Average µA': df.iloc[n-1][2], 'Dates': i}, ignore_index=True)\r\n\r\n df2 = df2.set_index('Dates')\r\n \"\"\"Uses dates column as index. 
\"\"\"\r\n\r\n df2 = df2.combine_first(df0)\r\n \"\"\"Ensures that empty values are set to zero through combining with an\r\n empty dataframe\"\"\"\r\n\r\n # chop data frame and only keep relevant data\r\n df2 = df2[start_plot:end_plot]\r\n\r\n return df2\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n utilities.setup_logging()\r\n df2 = formatExcel('cyclemainoperationalparameters.xlsx')\r\n # select from menu which file to load\r\n utilities.plot_irrad(df2)\r\n" ]
[ [ "pandas.read_excel", "pandas.Timestamp", "pandas.DataFrame", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
cogitoergoread/muszi-macrohard.hu
[ "e9bbd36b789e670f96622a3a2ba8327f0d897561", "e9bbd36b789e670f96622a3a2ba8327f0d897561", "e9bbd36b789e670f96622a3a2ba8327f0d897561", "e9bbd36b789e670f96622a3a2ba8327f0d897561" ]
[ "rlcard3/agents/dqn_agent_pytorch.py", "tests/unittest/envs/test_blackjack_env.py", "rlcard3/envs/mahjong.py", "examples/measure/cmp-cfg.py" ]
[ "''' DQN agent\n\nThe code is derived from https://github.com/dennybritz/reinforcement-learning/blob/master/DQN/dqn.py\n\nCopyright (c) 2019 Matthew Judell\nCopyright (c) 2019 DATA Lab at Texas A&M University\nCopyright (c) 2016 Denny Britz\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom collections import namedtuple\nfrom copy import deepcopy\n\nfrom rlcard3.agents.dqn_agent import Memory\nfrom rlcard3.utils.utils import remove_illegal\n\nTransition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'done'])\n\n\nclass DQNAgent(object):\n '''\n Approximate clone of rlcard3.agents.dqn_agent.DQNAgent\n that depends on PyTorch instead of Tensorflow\n '''\n def __init__(self,\n scope,\n replay_memory_size=20000,\n replay_memory_init_size=100,\n update_target_estimator_every=1000,\n discount_factor=0.99,\n epsilon_start=1.0,\n epsilon_end=0.1,\n epsilon_decay_steps=20000,\n batch_size=32,\n action_num=2,\n state_shape=None,\n train_every=1,\n mlp_layers=None,\n learning_rate=0.00005,\n device=None):\n\n '''\n Q-Learning algorithm for off-policy TD control using Function Approximation.\n Finds the optimal greedy policy while following an epsilon-greedy policy.\n\n Args:\n scope (str): The name of the DQN agent\n replay_memory_size (int): Size of the replay memory\n replay_memory_init_size (int): Number of random experiences to sampel when initializing\n the reply memory.\n update_target_estimator_every (int): Copy parameters from the Q estimator to the\n target estimator every N steps\n discount_factor (float): Gamma discount factor\n epsilon_start (int): Chance to sample a random action when taking an action.\n Epsilon is decayed over time and this is the start value\n epsilon_end (int): The final minimum value of epsilon after decaying is done\n epsilon_decay_steps (int): Number of steps to decay epsilon over\n batch_size (int): Size of batches to sample from the replay memory\n evaluate_every (int): Evaluate every N steps\n action_num (int): The number of the actions\n state_space (list): The space of the state vector\n train_every (int): Train the network every X steps.\n mlp_layers (list): The layer number and the dimension of each layer in MLP\n learning_rate (float): The learning rate of the DQN agent.\n device (torch.device): whether to use the cpu or gpu\n '''\n self.use_raw = False\n self.scope = scope\n self.replay_memory_init_size = replay_memory_init_size\n self.update_target_estimator_every = 
update_target_estimator_every\n self.discount_factor = discount_factor\n self.epsilon_decay_steps = epsilon_decay_steps\n self.batch_size = batch_size\n self.action_num = action_num\n self.train_every = train_every\n\n # Torch device\n if device is None:\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n else:\n self.device = device\n\n # Total timesteps\n self.total_t = 0\n\n # Total training step\n self.train_t = 0\n\n # The epsilon decay scheduler\n self.epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)\n\n # Create estimators\n self.q_estimator = Estimator(action_num=action_num, learning_rate=learning_rate, state_shape=state_shape, \\\n mlp_layers=mlp_layers, device=self.device)\n self.target_estimator = Estimator(action_num=action_num, learning_rate=learning_rate, state_shape=state_shape, \\\n mlp_layers=mlp_layers, device=self.device)\n\n # Create replay memory\n self.memory = Memory(replay_memory_size, batch_size)\n\n def feed(self, ts):\n ''' Store data in to replay buffer and train the agent. There are two stages.\n In stage 1, populate the memory without training\n In stage 2, train the agent every several timesteps\n\n Args:\n ts (list): a list of 5 elements that represent the transition\n '''\n (state, action, reward, next_state, done) = tuple(ts)\n self.feed_memory(state['obs'], action, reward, next_state['obs'], done)\n self.total_t += 1\n tmp = self.total_t - self.replay_memory_init_size\n if tmp>=0 and tmp%self.train_every == 0:\n self.train()\n\n def step(self, state):\n ''' Predict the action for genrating training data but\n have the predictions disconnected from the computation graph\n\n Args:\n state (numpy.array): current state\n\n Returns:\n action (int): an action id\n '''\n A = self.predict(state['obs'])\n A = remove_illegal(A, state['legal_actions'])\n action = np.random.choice(np.arange(len(A)), p=A)\n return action\n\n def eval_step(self, state):\n ''' Predict the action for evaluation purpose.\n\n Args:\n state (numpy.array): current state\n\n Returns:\n action (int): an action id\n '''\n q_values = self.q_estimator.predict_nograd(np.expand_dims(state['obs'], 0))[0]\n probs = remove_illegal(np.exp(q_values), state['legal_actions'])\n best_action = np.argmax(probs)\n return best_action, probs\n\n def predict(self, state):\n ''' Predict the action probabilities but have them\n disconnected from the computation graph\n\n Args:\n state (numpy.array): current state\n\n Returns:\n q_values (numpy.array): a 1-d array where each entry represents a Q value\n '''\n epsilon = self.epsilons[min(self.total_t, self.epsilon_decay_steps-1)]\n A = np.ones(self.action_num, dtype=float) * epsilon / self.action_num\n q_values = self.q_estimator.predict_nograd(np.expand_dims(state, 0))[0]\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n\n def train(self):\n ''' Train the network\n\n Returns:\n loss (float): The loss of the current batch.\n '''\n state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample()\n\n # Calculate best next actions using Q-network (Double DQN)\n q_values_next = self.q_estimator.predict_nograd(next_state_batch)\n best_actions = np.argmax(q_values_next, axis=1)\n\n # Evaluate best next actions using Target-network (Double DQN)\n q_values_next_target = self.target_estimator.predict_nograd(next_state_batch)\n target_batch = reward_batch + np.invert(done_batch).astype(np.float32) * \\\n self.discount_factor * 
q_values_next_target[np.arange(self.batch_size), best_actions]\n\n # Perform gradient descent update\n state_batch = np.array(state_batch)\n\n loss = self.q_estimator.update(state_batch, action_batch, target_batch)\n print('\\rINFO - Agent {}, step {}, rl-loss: {}'.format(self.scope, self.total_t, loss), end='')\n\n # Update the target estimator\n if self.train_t % self.update_target_estimator_every == 0:\n self.target_estimator = deepcopy(self.q_estimator)\n print(\"\\nINFO - Copied model parameters to target network.\")\n\n self.train_t += 1\n\n def feed_memory(self, state, action, reward, next_state, done):\n ''' Feed transition to memory\n\n Args:\n state (numpy.array): the current state\n action (int): the performed action ID\n reward (float): the reward received\n next_state (numpy.array): the next state after performing the action\n done (boolean): whether the episode is finished\n '''\n self.memory.save(state, action, reward, next_state, done)\n\n def get_state_dict(self):\n ''' Get the state dict to save models\n\n Returns:\n (dict): A dict of model states\n '''\n q_key = self.scope + '_q_estimator'\n q_value = self.q_estimator.qnet.state_dict()\n target_key = self.scope + '_target_estimator'\n target_value = self.target_estimator.qnet.state_dict()\n return {q_key: q_value, target_key: target_value}\n\n def load(self, checkpoint):\n ''' Load model\n\n Args:\n checkpoint (dict): the loaded state\n '''\n q_key = self.scope + '_q_estimator'\n self.q_estimator.qnet.load_state_dict(checkpoint[q_key])\n target_key = self.scope + '_target_estimator'\n self.target_estimator.qnet.load_state_dict(checkpoint[target_key])\n\nclass Estimator(object):\n '''\n Approximate clone of rlcard3.agents.dqn_agent.Estimator that\n uses PyTorch instead of Tensorflow. All methods input/output np.ndarray.\n\n Q-Value Estimator neural network.\n This network is used for both the Q-Network and the Target Network.\n '''\n\n def __init__(self, action_num=2, learning_rate=0.001, state_shape=None, mlp_layers=None, device=None):\n ''' Initilalize an Estimator object.\n\n Args:\n action_num (int): the number output actions\n state_shape (list): the shape of the state space\n mlp_layers (list): size of outputs of mlp layers\n device (torch.device): whether to use cpu or gpu\n '''\n self.action_num = action_num\n self.learning_rate=learning_rate\n self.state_shape = state_shape\n self.mlp_layers = mlp_layers\n self.device = device\n\n # set up Q model and place it in eval mode\n qnet = EstimatorNetwork(action_num, state_shape, mlp_layers)\n qnet = qnet.to(self.device)\n self.qnet = qnet\n self.qnet.eval()\n\n # initialize the weights using Xavier init\n for p in self.qnet.parameters():\n if len(p.data.shape) > 1:\n nn.init.xavier_uniform_(p.data)\n\n # set up loss function\n self.mse_loss = nn.MSELoss(reduction='mean')\n\n # set up optimizer\n self.optimizer = torch.optim.Adam(self.qnet.parameters(), lr=self.learning_rate)\n\n def predict_nograd(self, s):\n ''' Predicts action values, but prediction is not included\n in the computation graph. 
It is used to predict optimal next\n        actions in the Double-DQN algorithm.\n\n        Args:\n          s (np.ndarray): (batch, state_len)\n\n        Returns:\n          np.ndarray of shape (batch_size, NUM_VALID_ACTIONS) containing the estimated\n          action values.\n        '''\n        with torch.no_grad():\n            s = torch.from_numpy(s).float().to(self.device)\n            q_as = self.qnet(s).cpu().numpy()\n        return q_as\n\n    def update(self, s, a, y):\n        ''' Updates the estimator towards the given targets.\n            In this case y is the target-network estimated\n            value of the Q-network optimal actions, which\n            is labeled y in Algorithm 1 of Mnih et al. (2015)\n\n        Args:\n          s (np.ndarray): (batch, state_shape) state representation\n          a (np.ndarray): (batch,) integer sampled actions\n          y (np.ndarray): (batch,) value of optimal actions according to Q-target\n\n        Returns:\n          The calculated loss on the batch.\n        '''\n        self.optimizer.zero_grad()\n\n        self.qnet.train()\n\n        s = torch.from_numpy(s).float().to(self.device)\n        a = torch.from_numpy(a).long().to(self.device)\n        y = torch.from_numpy(y).float().to(self.device)\n\n        # (batch, state_shape) -> (batch, action_num)\n        q_as = self.qnet(s)\n\n        # (batch, action_num) -> (batch, )\n        Q = torch.gather(q_as, dim=-1, index=a.unsqueeze(-1)).squeeze(-1)\n\n        # update model\n        batch_loss = self.mse_loss(Q, y)\n        batch_loss.backward()\n        self.optimizer.step()\n        batch_loss = batch_loss.item()\n\n        self.qnet.eval()\n\n        return batch_loss\n\n\nclass EstimatorNetwork(nn.Module):\n    ''' The function approximation network for Estimator\n        It is just a series of tanh layers. All in/out are torch.tensor\n    '''\n\n    def __init__(self, action_num=2, state_shape=None, mlp_layers=None):\n        ''' Initialize the Q network\n\n        Args:\n            action_num (int): number of legal actions\n            state_shape (list): shape of state tensor\n            mlp_layers (list): output size of each fc layer\n        '''\n        super(EstimatorNetwork, self).__init__()\n\n        self.action_num = action_num\n        self.state_shape = state_shape\n        self.mlp_layers = mlp_layers\n\n        # build the Q network\n        layer_dims = [np.prod(self.state_shape)] + self.mlp_layers\n        fc = [nn.Flatten()]\n        fc.append(nn.BatchNorm1d(layer_dims[0]))\n        for i in range(len(layer_dims)-1):\n            fc.append(nn.Linear(layer_dims[i], layer_dims[i+1], bias=True))\n            fc.append(nn.Tanh())\n        fc.append(nn.Linear(layer_dims[-1], self.action_num, bias=True))\n        self.fc_layers = nn.Sequential(*fc)\n\n    def forward(self, s):\n        ''' Predict action values\n\n        Args:\n            s (Tensor): (batch, state_shape)\n        '''\n        return self.fc_layers(s)\n", "import unittest\nimport numpy as np\n\nimport rlcard3\nfrom rlcard3.agents.random_agent import RandomAgent\n\n\nclass TestBlackjackEnv(unittest.TestCase):\n\n    def test_init_and_extract_state(self):\n        env = rlcard3.make('blackjack')\n        state, _ = env.init_game()\n        for score in state['obs']:\n            self.assertLessEqual(score, 30)\n\n    def test_decode_action(self):\n        env = rlcard3.make('blackjack')\n        self.assertEqual(env._decode_action(0), 'hit')\n        self.assertEqual(env._decode_action(1), 'stand')\n\n    def test_get_legal_actions(self):\n        env = rlcard3.make('blackjack')\n        actions = env._get_legal_actions()\n        self.assertEqual(len(actions), 2)\n        self.assertEqual(actions[0], 0)\n        self.assertEqual(actions[1], 1)\n\n    def test_get_payoffs(self):\n        env = rlcard3.make('blackjack')\n        for _ in range(100):\n            env.init_game()\n            while not env.is_over():\n                action = np.random.choice([0, 1])\n                env.step(action)\n            payoffs = env.get_payoffs()\n            for payoff in payoffs:\n                self.assertIn(payoff, [-1, 1, 0])\n\n    def test_step_back(self):\n        env = rlcard3.make('blackjack', 
config={'allow_step_back':True})\n        _, player_id = env.init_game()\n        env.step(1)\n        _, back_player_id = env.step_back()\n        self.assertEqual(player_id, back_player_id)\n        self.assertEqual(env.step_back(), False)\n\n        env = rlcard3.make('blackjack')\n        with self.assertRaises(Exception):\n            env.step_back()\n\n    def test_run(self):\n        env = rlcard3.make('blackjack')\n        env.set_agents([RandomAgent(env.action_num)])\n        trajectories, _ = env.run(is_training=False)\n        self.assertEqual(len(trajectories), 1)\n        trajectories, _ = env.run(is_training=True, seed=1)\n        self.assertEqual(len(trajectories), 1)\n\nif __name__ == '__main__':\n    unittest.main()\n", "import numpy as np\n\nfrom rlcard3.envs.env import Env\nfrom rlcard3.games.mahjong.game import MahjongGame as Game\nfrom rlcard3.games.mahjong.card import MahjongCard as Card\nfrom rlcard3.games.mahjong.utils import card_encoding_dict, encode_cards, pile2list\n\nclass MahjongEnv(Env):\n    ''' Mahjong Environment\n    '''\n\n    def __init__(self, config):\n        self.game = Game()\n        super().__init__(config)\n        self.action_id = card_encoding_dict\n        self.de_action_id = {self.action_id[key]: key for key in self.action_id.keys()}\n        self.state_shape = [6, 34, 4]\n\n    def _extract_state(self, state):\n        ''' Encode state\n\n        Args:\n            state (dict): dict of original state\n\n        Returns:\n            numpy array: 6*34*4 array\n                           6 : current hand\n                               the table (the union of all played cards)\n                               the piles of each of the four players\n        '''\n        players_pile = state['players_pile']\n        hand_rep = encode_cards(state['current_hand'])\n        piles_rep = []\n        for p in players_pile.keys():\n            piles_rep.append(encode_cards(pile2list(players_pile[p])))\n        piles_rep = np.array(piles_rep)\n        table_rep = encode_cards(state['table'])\n        rep = [hand_rep, table_rep]\n        rep.extend(piles_rep)\n        obs = np.array(rep)\n\n        extracted_state = {'obs': obs, 'legal_actions': self._get_legal_actions()}\n        if self.allow_raw_data:\n            extracted_state['raw_obs'] = state\n            extracted_state['raw_legal_actions'] = [a for a in state['action_cards']]\n        if self.record_action:\n            extracted_state['action_record'] = self.action_recorder\n        return extracted_state\n\n    def get_payoffs(self):\n        ''' Get the payoffs of players. Must be implemented in the child class.\n\n        Returns:\n            payoffs (list): a list of payoffs for each player\n        '''\n        _, player, _ = self.game.judger.judge_game(self.game)\n        if player == -1:\n            payoffs = [0, 0, 0, 0]\n        else:\n            payoffs = [-1, -1, -1, -1]\n            payoffs[player] = 1\n        return payoffs\n\n    def _decode_action(self, action_id):\n        ''' Action id -> the action in the game. 
Must be implemented in the child class.\n\n        Args:\n            action_id (int): the id of the action\n\n        Returns:\n            action (string): the action that will be passed to the game engine.\n        '''\n        action = self.de_action_id[action_id]\n        if action_id < 34:\n            candidates = self.game.get_legal_actions(self.game.get_state(self.game.round.current_player))\n            for card in candidates:\n                if card.get_str() == action:\n                    action = card\n                    break\n        return action\n\n    def _get_legal_actions(self):\n        ''' Get all legal actions for current state\n\n        Returns:\n            legal_action_id (list): a list of legal actions' id\n        '''\n        legal_action_id = []\n        legal_actions = self.game.get_legal_actions(self.game.get_state(self.game.round.current_player))\n        if legal_actions:\n            for action in legal_actions:\n                if isinstance(action, Card):\n                    action = action.get_str()\n                action_id = self.action_id[action]\n                legal_action_id.append(action_id)\n        else:\n            print(\"##########################\")\n            print(\"No Legal Actions\")\n            print(self.game.judger.judge_game(self.game))\n            print(self.game.is_over())\n            print([len(p.pile) for p in self.game.players])\n            #print(self.game.get_state(self.game.round.current_player))\n            #exit()\n        return legal_action_id\n", "\"\"\"\n    File name: examples/measure/cmp-cfg.py\n    Author: József Varga\n    Date created: 4/06/2020\n    Compare various agents\n\"\"\"\nimport os\nfrom typing import List\nimport io\nfrom urllib.request import urlopen\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nfrom rlcard3.games.mocsar.agentdb import str_to_agent_dict, get_by_id\nfrom rlcard3.utils.config_read import Config\n\nconf = Config('environ.properties')\n# PATH Const\nLOG_SAVE_PRFX = conf.get_str(section='cfg.compare', key=\"stat_dir_path\")\nPNG_SAVE_PRFX = conf.get_str(section='cfg.visual', key=\"png_dir_path\")\nlog_dirname = conf.get_str(section='cfg.visual', key=\"dir_name\")\nlog_filename = conf.get_str(section='cfg.visual', key=\"file_name\")\n\n\ndef read_data_local() -> pd.DataFrame:\n    csv_file_name = os.path.join(LOG_SAVE_PRFX, log_dirname, log_filename)\n    dfr = pd.read_csv(csv_file_name, sep=\";\", usecols=[\"cardnr\", \"agentid\", \"agentstr\", \"payoff\"])\n    return dfr\n\ndef read_data_github(csv_url: str) -> pd.DataFrame:\n    r1 = urlopen(csv_url)\n\n    df1 = pd.read_csv(io.BytesIO(r1.read()),\n                      compression='gzip',\n                      sep=\";\",\n                      usecols=[\"cardnr\", \"agentid\", \"agentstr\", \"payoff\"])\n    return df1\n\n\ndef create_plots(df: pd.DataFrame, agentstr: str, plot_type: str):\n    \"\"\"\n    Create nice figures from the played data\n    :param df: DataFrame containing the logs of the games\n    :param agentstr: Which type of data to display\n    :param plot_type: VIO: Violin plot, LIN: Line plot\n    \"\"\"\n    if plot_type in [\"CMR\", 'CMM']:\n        # Compare performance against Random agents\n        agentstrli = agentstr.split('-')\n        if plot_type == \"CMR\":\n            # Filter out the result of a RandomAgent\n            ag_id_str = 'R'\n        else:\n            # Filter out the result of a MinPlus agent\n            ag_id_str = 'M'\n        ag_li_str = ','.join([get_by_id(agstr.replace(ag_id_str, \"\")[0]).aname for agstr in agentstrli])\n\n        # Plays from the list\n        df2 = df[df.agentstr.isin(agentstrli)]\n        df2 = df2[df2['agentid'] != ag_id_str]\n        title = f\"Mean payoff against Random agent for {ag_li_str[:20]}\"\n        plt_filename = f\"Randvs2Ags2L_{plot_type}.png\"\n        _ = sns.relplot(x=\"cardnr\", y=\"payoff\", kind=\"line\", data=df2, hue=\"agentstr\")\n    else:\n        ag_di = 
str_to_agent_dict(agentstr, False)\n        ag_1, ag_2 = get_by_id(list(ag_di.keys())[0]), get_by_id(list(ag_di.keys())[1])\n\n        if plot_type == 'VIO':\n            # Violin plot comparing agent performance\n            title = f\"Payoff distribution for {ag_di[ag_1.aid]} {ag_1.aname} and {ag_di[ag_2.aid]} {ag_2.aname}\"\n            plt_filename = f\"Ag2vs2V_{agentstr}.png\"\n            _ = sns.violinplot(x=\"cardnr\", y=\"payoff\", data=df[(df['agentstr'] == agentstr)], hue=\"agentid\",\n                               split=True)\n        elif plot_type == 'LIN':\n            # Line plot comparing agent vs agent results\n            title = f\"Mean payoff for {ag_di[ag_1.aid]} {ag_1.aname} and {ag_di[ag_2.aid]} {ag_2.aname}\"\n            plt_filename = f\"Ag2vs2L_{agentstr}.png\"\n            _ = sns.relplot(x=\"cardnr\", y=\"payoff\", kind=\"line\", data=df[(df['agentstr'] == agentstr)], hue=\"agentid\")\n    plt.title(title)\n    #plt.tight_layout()\n    plt.subplots_adjust(top=0.88)\n\n    dir_path = os.path.join(PNG_SAVE_PRFX, log_dirname)\n    if not os.path.exists(dir_path):\n        os.makedirs(dir_path)\n\n    plt.savefig(os.path.join(dir_path, plt_filename))\n    plt.show()\n\n\ndef plot_list_items(pl_list: List):\n    df = read_data_local()\n    for plot_element in pl_list:\n        plot_type, agentstr = plot_element.split(':')\n        create_plots(df, agentstr, plot_type)\n\n\nplot_list_items(conf.get_str(section='cfg.visual', key=\"plot_list\").split(','))\n\n#df2 = read_data_github(csv_url='https://github.com/cogitoergoread/rlcard3/raw/master/jupyter/data'\n#                    '/3RAS_Rule_vs_RLAI_1000_20200414-002659.csv.gz')\n# create_plots(df2, 'kkll', 'LIN')\n#create_plots(df2, 'RRkk-RRll-RRii-RRjj-PPRR', 'CMR')\n#create_plots(df2, 'MMkk-MMll-MMii-MMjj-MMPP', 'CMM')" ]
[ [ "numpy.expand_dims", "numpy.linspace", "torch.no_grad", "torch.cuda.is_available", "numpy.exp", "numpy.arange", "torch.from_numpy", "numpy.argmax", "torch.nn.Sequential", "torch.nn.BatchNorm1d", "numpy.invert", "torch.nn.Linear", "numpy.array", "torch.nn.Flatten", "numpy.ones", "torch.nn.Tanh", "numpy.prod", "torch.nn.init.xavier_uniform_", "torch.nn.MSELoss" ], [ "numpy.random.choice" ], [ "numpy.array" ], [ "pandas.read_csv", "matplotlib.pyplot.show", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots_adjust" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
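The DQN code in the entry above pairs an epsilon-greedy policy with Double DQN targets: the online Q-network selects the best next action and the target network evaluates it. A minimal NumPy sketch of that target computation, with made-up batch values standing in for samples from the replay memory (shapes and numbers are illustrative only):

import numpy as np

# Toy batch: 4 transitions, 3 actions (illustrative shapes)
rewards = np.array([1.0, 0.0, 0.0, 1.0], dtype=np.float32)
dones = np.array([False, False, True, False])
q_next_online = np.random.rand(4, 3).astype(np.float32)  # online net on s'
q_next_target = np.random.rand(4, 3).astype(np.float32)  # target net on s'
discount = 0.99

# Double DQN: the online network picks the action, the target network scores it
best_actions = np.argmax(q_next_online, axis=1)
targets = rewards + np.invert(dones).astype(np.float32) * discount * \
    q_next_target[np.arange(4), best_actions]
print(targets)  # terminal transitions keep only their immediate reward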
LorenFrankLab/rec_to_nwb
[ "d0630f414662963ebbe23aedf8f3ce07628636bc", "d0630f414662963ebbe23aedf8f3ce07628636bc" ]
[ "rec_to_nwb/processing/nwb/components/iterator/multi_thread_data_iterator.py", "rec_to_nwb/scripts/visualise_timestamp_distances.py" ]
[ "import concurrent.futures\n\nimport numpy as np\nfrom hdmf.data_utils import DataChunk\nfrom rec_to_nwb.processing.nwb.components.iterator.data_iterator import \\\n    DataIterator\n\n\nclass MultiThreadDataIterator(DataIterator):\n    def __init__(self, data, number_of_threads=6):\n        DataIterator.__init__(self, data)\n        self.number_of_threads = number_of_threads\n\n    def __next__(self):\n        if self._current_index < self.number_of_steps:\n            number_of_threads_in_current_step = min(\n                self.number_of_threads,\n                self.number_of_files_in_single_dataset - self.current_file)\n            with concurrent.futures.ThreadPoolExecutor() as executor:\n                threads = [executor.submit(\n                    MultiThreadDataIterator.get_data_from_file,\n                    self.data, self.current_dataset, self.current_file + i)\n                    for i in range(number_of_threads_in_current_step)]\n                data_from_multiple_files = ()\n                for thread in threads:\n                    data_from_multiple_files += (thread.result(),)\n            stacked_data_from_multiple_files = np.hstack(\n                data_from_multiple_files)\n            selection = self.get_selection(\n                number_of_threads=number_of_threads_in_current_step,\n                current_dataset=self.current_dataset,\n                dataset_file_length=self.dataset_file_length,\n                current_file=self.current_file,\n                number_of_rows=self.number_of_rows)\n            data_chunk = DataChunk(\n                data=stacked_data_from_multiple_files, selection=selection)\n\n            self._current_index += number_of_threads_in_current_step\n            self.current_file += number_of_threads_in_current_step\n\n            if self.current_file >= self.number_of_files_in_single_dataset:\n                self.current_dataset += 1\n                self.current_file = 0\n\n            del stacked_data_from_multiple_files\n            return data_chunk\n\n        raise StopIteration\n\n    next = __next__\n\n    @staticmethod\n    def get_data_from_file(data, current_dataset, current_file):\n        return np.transpose(data.read_data(current_dataset, current_file))\n\n\n# TODO: finish this code and move to new file when data are extracted in a single file.\nclass ChunkedDataIterator(DataIterator):\n    def __init__(self, data, number_of_threads=6, read_chunk_mb=100):\n        DataIterator.__init__(self, data)\n        self.number_of_threads = number_of_threads\n        self.read_chunk_mb = read_chunk_mb\n        # Figure out the size of each datafile in each dataset where one dataset is an epoch\n        self.dataset_file_dims = []  # must be a list, since it is appended to below\n        for dataset in range(self.number_of_datasets):\n            self.dataset_file_dims.append(data.get_data_dims(dataset, 0))\n\n    def __next__(self):\n        if self._current_index < self.number_of_steps:\n            number_of_threads_in_current_step = min(\n                self.number_of_threads,\n                self.number_of_files_in_single_dataset - self.current_file)\n            with concurrent.futures.ThreadPoolExecutor() as executor:\n                threads = [executor.submit(\n                    MultiThreadDataIterator.get_data_from_file,\n                    self.data, self.current_dataset, self.current_file + i)\n                    for i in range(number_of_threads_in_current_step)]\n                data_from_multiple_files = ()\n                for thread in threads:\n                    data_from_multiple_files += (thread.result(),)\n            stacked_data_from_multiple_files = np.hstack(\n                data_from_multiple_files)\n            selection = self.get_selection(\n                number_of_threads=number_of_threads_in_current_step,\n                current_dataset=self.current_dataset,\n                dataset_file_length=self.dataset_file_length,\n                current_file=self.current_file,\n                number_of_rows=self.number_of_rows)\n            data_chunk = DataChunk(\n                data=stacked_data_from_multiple_files, selection=selection)\n\n            self._current_index += number_of_threads_in_current_step\n            self.current_file += number_of_threads_in_current_step\n\n            if self.current_file >= self.number_of_files_in_single_dataset:\n                self.current_dataset 
+= 1\n self.current_file = 0\n\n del stacked_data_from_multiple_files\n return data_chunk\n\n raise StopIteration\n\n next = __next__\n\n @staticmethod\n def get_data_from_file(data, current_dataset, current_file):\n return np.transpose(data.read_data(current_dataset, current_file))\n", "import os\nfrom pathlib import Path\n\nimport pandas as pd\nfrom matplotlib import pyplot\nfrom mountainlab_pytools.mdaio import readmda\nfrom rec_to_binaries.read_binaries import readTrodesExtractedDataFile\nfrom rec_to_nwb.processing.metadata.metadata_manager import MetadataManager\nfrom rec_to_nwb.processing.time.continuous_time_extractor import \\\n ContinuousTimeExtractor\nfrom rec_to_nwb.processing.time.timestamp_converter import TimestampConverter\nfrom rec_to_nwb.processing.tools.data_scanner import DataScanner\n\npath = Path(__file__).parent.parent\npath.resolve()\n\n\ndef read_mda_timestamps(file):\n return readmda(file)\n\n\ndef read_pos_timestamps(file):\n pos_online = readTrodesExtractedDataFile(file)\n position = pd.DataFrame(pos_online['data'])\n return position.time.to_numpy(dtype='int64')\n\n\ndef get_posonline_data_file(dataset):\n all_pos = dataset.get_all_data_from_dataset('pos')\n for pos_file in all_pos:\n if pos_file.endswith('pos_online.dat'):\n return os.path.join(dataset.get_data_path_from_dataset('pos'),\n pos_file)\n return None\n\n\ndef extract_datasets(data_scanner, animal_name, date):\n data_scanner.extract_data_from_date_folder(date)\n dataset_names = data_scanner.get_all_epochs(date)\n return [data_scanner.data[animal_name][date][dataset]\n for dataset in dataset_names]\n\n\nif __name__ == \"__main__\":\n animal_name = 'beans'\n date = '20190718'\n data_path = 'C:/Users/wbodo/Desktop/resy/test/'\n # data_path = str(path) + '/test/test_data/'\n nwb_metadata = MetadataManager(\n str(path) + '/test/processing/res/metadata.yml',\n [str(path) + '/test/processing/res/probe1.yml',\n str(path) + '/test/processing/res/probe2.yml',\n str(path) + '/test/processing/res/probe3.yml']\n )\n data_scanner = DataScanner(data_path, animal_name, nwb_metadata)\n datasets = extract_datasets(data_scanner, animal_name, date)\n\n pos_timestamps_files = [get_posonline_data_file(\n dataset) for dataset in datasets]\n mda_timestamps_files = [dataset.get_mda_timestamps()\n for dataset in datasets]\n continuous_time_files = [dataset.get_continuous_time()\n for dataset in datasets]\n\n timestamps = [read_pos_timestamps(timestamps_file)\n for timestamps_file in pos_timestamps_files]\n continuous_time_extractor = ContinuousTimeExtractor()\n continuous_time_dicts = continuous_time_extractor.get_continuous_time_dict(\n continuous_time_files)\n\n distances = []\n max_distance = 0\n for i, continuous_time_dict in enumerate(continuous_time_dicts):\n converted_timestamps = TimestampConverter.convert_timestamps(\n continuous_time_dict, timestamps[i])\n for j in range(1, len(converted_timestamps) - 1):\n if converted_timestamps[j] > 0 and converted_timestamps[j - 1] > 0:\n new_dist = (\n converted_timestamps[j] - converted_timestamps[j - 1])\n if new_dist > max_distance:\n max_distance = new_dist\n\n distances.append(new_dist)\n\n pyplot.hist(distances, bins=500, range=(0, max_distance))\n pyplot.show()\n\n max_distance = 0\n distances = []\n timestamps = [read_mda_timestamps(timestamps_file)\n for timestamps_file in mda_timestamps_files]\n\n for i, continuous_time_dict in enumerate(continuous_time_dicts):\n converted_timestamps = TimestampConverter.convert_timestamps(\n continuous_time_dict, timestamps[i])\n for 
j in range(1, len(converted_timestamps) - 1):\n if converted_timestamps[j] > 0 and converted_timestamps[j - 1] > 0:\n new_dist = (\n converted_timestamps[j] - converted_timestamps[j - 1])\n if new_dist > max_distance:\n max_distance = new_dist\n\n distances.append(new_dist)\n\n pyplot.hist(distances, bins=4000, range=(0, max_distance))\n pyplot.show()\n" ]
[ [ "numpy.hstack" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
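The MultiThreadDataIterator in the entry above submits one read per file to a thread pool and hstacks the transposed results into a single chunk. A self-contained sketch of the same fan-out/concatenate pattern; read_file is a hypothetical stand-in for data.read_data, and the array shape is invented for illustration:

import concurrent.futures
import numpy as np

def read_file(index):
    # Hypothetical reader: each file contributes a (rows, cols) block
    return np.full((4, 10), index, dtype=np.int16)

def read_files_parallel(first_file, n_files):
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [executor.submit(read_file, first_file + i)
                   for i in range(n_files)]
        pieces = tuple(f.result() for f in futures)
    # columns from consecutive files end up side by side, as in the iterator
    return np.hstack(pieces)

chunk = read_files_parallel(first_file=0, n_files=6)
print(chunk.shape)  # (4, 60)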
RodrigoSanMartin/deploy_API_sagemaker_pytorch_webapp_sentiment_analysis
[ "d7df00b91c05a96ccf0975c621704cacb9bb0a37" ]
[ "train/train.py" ]
[ "\nimport argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the stored model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef _get_train_data_loader(batch_size, training_dir):\n print(\"Get train data loader.\")\n\n train_data = pd.read_csv(os.path.join(training_dir, \"train.csv\"), header=None, names=None)\n\n train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()\n train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()\n\n train_ds = torch.utils.data.TensorDataset(train_X, train_y)\n\n return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)\n\n\ndef train(model, train_loader, epochs, optimizer, loss_fn, device):\n \"\"\"\n This is the training method that is called by the PyTorch training script. The parameters\n passed are as follows:\n model - The PyTorch model that we wish to train.\n train_loader - The PyTorch DataLoader that should be used during training.\n epochs - The total number of epochs to train for.\n optimizer - The optimizer to use during training.\n loss_fn - The loss function used for training.\n device - Where the model and data should be loaded (gpu or cpu).\n \"\"\"\n \n # TODO: Paste the train() method developed in the notebook here.\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n \n model.zero_grad() # Reseting gradients for each batch \n forward_pass = model.forward(batch_X) #Perform a forward pass of our model on batch_X input. \n loss = loss_fn(forward_pass, batch_y) #Get the loss for this batch \n loss.backward() # Get Gradients \n optimizer.step() # New Parameters\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))\n \n\nif __name__ == '__main__':\n # All of the model parameters and training parameters are sent as arguments when the script\n # is executed. 
Here we set up an argument parser to easily access the parameters.\n\n parser = argparse.ArgumentParser()\n\n # Training Parameters\n parser.add_argument('--batch-size', type=int, default=512, metavar='N',\n help='input batch size for training (default: 512)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\n # Model Parameters\n parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',\n help='size of the word embeddings (default: 32)')\n parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',\n help='size of the hidden dimension (default: 100)')\n parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',\n help='size of the vocabulary (default: 5000)')\n\n # SageMaker Parameters\n parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))\n parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])\n parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])\n\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device {}.\".format(device))\n\n torch.manual_seed(args.seed)\n\n # Load the training data.\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir)\n\n # Build the model.\n model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)\n\n with open(os.path.join(args.data_dir, \"word_dict.pkl\"), \"rb\") as f:\n model.word_dict = pickle.load(f)\n\n print(\"Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.\".format(\n args.embedding_dim, args.hidden_dim, args.vocab_size\n ))\n\n # Train the model.\n optimizer = optim.Adam(model.parameters())\n loss_fn = torch.nn.BCELoss()\n\n train(model, train_loader, args.epochs, optimizer, loss_fn, device)\n\n # Save the parameters used to construct the model\n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'embedding_dim': args.embedding_dim,\n 'hidden_dim': args.hidden_dim,\n 'vocab_size': args.vocab_size,\n }\n torch.save(model_info, f)\n\n # Save the word_dict\n word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'wb') as f:\n pickle.dump(model.word_dict, f)\n\n # Save the model parameters\n model_path = os.path.join(args.model_dir, 'model.pth')\n with open(model_path, 'wb') as f:\n torch.save(model.cpu().state_dict(), f)\n" ]
[ [ "torch.load", "torch.manual_seed", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.nn.BCELoss", "torch.cuda.is_available", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
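The training script above persists three artifacts (model_info.pth, word_dict.pkl, model.pth) that model_fn later reassembles. A minimal sketch of that save/load round trip; the toy nn.Linear model, the dictionary contents, and the /tmp path are assumptions for illustration, not part of the original:

import os
import pickle
import torch
import torch.nn as nn

model_dir = "/tmp/model_demo"  # illustrative path
os.makedirs(model_dir, exist_ok=True)

model = nn.Linear(8, 1)  # toy stand-in for the LSTMClassifier
model_info = {"embedding_dim": 32, "hidden_dim": 100, "vocab_size": 5000}
word_dict = {"good": 2, "bad": 3}

# save, mirroring the end of the training script
torch.save(model_info, os.path.join(model_dir, "model_info.pth"))
with open(os.path.join(model_dir, "word_dict.pkl"), "wb") as f:
    pickle.dump(word_dict, f)
torch.save(model.state_dict(), os.path.join(model_dir, "model.pth"))

# reload, mirroring model_fn
info = torch.load(os.path.join(model_dir, "model_info.pth"))
restored = nn.Linear(8, 1)
restored.load_state_dict(torch.load(os.path.join(model_dir, "model.pth")))
restored.eval()
print(info)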
farr/RVChallenge
[ "1925682f1a19442560401a7fd2a5b2dde9472f11" ]
[ "setup.py" ]
[ "from setuptools import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy as np\n\nsetup(\n name='rvchallenge',\n version=\"0.0.1\",\n description='An attempt at competing in the RVChallenge.',\n long_description='See https://rv-challenge.wikispaces.com',\n\n author='Will M. Farr',\n author_email='[email protected]',\n\n license='MIT',\n\n packages=['rvchallenge'],\n\n ext_modules = cythonize([Extension('kepler', ['rvchallenge/kepler.pyx'])]),\n include_dirs = [np.get_include()]\n)\n" ]
[ [ "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
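The setup.py above passes np.get_include() through the top-level include_dirs argument so the compiler can find the NumPy C headers. An equivalent sketch (assuming the same rvchallenge/kepler.pyx layout) attaches the headers to the Extension itself, which scopes them to the one module that needs them:

from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy as np

ext = Extension('kepler', ['rvchallenge/kepler.pyx'],
                include_dirs=[np.get_include()])  # NumPy headers, per extension

setup(name='rvchallenge', ext_modules=cythonize([ext]))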
vicgalle/ARAMARL
[ "1f0e3d3f76b345e12ec58029dc62d92f33738084" ]
[ "engine.py" ]
[ "\"\"\"\nThis module implements several environments, i.e., the simulators in which agents will interact and learn.\nAny environment is characterized by the following two methods:\n * step : receives the actions taken by the agents, and returns the new state of the simulator and the rewards\n perceived by each agent, amongst other things.\n * reset : sets the simulator at the initial state.\n\"\"\"\n\nimport numpy as np\n\n\nclass RMG():\n \"\"\"\n A two-agent environment for a repeated matrix (symmetric) game.\n Possible actions for each agent are (C)ooperate (0) and (D)efect (1).\n The state is s_t = (a_{t-1}, b_{t-1}) with a_{t-1} and b_{t-1} the actions of the two players in the last turn,\n plus an initial state s_0.\n \"\"\"\n # Possible actions\n NUM_AGENTS = 2\n NUM_ACTIONS = 2\n NUM_STATES = NUM_AGENTS*NUM_ACTIONS + 1 # we add the initial state.\n\n def __init__(self, max_steps, payouts, batch_size=1):\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.payout_mat = payouts\n self.available_actions = [\n np.ones((batch_size, self.NUM_ACTIONS), dtype=int)\n for _ in range(self.NUM_AGENTS)\n ]\n\n self.step_count = None\n\n def reset(self):\n self.step_count = 0\n init_state = np.zeros((self.batch_size, self.NUM_STATES))\n init_state[:, -1] = 1\n observations = [init_state, init_state]\n info = [{'available_actions': aa} for aa in self.available_actions]\n return observations, info\n\n def step(self, action):\n ac0, ac1 = action\n\n self.step_count += 1\n\n rewards = []\n\n # The state is a OHE vector indicating [CC, CD, DC, DD, initial], (iff NUM_STATES = 5)\n state0 = np.zeros((self.batch_size, self.NUM_STATES))\n state1 = np.zeros((self.batch_size, self.NUM_STATES))\n for i, (a0, a1) in enumerate(zip(ac0, ac1)): # iterates over batch dimension\n rewards.append([self.payout_mat[a1][a0], self.payout_mat[a0][a1]])\n state0[i, a0 * 2 + a1] = 1\n state1[i, a1 * 2 + a0] = 1\n rewards = list(map(np.asarray, zip(*rewards)))\n observations = [state0, state1]\n\n done = (self.step_count == self.max_steps)\n info = [{'available_actions': aa} for aa in self.available_actions]\n\n return observations, rewards, done, info\n\n\nclass AdvRw():\n \"\"\"\n A two-action stateless environment in which an adversary controls the reward\n \"\"\"\n\n def __init__(self, mode='friend', p=0.5):\n self._mode = mode\n # adversary estimation of our action\n self._policy = np.asarray([0.5, 0.5])\n self._learning_rate = 0.25\n self._p = p # probability for the neutral environment\n\n def reset(self):\n # self._policy = np.asarray([0.5, 0.5])\n return\n\n def step(self, action):\n\n if self._mode == 'friend':\n if np.argmax(self._policy) == action:\n reward = +50\n else:\n reward = -50\n elif self._mode == 'adversary':\n if np.argmax(self._policy) == action:\n reward = -50\n else:\n reward = +50\n elif self._mode == 'neutral':\n box = np.random.rand() < self._p\n if int(box) == action:\n reward = +50\n else:\n reward = -50\n\n self._policy = (self._learning_rate * np.array([1.0-action, action])\n + (1.0-self._learning_rate) * self._policy)\n self._policy /= np.sum(self._policy)\n\n # print('---')\n #print('r', reward)\n #print('p', self._policy)\n # print('---')\n\n return None, (reward, -reward), True, None\n\n\nclass AdvRw2():\n \"\"\"\n Friend or Foe modified to model adversary separately..\n \"\"\"\n\n def __init__(self, max_steps, payout=50, batch_size=1):\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.payout = payout\n self.available_actions = np.array([0, 1])\n 
self.step_count = 0\n\n def reset(self):\n self.step_count = 0\n return\n\n def step(self, action):\n ac0, ac1 = action\n\n self.step_count += 1\n\n dm_reward = self.payout if ac0 == ac1 else -self.payout\n\n rewards = [dm_reward, -dm_reward] # Assuming zero-sum...\n observations = None\n\n done = (self.step_count == self.max_steps)\n\n return observations, rewards, done\n#\n\n\nclass AdvRwGridworld():\n \"\"\"\n Friend or Foe modified to model adversary separately, with gridworld\n \"\"\"\n\n def __init__(self, max_steps, batch_size=1):\n self.H = 4\n self.W = 3\n self.world = np.array([self.H, self.W]) # The gridworld\n\n self.targets = np.array([[0, 0], [0, 2]]) # Position of the targets\n self.DM = np.array([3, 1]) # Initial position of the DM\n\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.available_actions_DM = np.array(\n [0, 1, 2, 3]) # Up, right, down, left\n self.available_actions_Adv = np.array([0, 1]) # Select target 1 or 2.\n self.step_count = 0\n\n def reset(self):\n self.step_count = 0\n self.DM = np.array([3, 1])\n return\n\n def _coord2int(self, pos):\n return pos[0] + self.H*pos[1]\n\n def step(self, action):\n ac_DM, ac_Adv = action\n\n self.step_count += 1\n\n if ac_DM == 0: # Up\n self.DM[0] = np.maximum(0, self.DM[0] - 1)\n elif ac_DM == 1: # Right\n self.DM[1] = np.minimum(self.W - 1, self.DM[1] + 1)\n elif ac_DM == 2: # Down\n self.DM[0] = np.minimum(self.H - 1, self.DM[0] + 1)\n elif ac_DM == 3: # Left\n self.DM[1] = np.maximum(0, self.DM[1] - 1)\n\n done = False\n dm_reward = -1 # One step more\n adv_reward = 0\n\n # Check if DM is @ targets, then finish\n\n if np.all(self.DM == self.targets[0, :]):\n if ac_Adv == 0:\n dm_reward += 50\n adv_reward -= 50\n else:\n dm_reward -= 50\n adv_reward += 50\n done = True\n\n if np.all(self.DM == self.targets[1, :]):\n if ac_Adv == 1:\n dm_reward += 50\n adv_reward -= 50\n else:\n dm_reward -= 50\n adv_reward += 50\n done = True\n\n # Check if step limit, then finish\n\n if self.step_count == self.max_steps:\n done = True\n\n #dm_reward = self.payout if ac0 == ac1 else -self.payout\n\n # rewards = [dm_reward, -dm_reward] #Assuming zero-sum...\n #observations = None\n\n #done = (self.step_count == self.max_steps)\n\n return self._coord2int(self.DM), (dm_reward, adv_reward), done\n\n\nclass Blotto():\n \"\"\"\n Blotto game with multiple adversaries\n \"\"\"\n\n def __init__(self, max_steps, payout=50, batch_size=1, deterministic=True):\n self.max_steps = max_steps\n self.batch_size = batch_size\n #self.payout = payout\n self.available_actions = np.array([0, 1])\n self.step_count = 0\n self.deterministic = deterministic\n\n def reset(self):\n self.step_count = 0\n return\n\n def step(self, actions):\n \"\"\" action[0] is that of the defender \"\"\"\n self.step_count += 1\n\n num_attackers = len(actions) - 1\n\n actions = np.asarray(actions)\n\n att_rew = np.sum(actions[1:, ], axis=0)\n tmp = actions[0, ] - att_rew\n\n draw_pos = tmp == 0\n if self.deterministic != True:\n tmp[tmp == 0] = np.random.choice(\n [-1, 1], size=len(tmp[tmp == 0]))*(actions[0, draw_pos] > 0)\n\n\n ind = np.sum(actions, axis=0) > 0 ## to see in which position there was at least one resource\n\n tmp = tmp*ind\n\n tmp[tmp < 0] = -1 # Defender looses corresponding position\n tmp[tmp > 0] = 1 # Defender wins corresponding position\n\n # print('tmp', tmp)\n\n reward_dm = tmp.sum()\n\n tmp2 = actions[1:, ] - actions[0, ]\n tmp2[tmp2 > 0] = 1\n tmp2[tmp2 < 0] = -1\n\n # print('tmp2', tmp2)\n\n # s = np.sum(actions[1:, draw_pos], axis=0)\n 
z = draw_pos & actions[1:, ]\n\n        z_new = z/z.sum(axis=0)\n        z_new = np.nan_to_num(z_new)\n        z_new = z_new*ind\n\n        # print('z_new', z_new)\n\n        #z_new = np.zeros_like(z_new)\n        z_new[:, draw_pos] = z_new[:, draw_pos]*np.sign(-tmp[draw_pos])\n\n        tmp2[z == 1.] = 0\n\n        # print('tmp2', tmp2)\n\n        z_new = tmp2 + z_new\n\n        # print('z-new', z_new)\n        # print('tmp2', tmp2)\n\n        rewards_atts = np.sum(z_new*(actions[1:, ] > 0), axis=1)\n\n        rewards = [reward_dm]\n\n        for r in rewards_atts:\n            rewards.append(r)\n\n        observations = None\n\n        done = (self.step_count == self.max_steps)\n\n        return observations, rewards, done\n\n\nclass modified_Blotto():\n    \"\"\"\n    Modified Blotto game with multiple adversaries (we just care about positions\n    where there has been some attack)\n    \"\"\"\n\n    def __init__(self, max_steps, payout=50, batch_size=1, deterministic=True):\n        self.max_steps = max_steps\n        self.batch_size = batch_size\n        #self.payout = payout\n        self.available_actions = np.array([0, 1])\n        self.step_count = 0\n        self.deterministic = deterministic\n\n    def reset(self):\n        self.step_count = 0\n        return\n\n    def step(self, actions):\n        \"\"\" action[0] is that of the defender \"\"\"\n        self.step_count += 1\n\n        actions = np.asarray(actions)\n\n        ## Defender's Reward\n        att_rew = np.sum(actions[1:, ], axis=0)\n        attacked_pos = att_rew > 0 ## indicates in which positions attacks were performed\n\n        tmp = actions[0, ] - att_rew\n        tmp[np.logical_not(attacked_pos)] = 0.0\n\n        # Code non-deterministic case ??\n\n        tmp[tmp < 0] = -1 # Defender loses corresponding position\n        tmp[tmp > 0] = 1 # Defender wins corresponding position\n        reward_dm = tmp.sum()\n\n        ## Attacker's Reward\n        tmp_att = -tmp\n\n        h = actions[1:] > 0\n        units = tmp_att / np.sum(h, axis=0)\n        units = np.nan_to_num(units)\n\n        rewards_att = h*units\n        rewards_atts = np.sum(rewards_att, axis=1)\n\n        rewards = [reward_dm]\n\n        for r in rewards_atts:\n            rewards.append(r)\n\n        observations = None\n\n        done = (self.step_count == self.max_steps)\n\n        return observations, rewards, done\n\n\nclass Urban():\n    \"\"\"\n    A two-agent environment for an urban resource allocation problem.\n    \"\"\"\n\n    def __init__(self):\n        # The state is designated by s = (s_0, s_1, s_2, s_3)\n        # s_0 represents whether we are in the initial state or not\n        # s_i, i>0 represent whether the attack was successful on the site i.\n        self.state = np.array([1, 0, 0, 0])\n        self.step_count = 0\n        self.max_steps = 2 # as in the ARA for Urban alloc. 
paper\n self.payoffs = np.array([1., 0.75, 2.]) # v_i from the paper\n\n # Transition dynamics\n\n # p(s_1_i = 1 | d1_i, a_i) for site i\n self.p_s1_d1_a = np.array([[0, 0.85, 0.95],\n [0, 0.6, 0.75],\n [0, 0.3, 0.5],\n [0, 0.05, 0.1],\n [0, 0, 0.05]])\n\n # p(s_2_i = 1 | s_1_i, d2_i) for site i\n self.p_s2_s1_d2 = np.array([[0, 0, 0, 0, 0],\n [1., 0.95, 0.8, 0.6, 0.4]])\n\n self.n_sites = 3\n self.k = 0.005\n self.rho = 0.1\n self.c_A = 10.\n self.c_D = 10.\n\n self.available_actions_DM = [i for i in range(5**self.n_sites)] # up to four units in each site\n self.n_states = 2 ** (self.n_sites + 1)\n\n def state2idx(self, state):\n \"\"\"\n In [19]: state = np.array([1, 0, 0, 1])\n In [20]: state2idx(state)\n Out[20]: 9\n \"\"\"\n pows = np.array([1 << i for i in range(len(state))[::-1]])\n return np.dot(pows, state)\n\n def idx2state(self, idx):\n \"\"\"\n In [28]: idx = 9\n In [30]: idx2state(idx)\n Out[30]: array([1, 0, 0, 1])\n \"\"\"\n return (idx & (1 << np.arange(len(self.state))) > 0).astype(int)\n\n def actionDM2idx(self, a):\n \"\"\" Now we have 3 sites, in which we can defend with up to 5 units. \"\"\"\n pows = np.array([5**i for i in range(self.n_sites)[::-1]])\n return np.dot(pows, a)\n\n def idx2actionDM(self, idx):\n return list(map(int, (list(np.base_repr(idx, 5, padding=3))[-self.n_sites:])))\n\n def valid_actionDM(self, state_idx, action_idx, prev_action_idx):\n\n action = self.idx2actionDM(action_idx)\n prev_action = self.idx2actionDM(prev_action_idx)\n state = self.idx2state(state_idx)\n\n if state[0] == 1: #initial state\n #print('a', action)\n return np.sum(action) == 4\n else: # second move\n #print('b', action, prev_action)\n c1 = np.sum(action) == 4\n c2 = action[0] <= prev_action[0] + prev_action[1]\n c3 = action[1] <= prev_action[0] + prev_action[1] + prev_action[2]\n c4 = action[2] <= prev_action[1] + prev_action[2]\n return c1 & c2 & c3 & c4\n\n def reset(self):\n self.step_count = 0\n self.state = np.array([1, 0, 0, 0])\n return\n\n def step(self, action):\n\n # first action is that from the DM\n ac0, ac1 = action\n\n self.step_count += 1\n\n if self.step_count == 1:\n\n self.state = np.array([0, 0, 0, 0])\n for i in range(self.n_sites):\n p = self.p_s1_d1_a[ac0[i], ac1[i]]\n u = np.random.rand()\n if u <= p:\n self.state[i + 1] = 1 # success\n\n rewards = [0., 0.] 
# no rewards until end of episode\n observations = self.state\n\n done = False\n\n return observations, rewards, done\n\n elif self.step_count == 2: # end of episode\n\n for i in range(self.n_sites):\n p = self.p_s2_s1_d2[self.state[i+1], ac0[i]]\n u = np.random.rand()\n if u <= p:\n self.state[i + 1] = 1 # success\n\n done = True\n observations = self.state\n #print(np.dot(self.payoffs, self.state[1:]))\n rewards = [- np.exp(self.c_D * self.rho * np.dot(self.payoffs, self.state[1:])),\n np.exp(self.c_A * np.dot(self.payoffs,self.state[1:]) - np.sum(ac1 * self.k))] \n\n return observations, rewards, done\n\n\nclass SimpleCoin():\n \"\"\"\n Simple Coin Game from LOLA paper, where state is just the color of the coin.\n \"\"\"\n\n def __init__(self, max_steps, batch_size=1):\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.available_actions = np.array([0, 1]) # 1 pick coin.\n self.step_count = 0\n self.state = 0 # initially, coin is red (for first player)\n\n def reset(self):\n self.step_count = 0\n return\n\n def step(self, action):\n ac0, ac1 = action\n\n self.step_count += 1\n\n rewards = np.asarray([ac0, ac1]) # +1 point if thw agent picks coin.\n \n # conflict\n if ac0 and self.state == 1:\n rewards[1] -= 2\n \n if ac1 and self.state == 0:\n rewards[0] -= 2\n\n if np.random.rand() < 0.5:\n self.state = 0\n else:\n self.state = 1\n\n done = (self.step_count == self.max_steps)\n\n return self.state, rewards, done\n#\n\nclass CoinGame():\n \"\"\"\n Coin Game from LOLA paper, played over a NxN grid\n \"\"\"\n\n def __init__(self, max_steps=5, batch_size=1, tabular=True):\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.available_actions = np.array([0, 1, 2, 3]) # four directions to move. Agents pick up coins by moving onto the position where the coin is located\n self.step_count = 0\n self.N = 3\n self.available_actions = np.array(\n [0, 1])\n self.available_actions_DM = np.array(\n [0, 1, 2, 3])\n self.available_actions_Adv = np.array(\n [0, 1, 2, 3])\n #self.state = np.zeros([4, self.N, self.N]) # blue player, red player, blue coin, red coin positions as OHE over grid.\n\n self.blue_player = [1, 0]\n self.red_player = [1, 2]\n if (np.random.rand() < 0.0):\n self.blue_coin = [0, 1]\n self.red_coin = [2, 1]\n else:\n self.blue_coin = [2, 1]\n self.red_coin = [0, 1]\n\n self.tabular = tabular\n\n def get_state(self):\n o = np.zeros([4, self.N, self.N])\n o[0,self.blue_player[0], self.blue_player[1]] = 1\n o[1,self.red_player[0], self.red_player[1]] = 1\n o[2,self.blue_coin[0], self.blue_coin[1]] = 1\n o[3,self.red_coin[0], self.red_coin[1]] = 1\n\n if self.tabular:\n p1 = self.blue_player[0] + self.N*self.blue_player[1]\n p2 = self.red_player[0] + self.N*self.red_player[1]\n p3 = self.blue_coin[0] + self.N*self.blue_coin[1]\n p4 = self.red_coin[0] + self.N*self.red_coin[1]\n return int(p1 + (self.N)**2 * p2 + ((self.N)**2)**2 * p3 + ((self.N)**2)**3 * p4)\n\n return o\n\n def reset(self):\n self.step_count = 0\n\n # initial positions\n self.blue_player = [1, 0]\n self.red_player = [1, 2]\n\n if (np.random.rand() < 0.0):\n self.blue_coin = [0, 1]\n self.red_coin = [2, 1]\n else:\n self.blue_coin = [2, 1]\n self.red_coin = [0, 1]\n\n return\n\n def step(self, action):\n ac0, ac1 = action\n\n self.step_count += 1\n\n reward_blue, reward_red = 0, 0\n\n # agents move\n if ac0 == 0: # up\n self.blue_player[0] = np.maximum(self.blue_player[0] - 1, 0)\n elif ac0 == 1: # right\n self.blue_player[1] = np.minimum(self.blue_player[1] + 1, self.N-1)\n elif ac0 == 2: # 
down\n self.blue_player[0] = np.minimum(self.blue_player[0] + 1, self.N-1)\n else:\n self.blue_player[1] = np.maximum(self.blue_player[1] - 1, 0)\n\n if ac1 == 0: # up\n self.red_player[0] = np.maximum(self.red_player[0] - 1, 0)\n elif ac1 == 1: # right\n self.red_player[1] = np.minimum(self.red_player[1] + 1, self.N-1)\n elif ac1 == 2: # down\n self.red_player[0] = np.minimum(self.red_player[0] + 1, self.N-1)\n else:\n self.red_player[1] = np.maximum(self.red_player[1] - 1, 0)\n\n # check coins\n # if either agent picks coin, +1 for him\n if self.blue_player == self.blue_coin:\n if self.red_player == self.blue_coin:\n reward_blue += 0.5\n else:\n reward_blue += 1\n self.blue_coin = [-1, -1]\n\n if self.red_player == self.red_coin:\n if self.blue_player == self.red_coin:\n reward_red += 0.5\n else:\n reward_red += 1\n self.red_coin = [-1, -1]\n\n if self.blue_player == self.red_coin:\n if self.red_player == self.red_coin:\n reward_blue += 0.5\n else:\n reward_blue += 1\n self.red_coin = [-1, -1]\n \n if self.red_player == self.blue_coin:\n if self.blue_player == self.blue_coin:\n reward_red += 0.5\n else:\n reward_red += 1\n self.blue_coin = [-1, -1]\n \n \n \n \n done = self.step_count == self.max_steps\n \n return self.get_state(), np.array([reward_blue, reward_red]), done" ]
[ [ "numpy.dot", "numpy.logical_not", "numpy.maximum", "numpy.minimum", "numpy.asarray", "numpy.nan_to_num", "numpy.ones", "numpy.all", "numpy.sign", "numpy.base_repr", "numpy.argmax", "numpy.random.rand", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
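The Urban environment in the entry above packs its binary state vector into an integer with powers of two (state2idx) and unpacks it with bit masks (idx2state). The round trip, isolated as standalone functions reproducing the docstring example where [1, 0, 0, 1] maps to 9 (a self-contained copy of the logic, not an import from the entry):

import numpy as np

def state2idx(state):
    # most significant bit first, matching the environment's convention
    pows = np.array([1 << i for i in range(len(state))[::-1]])
    return int(np.dot(pows, state))

def idx2state(idx, n_bits=4):
    # mask each bit and cast the booleans back to 0/1
    return (idx & (1 << np.arange(n_bits)) > 0).astype(int)

s = np.array([1, 0, 0, 1])
i = state2idx(s)
print(i, idx2state(i))  # 9 [1 0 0 1]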
zxsted/meta-critic-networks
[ "1768751f84845bd6fe98a13d5b57dfaca154c1f8" ]
[ "multi_arm_bandit/mvn_arm4/mvn_test_arm4_sample10_new.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport random\nimport os\n\nimport json\n\ndef save_to_json(fname, data):\n with open(fname, 'w') as outfile:\n json.dump(data, outfile)\n\n# Hyper Parameters\nTASK_NUMS = 100\nTEST_NUMS_PER_TASK = 10\nARM_NUMS = 4\nSTEP = 300\nSAMPLE_NUMS = 10\n\n\nclass MultiArmBandit():\n \"\"\"docstring for MultiArmBandit\"\"\"\n def __init__(self,arm_nums,probs):\n self.arm_nums = arm_nums\n self.probs = probs#np.random.dirichlet(np.ones(arm_nums),size=1)[0]\n\n def step(self,action): # one hot action\n prob = np.sum(self.probs * action)\n if random.random() < prob:\n return 1\n else:\n return 0\n\n\n\nclass ActorNetwork(nn.Module):\n\n def __init__(self,input_size,hidden_size,action_size):\n super(ActorNetwork, self).__init__()\n self.fc1 = nn.Linear(input_size,hidden_size)\n self.fc2 = nn.Linear(hidden_size,hidden_size)\n self.fc3 = nn.Linear(hidden_size,action_size)\n\n def forward(self,x):\n out = F.relu(self.fc1(x))\n out = F.relu(self.fc2(out))\n out = F.log_softmax(self.fc3(out))\n return out\n\nclass MetaValueNetwork(nn.Module):\n\n def __init__(self,input_size,hidden_size,output_size):\n super(MetaValueNetwork, self).__init__()\n self.fc1 = nn.Linear(input_size,hidden_size)\n self.fc2 = nn.Linear(hidden_size,hidden_size)\n self.fc3 = nn.Linear(hidden_size,output_size)\n\n def forward(self,x):\n out = F.relu(self.fc1(x))\n out = F.relu(self.fc2(out))\n out = self.fc3(out)\n return out\n\nclass TaskConfigNetwork(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_layers, output_size):\n super(TaskConfigNetwork, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)\n self.fc = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n # Set initial states\n h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))\n c0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))\n # Forward propagate RNN\n out, _ = self.lstm(x, (h0, c0))\n # Decode hidden state of last time step\n out = self.fc(out[:, -1, :])\n return out\n\ndef roll_out(actor_network,task,sample_nums):\n actions = []\n rewards = []\n softmax_action = torch.exp(actor_network(Variable(torch.Tensor([[1]]))))\n for step in range(sample_nums):\n action = np.random.choice(ARM_NUMS,p=softmax_action.data.numpy()[0])\n one_hot_action = [int(i == action) for i in range(ARM_NUMS)]\n reward = task.step(one_hot_action)\n actions.append(one_hot_action)\n rewards.append([reward])\n\n return torch.Tensor([actions]),torch.Tensor([rewards])\n\ndef roll_out_actions(actor_network,sample_nums):\n actions = []\n rewards = []\n softmax_action = torch.exp(actor_network(Variable(torch.Tensor([[1]]))))\n for step in range(sample_nums):\n action = np.random.choice(ARM_NUMS,p=softmax_action.data.numpy()[0])\n one_hot_action = [int(i == action) for i in range(ARM_NUMS)]\n actions.append(one_hot_action)\n\n return torch.Tensor([actions])\n\ndef main():\n\n mvn_input_dim = ARM_NUMS + 3\n task_config_input_dim = ARM_NUMS + 1\n # init meta value network with a task config network\n meta_value_network = MetaValueNetwork(input_size = mvn_input_dim,hidden_size = 80,output_size = 1)\n task_config_network = TaskConfigNetwork(input_size = task_config_input_dim,hidden_size = 30,num_layers = 1,output_size = 3)\n\n if 
os.path.exists(\"meta_value_network_arm4.pkl\"):\n meta_value_network.load_state_dict(torch.load(\"meta_value_network_arm4.pkl\"))\n print(\"load meta value network success\")\n if os.path.exists(\"task_config_network_arm4.pkl\"):\n task_config_network.load_state_dict(torch.load(\"task_config_network_arm4.pkl\"))\n print(\"load task config network success\")\n\n\n # init a task generator for data fetching\n results = []\n\n total_rewards = 0\n\n task_probs = json.load(open(\"tasks_arm4.json\"))\n\n for episode in range(TASK_NUMS):\n res_i = {}\n task_prob = task_probs[episode][\"task_probs\"]\n task = MultiArmBandit(ARM_NUMS,np.array(task_prob))\n res_i[\"arm_nums\"] = ARM_NUMS\n res_i[\"task_probs\"] = task.probs.tolist()\n res_i[\"sample_nums\"] = SAMPLE_NUMS\n\n aver_rewards = []\n correct_probs = []\n for test_nums in range(TEST_NUMS_PER_TASK):\n actor_network = ActorNetwork(1,40,ARM_NUMS)\n actor_network_optim = torch.optim.Adam(actor_network.parameters(),lr=0.001)\n\n pre_actions,pre_rewards = roll_out(actor_network,task,SAMPLE_NUMS)\n pre_data_samples = torch.cat((pre_actions,pre_rewards),2)\n\n task_configs = task_config_network(Variable(pre_data_samples)).repeat(1,SAMPLE_NUMS).view(-1,3)\n\n for step in range(STEP):\n\n inputs = Variable(torch.Tensor([[1]])) #[1,1]\n #actions = roll_out_actions(actor_network,SAMPLE_NUMS)\n actions_var = Variable(pre_actions.view(-1,ARM_NUMS))\n actor_data_samples = torch.cat((actions_var,task_configs.detach()),1) #[task_nums,5]\n log_softmax_actions = actor_network(inputs) # [1,2]\n log_softmax_actions = log_softmax_actions.repeat(1,SAMPLE_NUMS).view(-1,ARM_NUMS)\n # train actor network\n\n actor_network_optim.zero_grad()\n qs = meta_value_network(actor_data_samples)\n actor_network_loss = - torch.mean(torch.sum(log_softmax_actions*actions_var,1)* qs) #+ actor_criterion(actor_y_samples,target_y)\n actor_network_loss.backward()\n\n actor_network_optim.step()\n\n choice = torch.exp(actor_network(inputs)).data[0].numpy()\n aver_reward = np.sum(choice * task.probs)\n optimal_action = np.argmax(task.probs)\n optimal_choice = [int(i == optimal_action) for i in range(ARM_NUMS)]\n correct_prob = np.sum(choice*optimal_choice)\n\n aver_rewards.append(float(aver_reward))\n correct_probs.append(float(correct_prob))\n total_rewards += aver_reward\n\n\n res_i[\"aver_rewards\"] = aver_rewards\n res_i[\"correct_probs\"] = correct_probs\n\n results.append(res_i)\n\n print(\"aver_reward\",np.mean(aver_rewards),\"correct prob:\",np.mean(correct_probs),\"task:\",task.probs)\n\n save_to_json('mvn_arm_4_sample_10.json', results)\n print(\"total aver reward:\",total_rewards/TASK_NUMS/TEST_NUMS_PER_TASK)\n\n\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.Tensor", "torch.load", "torch.nn.LSTM", "torch.cat", "torch.sum", "torch.nn.Linear", "numpy.argmax", "numpy.mean", "numpy.array", "numpy.sum", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
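The actor update in the entry above repeats one forward pass over the sampled actions and minimizes -mean(log pi(a) * Q(a)), with Q supplied by the meta value network. A toy PyTorch sketch of that loss on fixed tensors; the logits, sampled actions, and critic scores are invented for illustration:

import torch

logits = torch.randn(1, 4, requires_grad=True)             # actor output, 4 arms
log_probs = torch.log_softmax(logits, dim=1).repeat(5, 1)  # repeated over 5 samples
onehot = torch.eye(4)[torch.tensor([0, 2, 1, 0, 3])]       # sampled one-hot actions
q = torch.tensor([[0.9], [0.1], [0.5], [0.8], [0.2]])      # critic scores per sample

# ascend log-probability of sampled actions, weighted by the critic's value
loss = -torch.mean(torch.sum(log_probs * onehot, 1, keepdim=True) * q)
loss.backward()
print(logits.grad)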
baustin13/two-stg-alma
[ "6400fbf1435fc4ef78331f8c730ce09dc5665cd5" ]
[ "Code/sage+gat+diffpool/cross_val.py" ]
[ "import networkx as nx\nimport numpy as np\nimport torch\n\nimport pickle\nimport random\n\nfrom graph_sampler import GraphSampler\n\ndef prepare_val_data(graphs, args, val_idx, max_nodes=0):\n\n random.shuffle(graphs)\n val_size = len(graphs) // 10\n train_graphs = graphs[:val_idx * val_size]\n if val_idx < 9:\n train_graphs = train_graphs + graphs[(val_idx+1) * val_size :]\n val_graphs = graphs[val_idx*val_size: (val_idx+1)*val_size]\n print('Num training graphs: ', len(train_graphs), \n '; Num validation graphs: ', len(val_graphs))\n\n print('Number of graphs: ', len(graphs))\n print('Number of edges: ', sum([G.number_of_edges() for G in graphs]))\n print('Max, avg, std of graph size: ', \n max([G.number_of_nodes() for G in graphs]), ', '\n \"{0:.2f}\".format(np.mean([G.number_of_nodes() for G in graphs])), ', '\n \"{0:.2f}\".format(np.std([G.number_of_nodes() for G in graphs])))\n\n # minibatch\n \n dataset_sampler = GraphSampler(train_graphs, normalize=False, max_num_nodes=max_nodes,\n features=args.feature_type)\n train_dataset_loader = torch.utils.data.DataLoader(\n dataset_sampler, \n batch_size=args.batch_size, \n shuffle=True,\n num_workers=args.num_workers)\n\n dataset_sampler = GraphSampler(val_graphs, normalize=False, max_num_nodes=max_nodes,\n features=args.feature_type)\n val_dataset_loader = torch.utils.data.DataLoader(\n dataset_sampler, \n batch_size=args.batch_size, \n shuffle=False,\n num_workers=args.num_workers)\n print(\"feat dim\")\n print(dataset_sampler.feat_dim)\n return train_dataset_loader, val_dataset_loader, \\\n dataset_sampler.max_num_nodes, dataset_sampler.feat_dim, dataset_sampler.assign_feat_dim\n\n# split train, val, test sets: for original differential pooling setting (each train, val, test is a data loader)\ndef split_train_val_normal(graphs, args, val_test_idx, max_nodes, feat):\n\n # split train, val, test\n\n ## if there is a validation set: 80% train, 10% val, 10% test\n \n if args.val == True:\n val_test_size = len(graphs) // 5\n train_graphs = graphs[:val_test_idx * val_test_size]\n if val_test_idx < 4:\n train_graphs = train_graphs + graphs[(val_test_idx+1) * val_test_size :]\n val_test_graphs = graphs[val_test_idx*val_test_size: (val_test_idx+1)*val_test_size]\n val_size = len(val_test_graphs) // 2\n val_graphs = val_test_graphs[:val_size]\n test_graphs = val_test_graphs[val_size:]\n \n\n ## if there is no validation set: 90% train, 10% test\n else:\n test_idx = val_test_idx\n test_size = len(graphs) // 10\n train_graphs = graphs[:test_idx * test_size]\n if test_idx < 9:\n train_graphs = train_graphs + graphs[(test_idx+1) * test_size :]\n test_graphs = graphs[test_idx*test_size: (test_idx+1)*test_size]\n\n # train set loader\n print(len(train_graphs))\n dataset_sampler = GraphSampler(train_graphs, normalize=False, max_num_nodes=max_nodes, features=args.feature_type)\n train_dataset_loader = torch.utils.data.DataLoader(\n dataset_sampler, \n batch_size=args.batch_size, \n shuffle=True,\n num_workers=args.num_workers)\n\n # test set loader\n testset_sampler = GraphSampler(test_graphs, normalize=False, max_num_nodes=max_nodes, features=args.feature_type)\n test_dataset_loader = torch.utils.data.DataLoader(\n testset_sampler, \n batch_size=args.batch_size, \n shuffle=True,\n num_workers=args.num_workers)\n\n if args.val:\n valset_sampler = GraphSampler(val_graphs, normalize=False, max_num_nodes=max_nodes, features=args.feature_type)\n val_dataset_loader = torch.utils.data.DataLoader(\n valset_sampler, \n batch_size=args.batch_size, \n 
shuffle=False,\n num_workers=args.num_workers)\n else:\n val_dataset_loader = test_dataset_loader\n\n #print(\"feat dim\")\n #print(dataset_sampler.feat_dim)\n return train_dataset_loader, test_dataset_loader, val_dataset_loader, \\\n dataset_sampler.max_num_nodes, dataset_sampler.feat_dim, dataset_sampler.assign_feat_dim\n\n\n\n\n# split train, val, test sets: for triplet train setting (each train, val, test is a dictionary, keys are the classes, values are arrays of graphs)\ndef split_train_val(graphs, args, val_test_idx, max_nodes, feat):\n\n num_classes = args.num_classes \n \n # shuffle the dataset\n random.shuffle(graphs)\n\n # split train, val, test\n\n ## if there is a validation set: 80% train, 10% val, 10% test\n if args.val == True:\n val_test_size = len(graphs) // 5\n train_graphs = graphs[:val_test_idx * val_test_size]\n if val_test_idx < 4:\n train_graphs = train_graphs + graphs[(val_test_idx+1) * val_test_size :]\n val_test_graphs = graphs[val_test_idx*val_test_size: (val_test_idx+1)*val_test_size]\n val_size = len(val_test_graphs) // 2\n val_graphs = val_test_graphs[:val_size]\n test_graphs = val_test_graphs[val_size:]\n \n\n ## if there is no validation set: 90% train, 10% test\n else:\n test_idx = val_test_idx\n test_size = len(graphs) // 10\n train_graphs = graphs[:test_idx * test_size]\n if test_idx < 9:\n train_graphs = train_graphs + graphs[(test_idx+1) * test_size :]\n test_graphs = graphs[test_idx*test_size: (test_idx+1)*test_size]\n\n train_graphs_dict = dict()\n test_graphs_dict = dict()\n val_graphs_dict = dict()\n\n for i in range(num_classes):\n train_graphs_dict[i] = []\n test_graphs_dict[i] = []\n val_graphs_dict[i] = []\n\n node_list = list(train_graphs[0].nodes)\n representative_node = node_list[0]\n\n feat_dim = train_graphs[0].nodes[representative_node]['feat'].shape[0]\n assign_feat_dim = feat_dim\n\n for train_graph in train_graphs:\n num_nodes = train_graph.number_of_nodes()\n # label\n label = int(train_graph.graph['label'])\n\n # adj\n adj = np.array(nx.to_numpy_matrix(train_graph))\n adj_padded = np.zeros((max_nodes, max_nodes))\n adj_padded[:num_nodes, :num_nodes] = adj\n train_graph.graph['adj'] = adj_padded\n\n # feats\n f = np.zeros((max_nodes, feat_dim), dtype=float)\n for i,u in enumerate(train_graph.nodes()):\n if args.feature_type == 'node-label':\n f[i,:] = train_graph.nodes[u]['feat']\n else:\n f[i,:] = (train_graph.nodes[u]['feat'].data).cpu().numpy()\n train_graph.graph['feats'] = f\n\n # num_nodes\n train_graph.graph['num_nodes'] = num_nodes\n\n # assign feats\n train_graph.graph['assign_feats'] = f\n \n train_graphs_dict[label].append(train_graph)\n\n\n for test_graph in test_graphs:\n\n num_nodes = test_graph.number_of_nodes()\n # label\n label = int(test_graph.graph['label'])\n\n # adj\n adj = np.array(nx.to_numpy_matrix(test_graph))\n adj_padded = np.zeros((max_nodes, max_nodes))\n adj_padded[:num_nodes, :num_nodes] = adj\n test_graph.graph['adj'] = adj_padded\n\n # feats\n f = np.zeros((max_nodes, feat_dim), dtype=float)\n for i,u in enumerate(test_graph.nodes()):\n if args.feature_type == 'node-label':\n f[i,:] = test_graph.nodes[u]['feat']\n else:\n f[i,:] = (test_graph.nodes[u]['feat'].data).cpu().numpy()\n\n test_graph.graph['feats'] = f\n\n # num_nodes\n test_graph.graph['num_nodes'] = num_nodes\n\n # assign feats\n test_graph.graph['assign_feats'] = f\n \n \n test_graphs_dict[label].append(test_graph)\n\n \n if args.val == True:\n for val_graph in val_graphs:\n\n num_nodes = val_graph.number_of_nodes()\n # label\n label = 
int(val_graph.graph['label'])\n\n # adj\n adj = np.array(nx.to_numpy_matrix(val_graph))\n adj_padded = np.zeros((max_nodes, max_nodes))\n adj_padded[:num_nodes, :num_nodes] = adj\n val_graph.graph['adj'] = adj_padded\n\n # feats\n f = np.zeros((max_nodes, feat_dim), dtype=float)\n for i,u in enumerate(val_graph.nodes()):\n if args.feature_type == 'node-label':\n f[i,:] = val_graph.nodes[u]['feat']\n else:\n f[i,:] = (val_graph.nodes[u]['feat'].data).cpu().numpy()\n\n val_graph.graph['feats'] = f\n\n # num_nodes\n val_graph.graph['num_nodes'] = num_nodes\n\n # assign feats\n val_graph.graph['assign_feats'] = f\n \n \n val_graphs_dict[label].append(val_graph)\n\n \n\n return train_graphs_dict, test_graphs_dict, val_graphs_dict, \\\n max_nodes, feat_dim, assign_feat_dim\n \n \n\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GarrettNicolai/OpenNMT-py
[ "9491d900ac1b50fe39da417bacc0b9d610331888" ]
[ "onmt/translate/translator.py" ]
[ "#!/usr/bin/env python\n\"\"\" Translator Class and builder \"\"\"\nfrom __future__ import print_function\nimport codecs\nimport os\nimport time\nimport numpy as np\nfrom itertools import count, zip_longest\n\nimport torch\n\nimport onmt.model_builder\nimport onmt.inputters as inputters\nimport onmt.decoders.ensemble\nfrom onmt.translate.beam_search import BeamSearch\nfrom onmt.translate.greedy_search import GreedySearch\nfrom onmt.utils.misc import tile, set_random_seed, report_matrix\nfrom onmt.utils.alignment import extract_alignment, build_align_pharaoh\nfrom onmt.modules.copy_generator import collapse_copy_scores\n\n\ndef build_translator(opt, report_score=True, logger=None, out_file=None):\n if out_file is None:\n out_file = codecs.open(opt.output, 'w+', 'utf-8')\n\n load_test_model = onmt.decoders.ensemble.load_test_model \\\n if len(opt.models) > 1 else onmt.model_builder.load_test_model\n fields, model, model_opt = load_test_model(opt)\n\n scorer = onmt.translate.GNMTGlobalScorer.from_opt(opt)\n translator = Translator.from_opt(\n model,\n fields,\n opt,\n model_opt,\n global_scorer=scorer,\n out_file=out_file,\n report_align=opt.report_align,\n report_score=report_score,\n logger=logger\n )\n model.decoder.set_eval_status(True)\n\n return translator\n\n\ndef max_tok_len(new, count, sofar):\n \"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"\n # Maintains the longest src and tgt length in the current batch\n global max_src_in_batch # this is a hack\n # Reset current longest length at a new batch (count=1)\n if count == 1:\n max_src_in_batch = 0\n # max_tgt_in_batch = 0\n # Src: [<bos> w1 ... wN <eos>]\n max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)\n # Tgt: [w1 ... wM <eos>]\n src_elements = count * max_src_in_batch\n return src_elements\n\n\nclass Translator(object):\n \"\"\"Translate a batch of sentences with a saved model.\n\n Args:\n model (onmt.modules.NMTModel): NMT model to use for translation\n fields (dict[str, torchtext.data.Field]): A dict\n mapping each side to its list of name-Field pairs.\n src_reader (onmt.inputters.DataReaderBase): Source reader.\n tgt_reader (onmt.inputters.TextDataReader): Target reader.\n gpu (int): GPU device. 
Set to negative for no GPU.\n n_best (int): How many beams to wait for.\n min_length (int): See\n :class:`onmt.translate.decode_strategy.DecodeStrategy`.\n max_length (int): See\n :class:`onmt.translate.decode_strategy.DecodeStrategy`.\n beam_size (int): Number of beams.\n random_sampling_topk (int): See\n :class:`onmt.translate.greedy_search.GreedySearch`.\n random_sampling_temp (int): See\n :class:`onmt.translate.greedy_search.GreedySearch`.\n stepwise_penalty (bool): Whether coverage penalty is applied every step\n or not.\n dump_beam (bool): Debugging option.\n block_ngram_repeat (int): See\n :class:`onmt.translate.decode_strategy.DecodeStrategy`.\n ignore_when_blocking (set or frozenset): See\n :class:`onmt.translate.decode_strategy.DecodeStrategy`.\n replace_unk (bool): Replace unknown token.\n data_type (str): Source data type.\n verbose (bool): Print/log every translation.\n report_time (bool): Print/log total time/frequency.\n copy_attn (bool): Use copy attention.\n global_scorer (onmt.translate.GNMTGlobalScorer): Translation\n scoring/reranking object.\n out_file (TextIO or codecs.StreamReaderWriter): Output file.\n report_score (bool) : Whether to report scores\n logger (logging.Logger or NoneType): Logger.\n \"\"\"\n\n def __init__(\n self,\n model,\n fields,\n src_reader,\n tgt_reader,\n gpu=-1,\n n_best=1,\n min_length=0,\n max_length=100,\n ratio=0.,\n beam_size=30,\n random_sampling_topk=1,\n random_sampling_temp=1,\n stepwise_penalty=None,\n dump_beam=False,\n block_ngram_repeat=0,\n ignore_when_blocking=frozenset(),\n replace_unk=False,\n phrase_table=\"\",\n data_type=\"text\",\n verbose=False,\n report_time=False,\n copy_attn=False,\n global_scorer=None,\n out_file=None,\n report_align=False,\n report_score=True,\n logger=None,\n seed=-1):\n self.model = model\n self.fields = fields\n tgt_field = dict(self.fields)[\"tgt\"].base_field\n self._tgt_vocab = tgt_field.vocab\n self._tgt_eos_idx = self._tgt_vocab.stoi[tgt_field.eos_token]\n self._tgt_pad_idx = self._tgt_vocab.stoi[tgt_field.pad_token]\n self._tgt_bos_idx = self._tgt_vocab.stoi[tgt_field.init_token]\n self._tgt_unk_idx = self._tgt_vocab.stoi[tgt_field.unk_token]\n self._tgt_vocab_len = len(self._tgt_vocab)\n\n self._gpu = gpu\n self._use_cuda = gpu > -1\n self._dev = torch.device(\"cuda\", self._gpu) \\\n if self._use_cuda else torch.device(\"cpu\")\n\n self.n_best = n_best\n self.max_length = max_length\n\n self.beam_size = beam_size\n self.random_sampling_temp = random_sampling_temp\n self.sample_from_topk = random_sampling_topk\n\n self.min_length = min_length\n self.ratio = ratio\n self.stepwise_penalty = stepwise_penalty\n self.dump_beam = dump_beam\n self.block_ngram_repeat = block_ngram_repeat\n self.ignore_when_blocking = ignore_when_blocking\n self._exclusion_idxs = {\n self._tgt_vocab.stoi[t] for t in self.ignore_when_blocking}\n self.src_reader = src_reader\n self.tgt_reader = tgt_reader\n self.replace_unk = replace_unk\n if self.replace_unk and not self.model.decoder.attentional:\n raise ValueError(\n \"replace_unk requires an attentional decoder.\")\n self.phrase_table = phrase_table\n self.data_type = data_type\n self.verbose = verbose\n self.report_time = report_time\n\n self.copy_attn = copy_attn\n\n self.global_scorer = global_scorer\n if self.global_scorer.has_cov_pen and \\\n not self.model.decoder.attentional:\n raise ValueError(\n \"Coverage penalty requires an attentional decoder.\")\n self.out_file = out_file\n self.report_align = report_align\n self.report_score = report_score\n 
self.logger = logger\n\n self.use_filter_pred = False\n self._filter_pred = None\n\n # for debugging\n self.beam_trace = self.dump_beam != \"\"\n self.beam_accum = None\n if self.beam_trace:\n self.beam_accum = {\n \"predicted_ids\": [],\n \"beam_parent_ids\": [],\n \"scores\": [],\n \"log_probs\": []}\n\n set_random_seed(seed, self._use_cuda)\n\n @classmethod\n def from_opt(\n cls,\n model,\n fields,\n opt,\n model_opt,\n global_scorer=None,\n out_file=None,\n report_align=False,\n report_score=True,\n logger=None):\n \"\"\"Alternate constructor.\n\n Args:\n model (onmt.modules.NMTModel): See :func:`__init__()`.\n fields (dict[str, torchtext.data.Field]): See\n :func:`__init__()`.\n opt (argparse.Namespace): Command line options\n model_opt (argparse.Namespace): Command line options saved with\n the model checkpoint.\n global_scorer (onmt.translate.GNMTGlobalScorer): See\n :func:`__init__()`..\n out_file (TextIO or codecs.StreamReaderWriter): See\n :func:`__init__()`.\n report_align (bool) : See :func:`__init__()`.\n report_score (bool) : See :func:`__init__()`.\n logger (logging.Logger or NoneType): See :func:`__init__()`.\n \"\"\"\n\n src_reader = inputters.str2reader[opt.data_type].from_opt(opt)\n tgt_reader = inputters.str2reader[\"text\"].from_opt(opt)\n return cls(\n model,\n fields,\n src_reader,\n tgt_reader,\n gpu=opt.gpu,\n n_best=opt.n_best,\n min_length=opt.min_length,\n max_length=opt.max_length,\n ratio=opt.ratio,\n beam_size=opt.beam_size,\n random_sampling_topk=opt.random_sampling_topk,\n random_sampling_temp=opt.random_sampling_temp,\n stepwise_penalty=opt.stepwise_penalty,\n dump_beam=opt.dump_beam,\n block_ngram_repeat=opt.block_ngram_repeat,\n ignore_when_blocking=set(opt.ignore_when_blocking),\n replace_unk=opt.replace_unk,\n phrase_table=opt.phrase_table,\n data_type=opt.data_type,\n verbose=opt.verbose,\n report_time=opt.report_time,\n copy_attn=model_opt.copy_attn,\n global_scorer=global_scorer,\n out_file=out_file,\n report_align=report_align,\n report_score=report_score,\n logger=logger,\n seed=opt.seed)\n\n def _log(self, msg):\n if self.logger:\n self.logger.info(msg)\n else:\n print(msg)\n\n def _gold_score(self, batch, memory_bank, src_lengths, src_vocabs,\n use_src_map, enc_states, batch_size, src):\n if \"tgt\" in batch.__dict__:\n gs = self._score_target(\n batch, memory_bank, src_lengths, src_vocabs,\n batch.src_map if use_src_map else None)\n self.model.decoder.init_state(src, memory_bank, enc_states)\n else:\n gs = [0] * batch_size\n return gs\n\n def translate(\n self,\n src,\n tgt=None,\n src_dir=None,\n batch_size=None,\n batch_type=\"sents\",\n attn_debug=False,\n align_debug=False,\n phrase_table=\"\"):\n \"\"\"Translate content of ``src`` and get gold scores from ``tgt``.\n\n Args:\n src: See :func:`self.src_reader.read()`.\n tgt: See :func:`self.tgt_reader.read()`.\n src_dir: See :func:`self.src_reader.read()` (only relevant\n for certain types of data).\n batch_size (int): size of examples per mini-batch\n attn_debug (bool): enables the attention logging\n align_debug (bool): enables the word alignment logging\n\n Returns:\n (`list`, `list`)\n\n * all_scores is a list of `batch_size` lists of `n_best` scores\n * all_predictions is a list of `batch_size` lists\n of `n_best` predictions\n \"\"\"\n\n if batch_size is None:\n raise ValueError(\"batch_size must be set\")\n\n src_data = {\"reader\": self.src_reader, \"data\": src, \"dir\": src_dir}\n tgt_data = {\"reader\": self.tgt_reader, \"data\": tgt, \"dir\": None}\n _readers, _data, _dir = 
inputters.Dataset.config(\n [('src', src_data), ('tgt', tgt_data)])\n\n data = inputters.Dataset(\n self.fields, readers=_readers, data=_data, dirs=_dir,\n sort_key=inputters.str2sortkey[self.data_type],\n filter_pred=self._filter_pred\n )\n\n data_iter = inputters.OrderedIterator(\n dataset=data,\n device=self._dev,\n batch_size=batch_size,\n batch_size_fn=max_tok_len if batch_type == \"tokens\" else None,\n train=False,\n sort=False,\n sort_within_batch=True,\n shuffle=False\n )\n\n xlation_builder = onmt.translate.TranslationBuilder(\n data, self.fields, self.n_best, self.replace_unk, tgt,\n self.phrase_table\n )\n\n # Statistics\n counter = count(1)\n pred_score_total, pred_words_total = 0, 0\n gold_score_total, gold_words_total = 0, 0\n\n all_scores = []\n all_predictions = []\n\n start_time = time.time()\n\n for batch in data_iter:\n batch_data = self.translate_batch(\n batch, data.src_vocabs, attn_debug\n )\n translations = xlation_builder.from_batch(batch_data)\n for trans in translations:\n all_scores += [trans.pred_scores[:self.n_best]]\n pred_score_total += trans.pred_scores[0]\n pred_words_total += len(trans.pred_sents[0])\n if tgt is not None:\n gold_score_total += trans.gold_score\n gold_words_total += len(trans.gold_sent) + 1\n\n n_best_preds = [\" \".join(pred)\n for pred in trans.pred_sents[:self.n_best]]\n if self.report_align:\n align_pharaohs = [build_align_pharaoh(align) for align\n in trans.word_aligns[:self.n_best]]\n n_best_preds_align = [\" \".join(align) for align\n in align_pharaohs]\n n_best_preds = [pred + \" ||| \" + align\n for pred, align in zip(\n n_best_preds, n_best_preds_align)]\n all_predictions += [n_best_preds]\n self.out_file.write('\\n'.join(n_best_preds) + '\\n')\n self.out_file.flush()\n\n if self.verbose:\n sent_number = next(counter)\n output = trans.log(sent_number)\n if self.logger:\n self.logger.info(output)\n else:\n os.write(1, output.encode('utf-8'))\n\n if attn_debug:\n preds = trans.pred_sents[0]\n preds.append('</s>')\n attns = trans.attns[0].tolist()\n if self.data_type == 'text':\n srcs = trans.src_raw\n else:\n srcs = [str(item) for item in range(len(attns[0]))]\n output = report_matrix(srcs, preds, attns)\n if self.logger:\n self.logger.info(output)\n else:\n os.write(1, output.encode('utf-8'))\n\n if align_debug:\n if trans.gold_sent is not None:\n tgts = trans.gold_sent\n else:\n tgts = trans.pred_sents[0]\n align = trans.word_aligns[0].tolist()\n if self.data_type == 'text':\n srcs = trans.src_raw\n else:\n srcs = [str(item) for item in range(len(align[0]))]\n output = report_matrix(srcs, tgts, align)\n if self.logger:\n self.logger.info(output)\n else:\n os.write(1, output.encode('utf-8'))\n\n end_time = time.time()\n\n if self.report_score:\n msg = self._report_score('PRED', pred_score_total,\n pred_words_total)\n self._log(msg)\n if tgt is not None:\n msg = self._report_score('GOLD', gold_score_total,\n gold_words_total)\n self._log(msg)\n\n if self.report_time:\n total_time = end_time - start_time\n self._log(\"Total translation time (s): %f\" % total_time)\n self._log(\"Average translation time (s): %f\" % (\n total_time / len(all_predictions)))\n self._log(\"Tokens per second: %f\" % (\n pred_words_total / total_time))\n\n if self.dump_beam:\n import json\n json.dump(self.translator.beam_accum,\n codecs.open(self.dump_beam, 'w', 'utf-8'))\n return all_scores, all_predictions\n\n def _align_pad_prediction(self, predictions, bos, pad):\n \"\"\"\n Padding predictions in batch and add BOS.\n\n Args:\n predictions 
(List[List[Tensor]]): `(batch, n_best,)`, for each src\n sequence contain n_best tgt predictions all of which ended with\n eos id.\n bos (int): bos index to be used.\n pad (int): pad index to be used.\n\n Return:\n batched_nbest_predict (torch.LongTensor): `(batch, n_best, tgt_l)`\n \"\"\"\n dtype, device = predictions[0][0].dtype, predictions[0][0].device\n flatten_tgt = [best.tolist() for bests in predictions\n for best in bests]\n paded_tgt = torch.tensor(\n list(zip_longest(*flatten_tgt, fillvalue=pad)),\n dtype=dtype, device=device).T\n bos_tensor = torch.full([paded_tgt.size(0), 1], bos,\n dtype=dtype, device=device)\n full_tgt = torch.cat((bos_tensor, paded_tgt), dim=-1)\n batched_nbest_predict = full_tgt.view(\n len(predictions), -1, full_tgt.size(-1)) # (batch, n_best, tgt_l)\n return batched_nbest_predict\n\n def _align_forward(self, batch, predictions):\n \"\"\"\n For a batch of input and its prediction, return a list of batch predict\n alignment src indice Tensor in size ``(batch, n_best,)``.\n \"\"\"\n # (0) add BOS and padding to tgt prediction\n if hasattr(batch, 'tgt'):\n batch_tgt_idxs = batch.tgt.transpose(1, 2).transpose(0, 2)\n else:\n batch_tgt_idxs = self._align_pad_prediction(\n predictions, bos=self._tgt_bos_idx, pad=self._tgt_pad_idx)\n tgt_mask = (batch_tgt_idxs.eq(self._tgt_pad_idx) |\n batch_tgt_idxs.eq(self._tgt_eos_idx) |\n batch_tgt_idxs.eq(self._tgt_bos_idx))\n\n n_best = batch_tgt_idxs.size(1)\n # (1) Encoder forward.\n src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)\n\n # (2) Repeat src objects `n_best` times.\n # We use batch_size x n_best, get ``(src_len, batch * n_best, nfeat)``\n src = tile(src, n_best, dim=1)\n enc_states = tile(enc_states, n_best, dim=1)\n if isinstance(memory_bank, tuple):\n memory_bank = tuple(tile(x, n_best, dim=1) for x in memory_bank)\n else:\n memory_bank = tile(memory_bank, n_best, dim=1)\n src_lengths = tile(src_lengths, n_best) # ``(batch * n_best,)``\n\n # (3) Init decoder with n_best src,\n self.model.decoder.init_state(src, memory_bank, enc_states)\n # reshape tgt to ``(len, batch * n_best, nfeat)``\n tgt = batch_tgt_idxs.view(-1, batch_tgt_idxs.size(-1)).T.unsqueeze(-1)\n dec_in = tgt[:-1] # exclude last target from inputs\n _, attns = self.model.decoder(\n dec_in, memory_bank, memory_lengths=src_lengths, with_align=True)\n\n alignment_attn = attns[\"align\"] # ``(B, tgt_len-1, src_len)``\n # masked_select\n align_tgt_mask = tgt_mask.view(-1, tgt_mask.size(-1))\n prediction_mask = align_tgt_mask[:, 1:] # exclude bos to match pred\n # get aligned src id for each prediction's valid tgt tokens\n alignement = extract_alignment(\n alignment_attn, prediction_mask, src_lengths, n_best)\n return alignement\n\n def translate_batch(self, batch, src_vocabs, attn_debug):\n #self.model.decoder.set_eval_status(True)\n \"\"\"Translate a batch of sentences.\"\"\"\n with torch.no_grad():\n if self.beam_size == 1:\n decode_strategy = GreedySearch(\n pad=self._tgt_pad_idx,\n bos=self._tgt_bos_idx,\n eos=self._tgt_eos_idx,\n batch_size=batch.batch_size,\n min_length=self.min_length, max_length=self.max_length,\n block_ngram_repeat=self.block_ngram_repeat,\n exclusion_tokens=self._exclusion_idxs,\n return_attention=attn_debug or self.replace_unk,\n sampling_temp=self.random_sampling_temp,\n keep_topk=self.sample_from_topk)\n else:\n # TODO: support these blacklisted features\n assert not self.dump_beam\n decode_strategy = BeamSearch(\n self.beam_size,\n batch_size=batch.batch_size,\n pad=self._tgt_pad_idx,\n 
bos=self._tgt_bos_idx,\n eos=self._tgt_eos_idx,\n n_best=self.n_best,\n global_scorer=self.global_scorer,\n min_length=self.min_length, max_length=self.max_length,\n return_attention=attn_debug or self.replace_unk,\n block_ngram_repeat=self.block_ngram_repeat,\n exclusion_tokens=self._exclusion_idxs,\n stepwise_penalty=self.stepwise_penalty,\n ratio=self.ratio)\n \n #self.model.decoder.set_eval_status(False)\n\n return self._translate_batch_with_strategy(batch, src_vocabs,\n decode_strategy)\n\n def _run_encoder(self, batch):\n src, src_lengths = batch.src if isinstance(batch.src, tuple) \\\n else (batch.src, None)\n\n enc_states, memory_bank, src_lengths = self.model.encoder(\n src, src_lengths)\n if src_lengths is None:\n assert not isinstance(memory_bank, tuple), \\\n 'Ensemble decoding only supported for text data'\n src_lengths = torch.Tensor(batch.batch_size) \\\n .type_as(memory_bank) \\\n .long() \\\n .fill_(memory_bank.size(0))\n return src, enc_states, memory_bank, src_lengths\n\n def _decode_and_generate(\n self,\n decoder_in,\n memory_bank,\n batch,\n src_vocabs,\n memory_lengths,\n src_map=None,\n step=None,\n batch_offset=None):\n if self.copy_attn:\n # Turn any copied words into UNKs.\n decoder_in = decoder_in.masked_fill(\n decoder_in.gt(self._tgt_vocab_len - 1), self._tgt_unk_idx\n )\n\n # Decoder forward, takes [tgt_len, batch, nfeats] as input\n # and [src_len, batch, hidden] as memory_bank\n # in case of inference tgt_len = 1, batch = beam times batch_size\n # in case of Gold Scoring tgt_len = actual length, batch = 1 batch\n self.model.decoder.set_copy_info(batch, self._tgt_vocab)\n dec_out, dec_attn = self.model.decoder(\n decoder_in, memory_bank, memory_lengths=memory_lengths, step=step\n )\n\n # Generator forward.\n if not self.copy_attn:\n if \"std\" in dec_attn:\n attn = dec_attn[\"std\"]\n else:\n attn = None\n log_probs = self.model.generator(dec_out.squeeze(0))\n # returns [(batch_size x beam_size) , vocab ] when 1 step\n # or [ tgt_len, batch_size, vocab ] when full sentence\n else:\n attn = dec_attn[\"copy\"]\n #print(\"DEC_OUT: \", dec_out.size())\n #print(\"ATTN: \", attn.size())\n scores = self.model.generator(dec_out.view(-1, dec_out.size(2)),\n attn.view(-1, attn.size(2)),\n src_map)\n # here we have scores [tgt_lenxbatch, vocab] or [beamxbatch, vocab]\n if batch_offset is None:\n scores = scores.view(-1, batch.batch_size, scores.size(-1))\n scores = scores.transpose(0, 1).contiguous()\n else:\n scores = scores.view(-1, self.beam_size, scores.size(-1))\n\n\n #print(\"TGT_VOCAB: \", self._tgt_vocab)\n scores = collapse_copy_scores(\n scores,\n batch,\n self._tgt_vocab,\n src_vocabs,\n batch_dim=0,\n batch_offset=batch_offset\n )\n scores = scores.view(decoder_in.size(0), -1, scores.size(-1))\n\n log_probs = scores.squeeze(0).log()\n #print(log_probs.size())\n # returns [(batch_size x beam_size) , vocab ] when 1 step\n # or [ tgt_len, batch_size, vocab ] when full sentence\n return log_probs, attn\n\n def _translate_batch_with_strategy(\n self,\n batch,\n src_vocabs,\n decode_strategy):\n \"\"\"Translate a batch of sentences step by step using cache.\n\n Args:\n batch: a batch of sentences, yield by data iterator.\n src_vocabs (list): list of torchtext.data.Vocab if can_copy.\n decode_strategy (DecodeStrategy): A decode strategy to use for\n generate translation step by step.\n\n Returns:\n results (dict): The translation results.\n \"\"\"\n # (0) Prep the components of the search.\n use_src_map = self.copy_attn\n parallel_paths = 
decode_strategy.parallel_paths # beam_size\n batch_size = batch.batch_size\n\n # (1) Run the encoder on the src.\n src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)\n self.model.decoder.init_state(src, memory_bank, enc_states)\n\n results = {\n \"predictions\": None,\n \"scores\": None,\n \"attention\": None,\n \"batch\": batch,\n \"gold_score\": self._gold_score(\n batch, memory_bank, src_lengths, src_vocabs, use_src_map,\n enc_states, batch_size, src)}\n\n # (2) prep decode_strategy. Possibly repeat src objects.\n src_map = batch.src_map if use_src_map else None\n fn_map_state, memory_bank, memory_lengths, src_map = \\\n decode_strategy.initialize(memory_bank, src_lengths, src_map)\n if fn_map_state is not None:\n self.model.decoder.map_state(fn_map_state)\n\n # (3) Begin decoding step by step:\n for step in range(decode_strategy.max_length):\n decoder_input = decode_strategy.current_predictions.view(1, -1, 1)\n log_probs, attn = self._decode_and_generate(\n decoder_input,\n memory_bank,\n batch,\n src_vocabs,\n memory_lengths=memory_lengths,\n src_map=src_map,\n step=step,\n batch_offset=decode_strategy.batch_offset)\n\n decode_strategy.advance(log_probs, attn)\n any_finished = decode_strategy.is_finished.any()\n if any_finished:\n decode_strategy.update_finished()\n if decode_strategy.done:\n break\n\n select_indices = decode_strategy.select_indices\n\n if any_finished:\n # Reorder states.\n if isinstance(memory_bank, tuple):\n memory_bank = tuple(x.index_select(1, select_indices)\n for x in memory_bank)\n else:\n memory_bank = memory_bank.index_select(1, select_indices)\n\n memory_lengths = memory_lengths.index_select(0, select_indices)\n\n if src_map is not None:\n src_map = src_map.index_select(1, select_indices)\n\n if parallel_paths > 1 or any_finished:\n self.model.decoder.map_state(\n lambda state, dim: state.index_select(dim, select_indices))\n\n results[\"scores\"] = decode_strategy.scores\n results[\"predictions\"] = decode_strategy.predictions\n results[\"attention\"] = decode_strategy.attention\n if self.report_align:\n results[\"alignment\"] = self._align_forward(\n batch, decode_strategy.predictions)\n else:\n results[\"alignment\"] = [[] for _ in range(batch_size)]\n return results\n\n def _score_target(self, batch, memory_bank, src_lengths,\n src_vocabs, src_map):\n tgt = batch.tgt\n tgt_in = tgt[:-1]\n\n log_probs, attn = self._decode_and_generate(\n tgt_in, memory_bank, batch, src_vocabs,\n memory_lengths=src_lengths, src_map=src_map)\n\n log_probs[:, :, self._tgt_pad_idx] = 0\n gold = tgt[1:]\n gold_scores = log_probs.gather(2, gold)\n gold_scores = gold_scores.sum(dim=0).view(-1)\n\n return gold_scores\n\n def _report_score(self, name, score_total, words_total):\n if words_total == 0:\n msg = \"%s No words predicted\" % (name,)\n else:\n avg_score = score_total / words_total\n ppl = np.exp(-score_total.item() / words_total)\n msg = (\"%s AVG SCORE: %.4f, %s PPL: %.4f\" % (\n name, avg_score,\n name, ppl))\n return msg\n" ]
[ [ "torch.device", "torch.no_grad", "torch.Tensor", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maxpark/hailo_model_zoo
[ "94beb7d80ef56e5dfa9978c90486e45a73306c79" ]
[ "hailo_model_zoo/core/postprocessing/detection/nanodet.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom tensorflow.image import combined_non_max_suppression\n\nfrom .centernet import COCO_2017_TO_2014_TRANSLATION\n\n\nclass NanoDetPostProc:\n def __init__(self, img_dims=(416, 416), nms_iou_thresh=0.6, labels_offset=0,\n score_threshold=0.3, anchors=None, classes=80, **kwargs):\n self._num_classes = classes\n self._image_dims = img_dims\n self._nms_iou_thresh = nms_iou_thresh\n self._score_threshold = score_threshold\n self._strides = anchors.strides\n self.reg_max = anchors.regression_length\n self._labels_offset = labels_offset\n\n def _get_scores_boxes(self, endnodes):\n scores, boxes = [], []\n for node in endnodes:\n fm_size_h, fm_size_w = node.shape[1:3]\n scores.append(tf.reshape(node[:, :, :, :self._num_classes],\n [-1, fm_size_h * fm_size_w, self._num_classes]))\n boxes.append(tf.reshape(node[:, :, :, self._num_classes:],\n [-1, fm_size_h * fm_size_w, 4, (self.reg_max + 1)]))\n return tf.concat(scores, axis=1), boxes\n\n def _box_decoding(self, raw_boxes):\n boxes = None\n for box_distribute, stride in zip(raw_boxes, self._strides):\n\n # create grid\n shape = [int(x / stride) for x in self._image_dims]\n grid_x = np.arange(shape[1])\n grid_y = np.arange(shape[0])\n grid_x, grid_y = np.meshgrid(grid_x, grid_y)\n ct_row = (grid_y.flatten() + 0.5) * stride\n ct_col = (grid_x.flatten() + 0.5) * stride\n center = np.stack((ct_col, ct_row, ct_col, ct_row), axis=1)\n\n # box distribution to distance\n reg_range = np.arange(self.reg_max + 1)\n box_distance = tf.nn.softmax(box_distribute, axis=-1)\n box_distance = box_distance * np.reshape(reg_range, (1, 1, 1, -1))\n box_distance = tf.reduce_sum(box_distance, axis=-1)\n box_distance = box_distance * stride\n\n # decode box\n box_distance = tf.concat([box_distance[:, :, :2] * (-1), box_distance[:, :, 2:]], axis=-1)\n decode_box = np.expand_dims(center, axis=0) + box_distance\n\n # clipping\n xmin = tf.maximum(0.0, decode_box[:, :, 0]) / self._image_dims[1]\n ymin = tf.maximum(0.0, decode_box[:, :, 1]) / self._image_dims[0]\n xmax = tf.minimum(tf.cast(self._image_dims[1], tf.float32), decode_box[:, :, 2]) / self._image_dims[1]\n ymax = tf.minimum(tf.cast(self._image_dims[0], tf.float32), decode_box[:, :, 3]) / self._image_dims[0]\n decode_box = tf.transpose([ymin, xmin, ymax, xmax], [1, 2, 0])\n\n boxes = decode_box if boxes is None else tf.concat([boxes, decode_box], axis=1)\n return tf.expand_dims(boxes, axis=2)\n\n def postprocessing(self, endnodes, **kwargs):\n\n scores, raw_boxes = self._get_scores_boxes(endnodes)\n\n # decode score/class\n scores = tf.sigmoid(scores)\n\n # decode boxes\n boxes = self._box_decoding(raw_boxes)\n\n # nms\n (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) = \\\n combined_non_max_suppression(boxes=boxes,\n scores=scores,\n score_threshold=self._score_threshold,\n iou_threshold=self._nms_iou_thresh,\n max_output_size_per_class=100,\n max_total_size=100)\n\n # adding offset to the class prediction and cast to integer\n def translate_coco_2017_to_2014(nmsed_classes):\n return np.vectorize(COCO_2017_TO_2014_TRANSLATION.get)(nmsed_classes).astype(np.int32)\n\n nmsed_classes = tf.cast(tf.add(nmsed_classes, self._labels_offset), tf.int16)\n [nmsed_classes] = tf.py_function(translate_coco_2017_to_2014, [nmsed_classes], ['int32'])\n nmsed_classes.set_shape((1, 100))\n\n return {'detection_boxes': nmsed_boxes,\n 'detection_scores': nmsed_scores,\n 'detection_classes': nmsed_classes,\n 'num_detections': num_detections}\n" ]
[ [ "tensorflow.image.combined_non_max_suppression", "tensorflow.concat", "tensorflow.nn.softmax", "tensorflow.transpose", "numpy.expand_dims", "numpy.reshape", "numpy.arange", "tensorflow.reduce_sum", "tensorflow.reshape", "tensorflow.sigmoid", "tensorflow.expand_dims", "numpy.stack", "tensorflow.maximum", "tensorflow.cast", "numpy.vectorize", "tensorflow.add", "tensorflow.py_function", "numpy.meshgrid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sunnyln/birdnet2
[ "d1a2b703475345d887c325c135013ed9f72d3a57", "d1a2b703475345d887c325c135013ed9f72d3a57" ]
[ "detectron2/modeling/meta_arch/rcnn.py", "tools/val_net_BirdNetPlus.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\nimport torch\nfrom torch import nn\n\nfrom detectron2.structures import ImageList\nfrom detectron2.utils.logger import log_first_n\n\nfrom ..backbone import build_backbone\nfrom ..postprocessing import detector_postprocess\nfrom ..proposal_generator import build_proposal_generator\nfrom ..roi_heads import build_roi_heads\nfrom .build import META_ARCH_REGISTRY\n\n__all__ = [\"GeneralizedRCNN\", \"ProposalNetwork\"]\n\n\n@META_ARCH_REGISTRY.register()\nclass GeneralizedRCNN(nn.Module):\n \"\"\"\n Generalized R-CNN. Any models that contains the following three components:\n 1. Per-image feature extraction (aka backbone)\n 2. Region proposal generation\n 3. Per-region feature extraction and prediction\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n\n self.device = torch.device(cfg.MODEL.DEVICE)\n self.backbone = build_backbone(cfg)\n self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())\n self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())\n\n assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)\n num_channels = len(cfg.MODEL.PIXEL_MEAN)\n pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(num_channels, 1, 1)\n pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(num_channels, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.to(self.device)\n self.rotated_box_training = cfg.ROTATED_BOX_TRAINING\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n batched_inputs: a list, batched outputs of :class:`DatasetMapper` .\n Each item in the list contains the inputs for one image.\n For now, each item in the list is a dict that contains:\n\n * image: Tensor, image in (C, H, W) format.\n * instances (optional): groundtruth :class:`Instances`\n * proposals (optional): :class:`Instances`, precomputed proposals.\n\n Other information that's included in the original dicts, such as:\n\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\n See :meth:`postprocess` for details.\n\n Returns:\n list[dict]:\n Each dict is the output for one input image.\n The dict contains one key \"instances\" whose value is a :class:`Instances`.\n The :class:`Instances` object has the following keys:\n \"pred_boxes\", \"pred_classes\", \"scores\", \"pred_masks\", \"pred_keypoints\"\n \"\"\"\n if not self.training:\n return self.inference(batched_inputs)\n\n images = self.preprocess_image(batched_inputs)\n if \"instances\" in batched_inputs[0]:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n elif \"targets\" in batched_inputs[0]:\n log_first_n(\n logging.WARN, \"'targets' in the model inputs is now renamed to 'instances'!\", n=10\n )\n gt_instances = [x[\"targets\"].to(self.device) for x in batched_inputs]\n else:\n gt_instances = None\n\n features = self.backbone(images.tensor)\n\n if self.proposal_generator:\n proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)\n else:\n assert \"proposals\" in batched_inputs[0]\n proposals = [x[\"proposals\"].to(self.device) for x in batched_inputs]\n proposal_losses = {}\n\n _, detector_losses = self.roi_heads(images, features, proposals, gt_instances)\n\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n return losses\n\n def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):\n \"\"\"\n Run inference on the given 
inputs.\n\n Args:\n batched_inputs (list[dict]): same as in :meth:`forward`\n detected_instances (None or list[Instances]): if not None, it\n contains an `Instances` object per image. The `Instances`\n object contains \"pred_boxes\" and \"pred_classes\" which are\n known boxes in the image.\n The inference will then skip the detection of bounding boxes,\n and only predict other per-ROI outputs.\n do_postprocess (bool): whether to apply post-processing on the outputs.\n\n Returns:\n same as in :meth:`forward`.\n \"\"\"\n assert not self.training\n\n images = self.preprocess_image(batched_inputs)\n features = self.backbone(images.tensor)\n\n if detected_instances is None:\n if self.proposal_generator:\n proposals, _ = self.proposal_generator(images, features, None)\n else:\n assert \"proposals\" in batched_inputs[0]\n proposals = [x[\"proposals\"].to(self.device) for x in batched_inputs]\n\n results, _ = self.roi_heads(images, features, proposals, None)\n else:\n detected_instances = [x.to(self.device) for x in detected_instances]\n results = self.roi_heads.forward_with_given_boxes(features, detected_instances)\n\n if do_postprocess:\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width, rotated_box_training=self.rotated_box_training)\n processed_results.append({\"instances\": r})\n return processed_results\n else:\n return results\n\n def preprocess_image(self, batched_inputs):\n \"\"\"\n Normalize, pad and batch the input images.\n \"\"\"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [self.normalizer(x) for x in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images\n\n\n@META_ARCH_REGISTRY.register()\nclass ProposalNetwork(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.device = torch.device(cfg.MODEL.DEVICE)\n\n self.backbone = build_backbone(cfg)\n self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())\n\n pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(-1, 1, 1)\n pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(-1, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.to(self.device)\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n Same as in :class:`GeneralizedRCNN.forward`\n\n Returns:\n list[dict]: Each dict is the output for one input image.\n The dict contains one key \"proposals\" whose value is a\n :class:`Instances` with keys \"proposal_boxes\" and \"objectness_logits\".\n \"\"\"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [self.normalizer(x) for x in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n features = self.backbone(images.tensor)\n\n if \"instances\" in batched_inputs[0]:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n elif \"targets\" in batched_inputs[0]:\n log_first_n(\n logging.WARN, \"'targets' in the model inputs is now renamed to 'instances'!\", n=10\n )\n gt_instances = [x[\"targets\"].to(self.device) for x in batched_inputs]\n else:\n gt_instances = None\n proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)\n # In training, the proposals are not useful at all but we generate them anyway.\n 
# This makes RPN-only models about 5% slower.\n if self.training:\n return proposal_losses\n\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n proposals, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"proposals\": r})\n return processed_results\n", "#!/usr/local/bin/python3\nimport os, sys\nfrom detectron2.data.datasets import register_coco_instances\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\nfrom detectron2.config import get_cfg \nfrom detectron2.engine import default_setup\nimport logging\nimport numpy as np\nimport cv2\nfrom detectron2.engine import DefaultPredictor\nimport torch\nimport math\nfrom birdview_detection_refiner import BirdviewDetectionRefiner\nfrom utils_3d import _draw_projection_obstacle_to_cam\nfrom object_3d import Object3d\nfrom utils_calib import Calibration\nimport argparse\n# Env paths\nhome = os.getenv('HOME')\ndetectron2_root = os.getenv('DETECTRON_ROOT')\n\n'''\nThis script allows the user to:\n1. Obtain the annotations in KITTI format of one or multiple checkpoints, to be evaluated with an external evaluator like https://github.com/cguindel/eval_kitti\n2. Visualize and save the images resulting in both BEV and 3D as well\n3. Change the evaluation parameters and kitti_root by arguments\n'''\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Validation script for BirdNet+')\n parser.add_argument(\n '--config_file', help=\"Name of the configuration to use without extension\", default='Base-BirdNetPlus', type=str)\n parser.add_argument(\n '--ann_val', help=\"Validation file with the annotations in COCO format previously generated by the training script, without extension\", default='annotations_kitti_validation_carpedcycRDHCENT_VPRES_12BIN', type=str)\n parser.add_argument(\n '--write', help=\"Write results in KITTI format\", default=False, action=\"store_true\")\n parser.add_argument(\n '--img2show', help=\"Show a fixed number of images, 0 to eliminate the visualization\", default=0, type=int)\n parser.add_argument(\n '--save_img', help=\"Save images showed\", default=False, action=\"store_true\")\n parser.add_argument(\n '--eval_chkp', help=\"Starting from the second half of the checkpoints, the rest will be evaluated with a certain interval specified here, 1 to evaluate all of them\", default=1, type=int)\n parser.add_argument(\n '--force_test', help=\"Name of the checkpoint to extract annotations or evaluate, empty disable this option\", default='', type=str)\n parser.add_argument(\n '--score', help=\"Limitation for lower scores\", default=0.01, type=float)\n parser.add_argument(\n '--nms', help=\"NMS IoU for the overlapping obstacles per class\", default=0.3, type=float)\n parser.add_argument(\n '--kitti_root', help=\"Path of the KITTI dataset\", default='/media/datasets/kitti/object/training', type=str)\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()\n\n\n# BEV parameters\nbvres = 0.05\nvelodyne_h = 1.73\nonly_front = True\n# BEV images\nim_path = os.path.join(detectron2_root,'datasets/bv_kitti/image') \n\ndef _read_imageset_file(path):\n with open(path, 'r') as f:\n lines = f.readlines()\n return [int(line) for line in lines]\n\n# Viewpoint calculation\ndef getfrombins(cl,bins):\n bin_dist = np.linspace(-math.pi,math.pi,bins+1)\n bin_res = 
(bin_dist[1]-bin_dist[0])/2.\n bin = [bin_dist[i]-bin_res for i in range(len(bin_dist)-1)][cl] \n return bin\n\nidclass = { 0:'Car', 1:'Van', 2:'Truck', 3:'Pedestrian', 4:'Person_sitting', 5:'Cyclist', 6:'Tram', 7:'Misc', 8:'DontCare'}\nidclass3 = { 0:'Car', 1:'Pedestrian', 2:'Cyclist'}\ndef catName(category_id,nclass):\n if nclass > 3:\n _idclass = idclass\n elif nclass == 3:\n _idclass = idclass3\n strclass = _idclass.get(category_id, nclass)\n return strclass \n\ndef prepareAnn(lbl, alpha, box, h=-1, w=-1, l=-1, x=-1000, y=-1000, z=-1000, ry=-10, score=None):\n ann = [\n lbl, \n -1,\n -1,\n alpha,\n box[0],box[1],box[2],box[3],\n h,w,l,\n x,y,z,\n ry\n ] \n if score is not None:\n ann.append(score)\n strAnn = ' '.join([str(x) for x in ann])\n obj3d = Object3d(strAnn)\n \n return ann, obj3d, strAnn\n\ndef prepare_for_coco_detection_KITTI(instance, output_folder, filename, write, kitti_calib_path, nclass, vp, bins, vp_res, hwrot, height_training):\n # Extract important information from instance class\n boxes = np.array(instance.get('pred_boxes').tensor)\n scores = np.array(instance.get('scores'))\n labels = np.array(instance.get('pred_classes'))\n if vp_res:\n alpha = np.array([rad for rad in instance.get('viewpoint_residual')]) if vp else np.ones((labels.shape))*(-10.00)\n else:\n alpha = np.array([getfrombins(cl,bins) for cl in instance.get('viewpoint')]) if vp else np.ones((labels.shape))*(-10.00)\n \n h = np.array([[h,g] for h,g in instance.get('height')]) if height_training else np.array([-1,-1000]*labels.shape)\n\n # Image BV\n bv_image = cv2.imread(filename).astype(np.uint8)\n\n if height_training:\n bv_ground = None\n else:\n # Ground BV\n bv_ground = np.fromfile(os.path.join(im_path,'ground_'+filename[-10:].split('.png')[0]+'.txt'),sep=' ')\n bv_ground = bv_ground.reshape(bv_image.shape[0],bv_image.shape[1],1)\n \n # Calibration for 3D\n calib_file = os.path.join(kitti_calib_path,filename[-10:].split('.png')[0]+'.txt')\n\n # Refiner for 3D\n refiner = BirdviewDetectionRefiner(bv_image, bv_ground, bvres, velodyne_h, only_front)\n\n im_ann = []\n im_ann_obj = []\n if write:\n file_ann = open(os.path.join(output_folder,filename[-10:].split('.png')[0]+'.txt'), 'w+')\n for k, box in enumerate(boxes):\n lbl = catName(labels[k],nclass)\n ann,obj3d,strAnn = prepareAnn(lbl,alpha[k],box,score=scores[k],h=h[k,0],z=h[k,1])\n\n if hwrot and height_training:\n refiner.refine_detection_rotated_wheight(obj3d)\n elif hwrot:\n refiner.refine_detection_rotated(obj3d)\n else:\n refiner.refine_detection(obj3d)\n if obj3d.height == -1:\n continue\n\n # Project points to camera frame coordinates\n calib = Calibration(calib_file)\n p = calib.project_velo_to_rect(np.array([[obj3d.location.x,obj3d.location.y,obj3d.location.z]]))\n\n # Change 2D bbox in BV getting 2D bbox in camera frame (projection)\n _,_,bbox2D = _draw_projection_obstacle_to_cam(obj3d, calib_file, bvres, only_front, False)\n if bbox2D == None:\n continue\n # Obtain alpha from yaw\n obj3d.alpha = obj3d.yaw -(-math.atan2(p[0][2],p[0][0]) - 1.5*math.pi)\n obj3d.alpha = obj3d.alpha%(2*math.pi)\n if obj3d.alpha > math.pi:\n obj3d.alpha -= 2*math.pi\n elif obj3d.alpha < -math.pi:\n obj3d.alpha += 2*math.pi\n\n # After refinement\n ann = [\n obj3d.kind_name, \n obj3d.truncated,\n obj3d.occluded,\n round(obj3d.alpha,6),\n round(bbox2D[0],6),round(bbox2D[1],6),round(bbox2D[2],6),round(bbox2D[3],6),\n round(obj3d.height,6), round(obj3d.width,6), round(obj3d.length,6), \n round(p[0][0],6), round(p[0][1],6), round(p[0][2],6), # Camera coordinates\n 
round(obj3d.yaw,6),\n obj3d.score, # DON'T ROUND IT\n ]\n\n im_ann.append(ann)\n im_ann_obj.append(obj3d)\n strAnn = ' '.join([str(x) for x in ann])\n \n if write:\n file_ann.write(strAnn+'\\n')\n if write:\n file_ann.close()\n return im_ann, im_ann_obj, instance\n\ndef main(config_file, ann_val, write, img2show, save_img, eval_chkp, force_test, score_thresh , nms_thresh, kitti_root ):\n # KITTI paths\n kitti_im_path = kitti_root+'/image_2'\n kitti_calib_path = kitti_root+'/calib'\n\n # LOGGER AND CONFIGURATION LOAD\n logger = logging.getLogger(\"detectron2.trainer\")\n cfg = get_cfg()\n cfg.merge_from_file(os.path.join(detectron2_root,\"configs/{}.yaml\".format(config_file)))\n default_setup(cfg, None)\n\n nclasses = cfg.MODEL.ROI_HEADS.NUM_CLASSES\n optional_arguments = []\n if cfg.VIEWPOINT:\n optional_arguments.append('viewpoint')\n if cfg.VIEWPOINT_RESIDUAL:\n optional_arguments.append('vp_res')\n if cfg.ROTATED_BOX_TRAINING:\n optional_arguments.append('bbox3D')\n if cfg.HEIGHT_TRAINING:\n optional_arguments.append('height')\n\n val_path = detectron2_root+\"/datasets/bv_kitti/annotations/{}.json\".format(ann_val)\n register_coco_instances(\"birdview_val\", {}, val_path, detectron2_root, optional_arguments)\n\n toeval = []\n models = os.listdir(cfg.OUTPUT_DIR)\n for model in models:\n if model.endswith('.pth') and not model=='model_final.pth':\n toeval.append(model)\n toeval.sort()\n toeval = toeval[:-1]\n if force_test:\n toeval = [e for e in toeval if force_test in e]\n f_eval = [folder.split('_')[1].split('.')[0] for folder in toeval]\n elif eval_chkp!=0:\n length = len(toeval)//2\n toeval = toeval[length::eval_chkp]\n toeval.append('model_final.pth')\n f_eval = [folder.split('_')[1].split('.')[0] for folder in toeval]\n else:\n toeval = ['model_final.pth']\n f_eval = ['final']\n print('Checkpoints to be evaluated: ',toeval)\n for checkpoint, eval_folder in zip(toeval,f_eval):\n cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, checkpoint) \n\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = score_thresh \n cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = nms_thresh\n predictor = DefaultPredictor(cfg)\n\n val_bv_dicts = DatasetCatalog.get(\"birdview_val\")\n val_bv_meta = MetadataCatalog.get(\"birdview_val\")\n\n obj_anns = []\n kitti_results = []\n c = 0\n\n sample_idx = range(img2show) if img2show != 0 else [-1]\n logger.info(\"Showing {} predictions\".format(str(img2show)))\n ann_outdir = os.path.join(cfg.OUTPUT_DIR,'annotations',eval_folder)\n if not os.path.exists(ann_outdir):\n os.makedirs(ann_outdir)\n\n for image_id, d in enumerate(val_bv_dicts):\n c += 1\n file = os.path.join(ann_outdir,d[\"file_name\"][-10:].split('.png')[0]+'.txt')\n im = cv2.imread(d[\"file_name\"])\n print(\"Preparing prediction {}, from {}, image: {}\".format(str(c),str(len(val_bv_dicts)),d[\"file_name\"]))\n if not os.path.exists(file) or write:\n is_kitti_ann=False\n # Inference\n outputs = predictor(im)\n list_anns, obj_anns, instances = prepare_for_coco_detection_KITTI(outputs[\"instances\"].to(\"cpu\"), ann_outdir, d[\"file_name\"], write, kitti_calib_path, nclasses, cfg.VIEWPOINT, cfg.VP_BINS, cfg.VIEWPOINT_RESIDUAL, cfg.ROTATED_BOX_TRAINING, cfg.HEIGHT_TRAINING)\n kitti_results.append(list_anns)\n else:\n is_kitti_ann=True\n with open(file,'r') as f:\n list_anns = f.read().splitlines()\n kitti_results.append([anns.split(' ') for anns in list_anns] if list_anns else [])\n for ann in list_anns:\n obj_anns.append(Object3d(ann))\n \n if c in sample_idx:\n # Change BV aspect\n nonzero = np.where(im>0)\n 
im[nonzero]=255-im[nonzero]\n im=cv2.bitwise_not(im)\n\n kitti_im = cv2.imread(os.path.join(kitti_im_path,d[\"file_name\"][-10:]))\n calib_file = os.path.join(kitti_calib_path,d[\"file_name\"][-10:].split('.png')[0]+'.txt')\n # Show obstacles\n for i, obj in enumerate(obj_anns):\n kitti_im, im, _ = _draw_projection_obstacle_to_cam(obj, calib_file, bvres, only_front, True, kitti_im, im, is_kitti_ann=is_kitti_ann)\n cv2.imshow('image',kitti_im)\n cv2.imshow('bv_image',im)\n if save_img:\n im_outdir = os.path.join(cfg.OUTPUT_DIR,'images')\n if not os.path.exists(im_outdir):\n os.makedirs(im_outdir)\n cv2.imwrite(os.path.join(im_outdir,'3D_'+d[\"file_name\"][-10:]), kitti_im)\n cv2.imwrite(os.path.join(im_outdir,'BEV_'+d[\"file_name\"][-10:]), im)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n elif c > max(sample_idx) and not write:\n break\n\nif __name__ == '__main__':\n args = parse_args()\n\n main(args.config_file, args.ann_val, args.write, args.img2show, args.save_img, args.eval_chkp, args.force_test, args.score, args.nms, args.kitti_root)\n" ]
[ [ "torch.device", "torch.Tensor" ], [ "numpy.array", "numpy.where", "numpy.linspace", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iglpdc/nilearn
[ "a4cc998b7a34fa48a77ce46f9f0b6b4e75d8a2d1" ]
[ "nilearn/regions/region_extractor.py" ]
[ "\"\"\"\nBetter brain parcellations for Region of Interest analysis\n\"\"\"\n\nimport numbers\nimport numpy as np\n\nfrom scipy.ndimage import label\nfrom scipy.stats import scoreatpercentile\n\nfrom sklearn.externals.joblib import Memory\n\nfrom .. import masking\nfrom ..input_data import NiftiMapsMasker\nfrom .._utils import check_niimg, check_niimg_4d\nfrom ..image import new_img_like, resample_img\nfrom ..image.image import _smooth_array, threshold_img\nfrom .._utils.niimg_conversions import concat_niimgs, _check_same_fov\nfrom .._utils.niimg import _safe_get_data\nfrom .._utils.compat import _basestring\nfrom .._utils.ndimage import _peak_local_max\nfrom .._utils.segmentation import _random_walker\n\n\ndef _threshold_maps_ratio(maps_img, threshold):\n \"\"\" Automatic thresholding of atlas maps image.\n\n Considers the given threshold as a ratio to the total number of voxels\n in the brain volume. This gives a certain number within the data\n voxel size which means that nonzero voxels which fall above than this\n size will be kept across all the maps.\n\n Parameters\n ----------\n maps_img: Niimg-like object\n an image of brain atlas maps.\n threshold: float\n If float, value is used as a ratio to n_voxels to get a certain threshold\n size in number to threshold the image. The value should be positive and\n within the range of number of maps (i.e. n_maps in 4th dimension).\n\n Returns\n -------\n threshold_maps_img: Nifti1Image\n gives us thresholded image.\n \"\"\"\n maps = check_niimg(maps_img)\n n_maps = maps.shape[-1]\n if not isinstance(threshold, numbers.Real) or threshold <= 0 or threshold > n_maps:\n raise ValueError(\"threshold given as ratio to the number of voxels must \"\n \"be Real number and should be positive and between 0 and \"\n \"total number of maps i.e. n_maps={0}. \"\n \"You provided {1}\".format(n_maps, threshold))\n else:\n ratio = threshold\n\n maps_data = np.nan_to_num(maps.get_data())\n\n abs_maps = np.abs(maps_data)\n # thresholding\n cutoff_threshold = scoreatpercentile(\n abs_maps, 100. - (100. / n_maps) * ratio)\n maps_data[abs_maps < cutoff_threshold] = 0.\n\n threshold_maps_img = new_img_like(maps, maps_data)\n\n return threshold_maps_img\n\n\ndef connected_regions(maps_img, min_region_size=1350,\n extract_type='local_regions', smoothing_fwhm=6,\n mask_img=None):\n \"\"\" Extraction of brain connected regions into separate regions.\n\n Note: the region size should be defined in mm^3. See the documentation for\n more details.\n\n .. versionadded:: 0.2\n\n Parameters\n ----------\n maps_img: Niimg-like object\n an image of brain activation or atlas maps to be extracted into set of\n separate brain regions.\n\n min_region_size: int, default 1350 mm^3, optional\n Minimum volume in mm3 for a region to be kept. For example, if the voxel\n size is 3x3x3 mm then the volume of the voxel is 27mm^3. 
By default, it\n is 1350mm^3 which means we take minimum size of 1350 / 27 = 50 voxels.\n\n extract_type: str {'connected_components', 'local_regions'} \\\n default local_regions, optional\n If 'connected_components', each component/region in the image is extracted\n automatically by labelling each region based upon the presence of unique\n features in their respective regions.\n If 'local_regions', each component/region is extracted based on their\n maximum peak value to define a seed marker and then using random walker\n segementation algorithm on these markers for region separation.\n\n smoothing_fwhm: scalar, default 6mm, optional\n To smooth an image to extract most sparser regions. This parameter\n is passed `_smooth_array` and exists only for extract_type 'local_regions'.\n\n mask_img: Niimg-like object, default None\n If given, mask image is applied to input data.\n If None, no masking is applied.\n\n Returns\n -------\n regions_extracted_img: Nifti1Image\n gives the image in 4D of extracted brain regions. Each 3D image consists\n of only one separated region.\n\n index_of_each_map: numpy array\n an array of list of indices where each index denotes the identity\n of each extracted region to their family of brain maps.\n \"\"\"\n all_regions_imgs = []\n index_of_each_map = []\n maps_img = check_niimg(maps_img, atleast_4d=True)\n maps = _safe_get_data(maps_img).copy()\n affine = maps_img.get_affine()\n min_region_size = min_region_size / np.prod(np.diag(abs(affine[:3])))\n\n allowed_extract_types = ['connected_components', 'local_regions']\n if extract_type not in allowed_extract_types:\n message = (\"'extract_type' should be given either of these {0} \"\n \"You provided extract_type='{1}'\").format(allowed_extract_types, extract_type)\n raise ValueError(message)\n\n if mask_img is not None:\n if not _check_same_fov(maps_img, mask_img):\n mask_img = resample_img(mask_img,\n target_affine=maps_img.get_affine(),\n target_shape=maps_img.shape[:3],\n interpolation=\"nearest\")\n mask_data, _ = masking._load_mask_img(mask_img)\n # Set as 0 to the values which are outside of the mask\n maps[mask_data == 0.] = 0.\n\n for index in range(maps.shape[-1]):\n regions = []\n map_3d = maps[..., index]\n # Mark the seeds using random walker\n if extract_type == 'local_regions':\n smooth_map = _smooth_array(map_3d, affine=affine, fwhm=smoothing_fwhm)\n seeds = _peak_local_max(smooth_map)\n seeds_label, seeds_id = label(seeds)\n # Assign -1 to values which are 0. to indicate to ignore\n seeds_label[map_3d == 0.] 
= -1\n rw_maps = _random_walker(map_3d, seeds_label)\n # Now simply replace \"-1\" with \"0\" for regions separation\n rw_maps[rw_maps == -1] = 0.\n label_maps = rw_maps\n else:\n # Connected component extraction\n label_maps, n_labels = label(map_3d)\n\n # Takes the size of each labelized region data\n labels_size = np.bincount(label_maps.ravel())\n # set background labels sitting in zero index to zero\n labels_size[0] = 0.\n for label_id, label_size in enumerate(labels_size):\n if label_size > min_region_size:\n region_data = (label_maps == label_id) * map_3d\n region_img = new_img_like(maps_img, region_data)\n regions.append(region_img)\n\n index_of_each_map.extend([index] * len(regions))\n all_regions_imgs.extend(regions)\n\n regions_extracted_img = concat_niimgs(all_regions_imgs)\n\n return regions_extracted_img, index_of_each_map\n\n\nclass RegionExtractor(NiftiMapsMasker):\n \"\"\"Class for brain region extraction.\n\n Region Extraction is a post processing technique which\n is implemented to automatically segment each brain atlas maps\n into different set of separated brain activated region.\n Particularly, to show that each decomposed brain maps can be\n used to focus on a target specific Regions of Interest analysis.\n\n .. versionadded:: 0.2\n\n Parameters\n ----------\n maps_img: 4D Niimg-like object\n Image containing a set of whole brain atlas maps or statistically\n decomposed brain maps.\n\n mask_img: Niimg-like object or None, default None, optional\n Mask to be applied to input data, passed to NiftiMapsMasker.\n If None, no masking is applied.\n\n min_region_size: int, default 1350 mm^3, optional\n Minimum volume in mm3 for a region to be kept. For example, if\n the voxel size is 3x3x3 mm then the volume of the voxel is\n 27mm^3. By default, it is 1350mm^3 which means we take minimum\n size of 1350 / 27 = 50 voxels.\n\n threshold: number, default 1., optional\n A value used either in ratio_n_voxels or img_value or percentile\n `thresholding_strategy` based upon the choice of selection.\n\n thresholding_strategy: str {'ratio_n_voxels', 'img_value', 'percentile'}, optional\n If default 'ratio_n_voxels', we apply thresholding that will keep\n the more intense nonzero brain voxels (denoted as n_voxels)\n across all maps (n_voxels being the number of voxels in the brain\n volume). A float value given in `threshold` parameter indicates\n the ratio of voxels to keep meaning (if float=2. then maps will\n together have 2. x n_voxels non-zero voxels). If set to\n 'percentile', images are thresholded based on the score obtained\n with the given percentile on the data and the voxel intensities\n which are survived above this obtained score will be kept. If set\n to 'img_value', we apply thresholding based on the non-zero voxel\n intensities across all maps. A value given in `threshold`\n parameter indicates that we keep only those voxels which have\n intensities more than this value.\n\n extractor: str {'connected_components', 'local_regions'} default 'local_regions', optional\n If 'connected_components', each component/region in the image is\n extracted automatically by labelling each region based upon the\n presence of unique features in their respective regions. 
If\n 'local_regions', each component/region is extracted based on\n their maximum peak value to define a seed marker and then using\n random walker segementation algorithm on these markers for region\n separation.\n\n standardize: bool, True or False, default False, optional\n If True, the time series signals are centered and normalized by\n putting their mean to 0 and variance to 1. Recommended to\n set as True if signals are not already standardized.\n passed to class NiftiMapsMasker.\n\n detrend: bool, True or False, default False, optional\n This parameter is passed to nilearn.signal.clean basically\n indicates whether to detrend timeseries signals or not.\n passed to class NiftiMapsMasker.\n\n low_pass: float, default None, optional\n This value will be applied on the signals by passing to signal.clean\n Please see the related documentation signal.clean for more details.\n passed to class NiftiMapsMasker.\n\n high_pass: float, default None, optional\n This value will be applied on the signals by passing to signal.clean\n Please see the related documentation signal.clean for more details.\n passed to NiftiMapsMasker.\n\n t_r: float, default None, optional\n Repetition time in sec. This value is given to signal.clean\n Please see the related documentation for details.\n passed to NiftiMapsMasker.\n\n memory: instance of joblib.Memory, string, default None, optional\n Used to cache the masking process. If a string is given, the path\n is set with this string as a folder name in the directory.\n passed to NiftiMapsMasker.\n\n memory_level: int, default 0, optional\n Aggressiveness of memory catching. The higher the number, the higher\n the number of functions that will be cached. Zero mean no caching.\n passed to NiftiMapsMasker.\n\n verbose: int, default 0, optional\n Indicates the level of verbosity by printing the message. Zero\n indicates nothing is printed.\n\n Attributes\n ----------\n `index_` : numpy array\n array of list of indices where each index value is assigned to\n each separate region of its corresponding family of brain maps.\n\n `regions_img_` : Nifti1Image\n List of separated regions with each region lying on an\n original volume concatenated into a 4D image.\n\n References\n ----------\n * Abraham et al. \"Region segmentation for sparse decompositions:\n better brain parcellations from rest fMRI\", Sparsity Techniques in\n Medical Imaging, Sep 2014, Boston, United States. 
pp.8\n\n \"\"\"\n def __init__(self, maps_img, mask_img=None, min_region_size=1350,\n threshold=1., thresholding_strategy='ratio_n_voxels',\n extractor='local_regions', standardize=False, detrend=False,\n low_pass=None, high_pass=None, t_r=None,\n memory=Memory(cachedir=None), memory_level=0, verbose=0):\n super(RegionExtractor, self).__init__(\n maps_img=maps_img, mask_img=mask_img,\n standardize=standardize, detrend=detrend, low_pass=low_pass,\n high_pass=high_pass, t_r=t_r, memory=memory,\n memory_level=memory_level, verbose=verbose)\n self.maps_img = maps_img\n self.min_region_size = min_region_size\n self.thresholding_strategy = thresholding_strategy\n self.threshold = threshold\n self.extractor = extractor\n\n def fit(self, X=None, y=None):\n \"\"\" Prepare the data and setup for the region extraction\n \"\"\"\n maps_img = check_niimg_4d(self.maps_img)\n\n list_of_strategies = ['ratio_n_voxels', 'img_value', 'percentile']\n if self.thresholding_strategy not in list_of_strategies:\n message = (\"'thresholding_strategy' should be \"\n \"either of these {0}\").format(list_of_strategies)\n raise ValueError(message)\n\n if self.threshold is None or isinstance(self.threshold, _basestring):\n raise ValueError(\"The given input to threshold is not valid. \"\n \"Please submit a valid number specific to either of \"\n \"the strategy in {0}\".format(list_of_strategies))\n elif isinstance(self.threshold, numbers.Number):\n # foreground extraction\n if self.thresholding_strategy == 'ratio_n_voxels':\n threshold_maps = _threshold_maps_ratio(maps_img, self.threshold)\n else:\n if self.thresholding_strategy == 'percentile':\n self.threshold = \"{0}%\".format(self.threshold)\n threshold_maps = threshold_img(maps_img, mask_img=self.mask_img,\n threshold=self.threshold)\n\n # connected component extraction\n self.regions_img_, self.index_ = connected_regions(threshold_maps,\n self.min_region_size,\n self.extractor)\n\n self.maps_img = self.regions_img_\n super(RegionExtractor, self).fit()\n\n return self\n" ]
[ [ "scipy.ndimage.label", "scipy.stats.scoreatpercentile", "sklearn.externals.joblib.Memory", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
hodamr/biu-advenced-ai-ex2
[ "2df6eb7ed389378326bd5c24fae43a65f190d221" ]
[ "deep_rl/utils/torch_utils.py" ]
[ "#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nfrom .config import *\nimport torch\nimport torch.autograd as autograd\nimport os\n\ndef select_device(gpu_id):\n # if torch.cuda.is_available() and gpu_id >= 0:\n if gpu_id >= 0:\n Config.DEVICE = torch.device('cuda:%d' % (gpu_id))\n else:\n Config.DEVICE = torch.device('cpu')\n\ndef tensor(x):\n if isinstance(x, torch.Tensor):\n return x\n x = torch.tensor(x, device=Config.DEVICE, dtype=torch.float32)\n return x\n\ndef range_tensor(end):\n return torch.arange(end).long().to(Config.DEVICE)\n\ndef to_np(t):\n return t.cpu().detach().numpy()\n\ndef random_seed(seed=None):\n np.random.seed(seed)\n torch.manual_seed(np.random.randint(int(1e6)))\n\ndef set_one_thread():\n os.environ['OMP_NUM_THREADS'] = '1'\n os.environ['MKL_NUM_THREADS'] = '1'\n torch.set_num_threads(1)\n\ndef huber(x, k=1.0):\n return torch.where(x.abs() < k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))\n\ndef epsilon_greedy(epsilon, x):\n if len(x.shape) == 1:\n return np.random.randint(len(x)) if np.random.rand() < epsilon else np.argmax(x)\n elif len(x.shape) == 2:\n random_actions = np.random.randint(x.shape[1], size=x.shape[0])\n greedy_actions = np.argmax(x, axis=-1)\n dice = np.random.rand(x.shape[0])\n return np.where(dice < epsilon, random_actions, greedy_actions)\n\ndef sync_grad(target_network, src_network):\n for param, src_param in zip(target_network.parameters(), src_network.parameters()):\n param._grad = src_param.grad.clone()\n\n# adapted from https://github.com/pytorch/pytorch/issues/12160\ndef batch_diagonal(input):\n # idea from here: https://discuss.pytorch.org/t/batch-of-diagonal-matrix/13560\n # batches a stack of vectors (batch x N) -> a stack of diagonal matrices (batch x N x N)\n # works in 2D -> 3D, should also work in higher dimensions\n # make a zero matrix, which duplicates the last dim of input\n dims = input.size()\n dims = dims + dims[-1:]\n output = torch.zeros(dims, device=input.device)\n # stride across the first dimensions, add one to get the diagonal of the last dimension\n strides = [output.stride(i) for i in range(input.dim() - 1 )]\n strides.append(output.size(-1) + 1)\n # stride and copy the input to the diagonal\n output.as_strided(input.size(), strides).copy_(input)\n return output\n\ndef batch_trace(input):\n i = range_tensor(input.size(-1))\n t = input[:, i, i].sum(-1).unsqueeze(-1).unsqueeze(-1)\n return t\n\n\nclass DiagonalNormal:\n def __init__(self, mean, std):\n self.dist = torch.distributions.Normal(mean, std)\n self.sample = self.dist.sample\n\n def log_prob(self, action):\n return self.dist.log_prob(action).sum(-1).unsqueeze(-1)\n\n def entropy(self):\n return self.dist.entropy().sum(-1).unsqueeze(-1)\n\n def cdf(self, action):\n return self.dist.cdf(action).prod(-1).unsqueeze(-1)\n\nclass BatchCategorical:\n def __init__(self, logits):\n self.pre_shape = logits.size()[:-1]\n logits = logits.view(-1, logits.size(-1))\n self.dist = torch.distributions.Categorical(logits=logits)\n\n def log_prob(self, action):\n log_pi = self.dist.log_prob(action.view(-1))\n log_pi = log_pi.view(action.size()[:-1] + (-1, ))\n return log_pi\n\n def entropy(self):\n ent = self.dist.entropy()\n ent = ent.view(self.pre_shape + (-1, ))\n return ent\n\n def sample(self, sample_shape=torch.Size([])):\n 
ret = self.dist.sample(sample_shape)\n ret = ret.view(sample_shape + self.pre_shape + (-1, ))\n return ret\n\n" ]
[ [ "torch.Size", "torch.zeros", "torch.arange", "torch.tensor", "torch.distributions.Categorical", "torch.set_num_threads", "torch.distributions.Normal", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KidChou/yolov5_prune
[ "126054962197a51c79140384c591b9190d146019" ]
[ "models/common.py" ]
[ "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nCommon modules\n\"\"\"\n\nimport logging\nimport math\nimport warnings\nfrom copy import copy\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom torch.cuda import amp\n\nfrom utils.datasets import exif_transpose, letterbox\nfrom utils.general import colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, \\\n scale_coords, xyxy2xywh\nfrom utils.plots import Annotator, colors\nfrom utils.torch_utils import time_sync\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef autopad(k, p=None): # kernel, padding\n # Pad to 'same'\n if p is None:\n p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad\n return p\n\n\nclass Conv(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def forward_fuse(self, x):\n return self.act(self.conv(x))\n\n\nclass DWConv(Conv):\n # Depth-wise convolution class\n def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)\n\n\nclass TransformerLayer(nn.Module):\n # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)\n def __init__(self, c, num_heads):\n super().__init__()\n self.q = nn.Linear(c, c, bias=False)\n self.k = nn.Linear(c, c, bias=False)\n self.v = nn.Linear(c, c, bias=False)\n self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)\n self.fc1 = nn.Linear(c, c, bias=False)\n self.fc2 = nn.Linear(c, c, bias=False)\n\n def forward(self, x):\n x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x\n x = self.fc2(self.fc1(x)) + x\n return x\n\n\nclass TransformerBlock(nn.Module):\n # Vision Transformer https://arxiv.org/abs/2010.11929\n def __init__(self, c1, c2, num_heads, num_layers):\n super().__init__()\n self.conv = None\n if c1 != c2:\n self.conv = Conv(c1, c2)\n self.linear = nn.Linear(c2, c2) # learnable position embedding\n self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])\n self.c2 = c2\n\n def forward(self, x):\n if self.conv is not None:\n x = self.conv(x)\n b, _, w, h = x.shape\n p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3)\n return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h)\n\n\nclass Bottleneck(nn.Module):\n # Standard bottleneck\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_, c2, 3, 1, g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass BottleneckCSP(nn.Module):\n # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = nn.Conv2d(c1, c_, 1, 1, 
bias=False)\n self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n self.cv4 = Conv(2 * c_, c2, 1, 1)\n self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)\n self.act = nn.LeakyReLU(0.1, inplace=True)\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n\n def forward(self, x):\n y1 = self.cv3(self.m(self.cv1(x)))\n y2 = self.cv2(x)\n return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))\n\n\nclass C3(nn.Module):\n # CSP Bottleneck with 3 convolutions\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c1, c_, 1, 1)\n self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\n\n def forward(self, x):\n return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))\n\n\nclass C3TR(C3):\n # C3 module with TransformerBlock()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = TransformerBlock(c_, c_, 4, n)\n\n\nclass C3SPP(C3):\n # C3 module with SPP()\n def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = SPP(c_, c_, k)\n\n\nclass C3Ghost(C3):\n # C3 module with GhostBottleneck()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e) # hidden channels\n self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)])\n\n\nclass SPP(nn.Module):\n # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729\n def __init__(self, c1, c2, k=(5, 9, 13)):\n super().__init__()\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)\n self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\n\n def forward(self, x):\n x = self.cv1(x)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning\n return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))\n\n\nclass SPPF(nn.Module):\n # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher\n def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))\n super().__init__()\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_ * 4, c2, 1, 1)\n self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)\n\n def forward(self, x):\n x = self.cv1(x)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning\n y1 = self.m(x)\n y2 = self.m(y1)\n return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))\n\n\nclass Focus(nn.Module):\n # Focus wh information into c-space\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.conv = Conv(c1 * 4, c2, k, s, p, g, act)\n # self.contract = Contract(gain=2)\n\n def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)\n return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))\n # return self.conv(self.contract(x))\n\n\nclass GhostConv(nn.Module):\n # Ghost Convolution 
https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups\n super().__init__()\n c_ = c2 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, k, s, None, g, act)\n self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)\n\n def forward(self, x):\n y = self.cv1(x)\n return torch.cat([y, self.cv2(y)], 1)\n\n\nclass GhostBottleneck(nn.Module):\n # Ghost Bottleneck https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride\n super().__init__()\n c_ = c2 // 2\n self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw\n DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw\n GhostConv(c_, c2, 1, 1, act=False)) # pw-linear\n self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),\n Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()\n\n def forward(self, x):\n return self.conv(x) + self.shortcut(x)\n\n\nclass Contract(nn.Module):\n # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'\n s = self.gain\n x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)\n return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)\n\n\nclass Expand(nn.Module):\n # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'\n s = self.gain\n x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)\n x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)\n return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)\n\n\nclass Concat(nn.Module):\n # Concatenate a list of tensors along dimension\n def __init__(self, dimension=1):\n super().__init__()\n self.d = dimension\n\n def forward(self, x):\n return torch.cat(x, self.d)\n\n\nclass AutoShape(nn.Module):\n # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS\n conf = 0.25 # NMS confidence threshold\n iou = 0.45 # NMS IoU threshold\n classes = None # (optional list) filter by class\n multi_label = False # NMS multiple labels per box\n max_det = 1000 # maximum number of detections per image\n\n def __init__(self, model):\n super().__init__()\n self.model = model.eval()\n\n def autoshape(self):\n LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()\n return self\n\n def _apply(self, fn):\n # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers\n self = super()._apply(fn)\n m = self.model.model[-1] # Detect()\n m.stride = fn(m.stride)\n m.grid = list(map(fn, m.grid))\n if isinstance(m.anchor_grid, list):\n m.anchor_grid = list(map(fn, m.anchor_grid))\n return self\n\n @torch.no_grad()\n def forward(self, imgs, size=640, augment=False, profile=False):\n # Inference from various sources. 
For height=640, width=1280, RGB images example inputs are:\n # file: imgs = 'data/images/zidane.jpg' # str or PosixPath\n # URI: = 'https://ultralytics.com/images/zidane.jpg'\n # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)\n # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)\n # numpy: = np.zeros((640,1280,3)) # HWC\n # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)\n # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images\n\n t = [time_sync()]\n p = next(self.model.parameters()) # for device and type\n if isinstance(imgs, torch.Tensor): # torch\n with amp.autocast(enabled=p.device.type != 'cpu'):\n return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference\n\n # Pre-process\n n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images\n shape0, shape1, files = [], [], [] # image and inference shapes, filenames\n for i, im in enumerate(imgs):\n f = f'image{i}' # filename\n if isinstance(im, (str, Path)): # filename or uri\n im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im\n im = np.asarray(exif_transpose(im))\n elif isinstance(im, Image.Image): # PIL Image\n im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f\n files.append(Path(f).with_suffix('.jpg').name)\n if im.shape[0] < 5: # image in CHW\n im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)\n im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input\n s = im.shape[:2] # HWC\n shape0.append(s) # image shape\n g = (size / max(s)) # gain\n shape1.append([y * g for y in s])\n imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update\n shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape\n x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad\n x = np.stack(x, 0) if n > 1 else x[0][None] # stack\n x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW\n x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32\n t.append(time_sync())\n\n with amp.autocast(enabled=p.device.type != 'cpu'):\n # Inference\n y = self.model(x, augment, profile)[0] # forward\n t.append(time_sync())\n\n # Post-process\n y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes,\n multi_label=self.multi_label, max_det=self.max_det) # NMS\n for i in range(n):\n scale_coords(shape1, y[i][:, :4], shape0[i])\n\n t.append(time_sync())\n return Detections(imgs, y, files, t, self.names, x.shape)\n\n\nclass Detections:\n # YOLOv5 detections class for inference results\n def __init__(self, imgs, pred, files, times=None, names=None, shape=None):\n super().__init__()\n d = pred[0].device # device\n gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations\n self.imgs = imgs # list of images as numpy arrays\n self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)\n self.names = names # class names\n self.files = files # image filenames\n self.xyxy = pred # xyxy pixels\n self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels\n self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized\n self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized\n self.n = len(self.pred) # number of images (batch size)\n self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)\n self.s = shape # inference BCHW shape\n\n def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):\n crops = []\n for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):\n s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string\n if pred.shape[0]:\n for c in pred[:, -1].unique():\n n = (pred[:, -1] == c).sum() # detections per class\n s += f\"{n} {self.names[int(c)]}{'s' * (n > 1)}, \" # add to string\n if show or save or render or crop:\n annotator = Annotator(im, example=str(self.names))\n for *box, conf, cls in reversed(pred): # xyxy, confidence, class\n label = f'{self.names[int(cls)]} {conf:.2f}'\n if crop:\n file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None\n crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label,\n 'im': save_one_box(box, im, file=file, save=save)})\n else: # all others\n annotator.box_label(box, label, color=colors(cls))\n im = annotator.im\n else:\n s += '(no detections)'\n\n im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np\n if pprint:\n LOGGER.info(s.rstrip(', '))\n if show:\n im.show(self.files[i]) # show\n if save:\n f = self.files[i]\n im.save(save_dir / f) # save\n if i == self.n - 1:\n LOGGER.info(f\"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}\")\n if render:\n self.imgs[i] = np.asarray(im)\n if crop:\n if save:\n LOGGER.info(f'Saved results to {save_dir}\\n')\n return crops\n\n def print(self):\n self.display(pprint=True) # print results\n LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' %\n self.t)\n\n def show(self):\n self.display(show=True) # show results\n\n def save(self, save_dir='runs/detect/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir\n self.display(save=True, save_dir=save_dir) # save results\n\n def crop(self, save=True, save_dir='runs/detect/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None\n return 
self.display(crop=True, save=save, save_dir=save_dir) # crop results\n\n def render(self):\n self.display(render=True) # render results\n return self.imgs\n\n def pandas(self):\n # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])\n new = copy(self) # return copy\n ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns\n cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns\n for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):\n a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update\n setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])\n return new\n\n def tolist(self):\n # return a list of Detections objects, i.e. 'for result in results.tolist():'\n x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]\n for d in x:\n for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:\n setattr(d, k, getattr(d, k)[0]) # pop out of list\n return x\n\n def __len__(self):\n return self.n\n\n\nclass Classify(nn.Module):\n # Classification head, i.e. x(b,c1,20,20) to x(b,c2)\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)\n self.flat = nn.Flatten()\n\n def forward(self, x):\n z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list\n return self.flat(self.conv(z)) # flatten to x(b,c2)\n" ]
[ [ "torch.cat", "numpy.asarray", "pandas.DataFrame", "torch.cuda.amp.autocast", "torch.no_grad", "torch.nn.MultiheadAttention", "torch.from_numpy", "numpy.stack", "torch.tensor", "numpy.ascontiguousarray", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "torch.nn.SiLU", "torch.nn.Flatten", "numpy.tile", "torch.nn.MaxPool2d", "torch.nn.Identity", "torch.nn.AdaptiveAvgPool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
cx201910/first_ml
[ "b4ece4f275911707dda5ca461989f1dfdbf25021" ]
[ "backend/ml_service/apps/endpoints/views.py" ]
[ "from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom rest_framework import mixins\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.decorators import action\n\nfrom .models import Endpoint\nfrom .serializers import EndpointSerializer\n\nfrom .models import MLAlgorithm\nfrom .serializers import MLAlgorithmSerializer\n\nfrom .models import MLAlgorithmStatus\nfrom .serializers import MLAlgorithmStatusSerializer\n\nfrom .models import MLRequest\nfrom .serializers import MLRequestSerializer\n\nimport json\nfrom numpy.random import rand\nfrom rest_framework import views, status\nfrom rest_framework.response import Response\nfrom apps.ml.registry import MLRegistry\nfrom ml_service.wsgi import registry\n\nfrom django.db import transaction\nfrom apps.endpoints.models import ABTest\nfrom apps.endpoints.serializers import ABTestSerializer\nfrom apps.endpoints.models import PredictStore\nfrom apps.endpoints.serializers import PredictStoreSerializer\n\nfrom django.db.models import F\nimport datetime\n\n# Create your views here.\nclass EndpointViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = EndpointSerializer\n queryset = Endpoint.objects.all()\n\n\nclass MLAlgorithmViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = MLAlgorithmSerializer\n queryset = MLAlgorithm.objects.all()\n\n\ndef deactivate_other_statuses(instance):\n old_statuses = MLAlgorithmStatus.objects.filter(parent_mlalgorithm = instance.parent_mlalgorithm, created_at__lt=instance.created_at, active=True)\n for i in range(len(old_statuses)):\n old_statuses[i].active = False\n MLAlgorithmStatus.objects.bulk_update(old_statuses, ['active'])\n\nclass MLAlgorithmStatusViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, mixins.CreateModelMixin, viewsets.GenericViewSet):\n serializer_class = MLAlgorithmStatusSerializer\n queryset = MLAlgorithmStatus.objects.all()\n def perform_create(self, serializer):\n try:\n with transaction.atomic():\n instance = serializer.save(active=True)\n # set active=False for other statuses\n deactivate_other_statuses(instance)\n\n except Exception as e:\n raise APIException(str(e))\n\n\nclass MLRequestViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet):\n serializer_class = MLRequestSerializer\n queryset = MLRequest.objects.all()\n\n\nclass PredictView(views.APIView):\n def post(self, request, endpoint_name, format=None):\n\n algorithm_status = self.request.query_params.get('status', 'production')\n algorithm_version = self.request.query_params.get('version')\n\n algs = MLAlgorithm.objects.filter(parent_endpoint__name=endpoint_name, status__status=algorithm_status, status__active=True)\n\n if algorithm_version is not None:\n algs = algs.filter(version = algorithm_version)\n\n if len(algs) == 0:\n return Response(\n {'status': 'Error', 'message': 'ML algorithm is not available'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n if len(algs) != 1 and algorithm_status != 'ab_testing':\n return Response(\n {'status': f'Error of {len(algs)} algorithms', 'message': 'ML algorithm selection is ambiguous. 
Please specify algorithm version.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n alg_index = 0\n if algorithm_status == 'ab_testing':\n alg_index = 0 if rand() < 0.5 else 1\n\n algorithm_object = registry.endpoints[algs[alg_index].id]\n prediction = algorithm_object.compute_prediction(request.data)\n\n\n label = prediction['label'] if 'label' in prediction else 'error'\n ml_request = MLRequest(\n input_data=json.dumps(request.data),\n full_response=prediction,\n response=label,\n feedback='',\n parent_mlalgorithm=algs[alg_index],\n )\n ml_request.save()\n\n prediction['request_id'] = ml_request.id \n\n return Response(prediction)\n\n\nclass ABTestViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin,\n viewsets.GenericViewSet, mixins.CreateModelMixin,\n mixins.UpdateModelMixin):\n serializer_class = ABTestSerializer\n queryset = ABTest.objects.all()\n\n def perform_create(self, serializer):\n try:\n with transaction.atomic():\n instance = serializer.save()\n # update status for first algorithm\n\n status_1 = MLAlgorithmStatus(status = 'ab_testing',\n created_by=instance.created_by,\n parent_mlalgorithm = instance.parent_mlalgorithm_1,\n active=True)\n status_1.save()\n deactivate_other_statuses(status_1)\n # update status for second algorithm\n status_2 = MLAlgorithmStatus(status = 'ab_testing',\n created_by=instance.created_by,\n parent_mlalgorithm = instance.parent_mlalgorithm_2,\n active=True)\n status_2.save()\n deactivate_other_statuses(status_2)\n\n except Exception as e:\n raise APIException(str(e))\n\n\nclass StopABTestView(views.APIView):\n def post(self, request, ab_test_id, format=None):\n\n try:\n ab_test = ABTest.objects.get(pk=ab_test_id)\n\n if ab_test.ended_at is not None:\n return Response({'message': 'AB Test already finished.'})\n\n date_now = datetime.datetime.now()\n # alg #1 accuracy\n all_responses_1 = MLRequest.objects.filter(parent_mlalgorithm=ab_test.parent_mlalgorithm_1, created_at__gt = ab_test.created_at, created_at__lt = date_now).count()\n correct_responses_1 = MLRequest.objects.filter(parent_mlalgorithm=ab_test.parent_mlalgorithm_1, created_at__gt = ab_test.created_at, created_at__lt = date_now, response=F('feedback')).count()\n accuracy_1 = correct_responses_1 / float(all_responses_1)\n print(all_responses_1, correct_responses_1, accuracy_1)\n\n # alg #2 accuracy\n all_responses_2 = MLRequest.objects.filter(parent_mlalgorithm=ab_test.parent_mlalgorithm_2, created_at__gt = ab_test.created_at, created_at__lt = date_now).count()\n correct_responses_2 = MLRequest.objects.filter(parent_mlalgorithm=ab_test.parent_mlalgorithm_2, created_at__gt = ab_test.created_at, created_at__lt = date_now, response=F('feedback')).count()\n accuracy_2 = correct_responses_2 / float(all_responses_2)\n print(all_responses_2, correct_responses_2, accuracy_2)\n\n # select algorithm with higher accuracy\n alg_id_1, alg_id_2 = ab_test.parent_mlalgorithm_1, ab_test.parent_mlalgorithm_2\n # swap\n if accuracy_1 < accuracy_2:\n alg_id_1, alg_id_2 = alg_id_2, alg_id_1\n\n status_1 = MLAlgorithmStatus(status = 'production',\n created_by=ab_test.created_by,\n parent_mlalgorithm = alg_id_1,\n active=True)\n status_1.save()\n deactivate_other_statuses(status_1)\n # update status for second algorithm\n status_2 = MLAlgorithmStatus(status = 'testing',\n created_by=ab_test.created_by,\n parent_mlalgorithm = alg_id_2,\n active=True)\n status_2.save()\n deactivate_other_statuses(status_2)\n\n\n summary = 'Algorithm #1 accuracy: {}, Algorithm #2 accuracy: {}'.format(accuracy_1, accuracy_2)\n 
ab_test.ended_at = date_now\n ab_test.summary = summary\n ab_test.save()\n\n except Exception as e:\n return Response({'status': 'Error', 'message': str(e)},\n status=status.HTTP_400_BAD_REQUEST\n )\n return Response({'message': 'AB Test finished.', 'summary': summary})\n\n\nclass PredictStoreViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):\n\n serializer_class = PredictStoreSerializer\n queryset = PredictStore.objects.all()\n \n @action(detail=True, methods=['post'])\n def predict(self, request, pk=None, format=None): \n serializer = PredictStoreSerializer(data=request.data) \n \n if serializer.is_valid(): \n ml_algorithm_s = serializer.validated_data['ml_algorithm'] \n created_by_s = serializer.validated_data['created_by'] \n target = serializer.validated_data['target']\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n alg_status = MLAlgorithmStatus(status='production',\n created_by=created_by_s,\n parent_mlalgorithm=ml_algorithm_s, active=True)\n alg_status.save()\n deactivate_other_statuses(alg_status) \n \n data = json.loads(request.data['input_data'])\n algs = MLAlgorithm.objects.filter(status__parent_mlalgorithm=ml_algorithm_s, status__active=True)\n\n algorithm_object = registry.endpoints[algs[0].id]\n prediction = algorithm_object.compute_prediction(data)\n label = prediction['label'] if 'label' in prediction else 'error'\n ml_request = MLRequest(\n input_data=json.dumps(data),\n full_response=prediction,\n response=label,\n feedback=target,\n parent_mlalgorithm=algs[0], )\n ml_request.save()\n\n prediction[\"request_id\"] = ml_request.id\n \n if serializer.is_valid(): \n serializer.validated_data['prediction'] = prediction \n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if PredictStore.objects.filter(id=pk).exists():\n instance = PredictStore.objects.get(id=pk) \n instance.prediction = prediction\n instance.target = target\n instance.save() \n else: \n serializer.save() \n return Response(serializer.data) \n\n" ]
[ [ "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rajahaseeb147/3dFacialPartSegmentation
[ "aedfed75558761295e9bf602b18c2c3b631080e5", "aedfed75558761295e9bf602b18c2c3b631080e5" ]
[ "Deep Learning/Implementation_3/models/pointnet_cls.py", "Deep Learning/Implementation_3/data_utils/ShapeNetDataLoader.py" ]
[ "import torch.nn as nn\r\nimport torch.utils.data\r\nimport torch.nn.functional as F\r\nfrom pointnet_utils import PointNetEncoder, feature_transform_reguliarzer\r\n\r\nclass get_model(nn.Module):\r\n def __init__(self, k=40, normal_channel=True):\r\n super(get_model, self).__init__()\r\n if normal_channel:\r\n channel = 6\r\n else:\r\n channel = 3\r\n self.feat = PointNetEncoder(global_feat=True, feature_transform=True, channel=channel)\r\n self.fc1 = nn.Linear(1024, 512)\r\n self.fc2 = nn.Linear(512, 256)\r\n self.fc3 = nn.Linear(256, k)\r\n self.dropout = nn.Dropout(p=0.4)\r\n self.bn1 = nn.BatchNorm1d(512)\r\n self.bn2 = nn.BatchNorm1d(256)\r\n self.relu = nn.ReLU()\r\n\r\n def forward(self, x):\r\n x, trans, trans_feat = self.feat(x)\r\n x = F.relu(self.bn1(self.fc1(x)))\r\n x = F.relu(self.bn2(self.dropout(self.fc2(x))))\r\n x = self.fc3(x)\r\n x = F.log_softmax(x, dim=1)\r\n return x, trans_feat\r\n\r\nclass get_loss(torch.nn.Module):\r\n def __init__(self, mat_diff_loss_scale=0.001):\r\n super(get_loss, self).__init__()\r\n self.mat_diff_loss_scale = mat_diff_loss_scale\r\n\r\n def forward(self, pred, target, trans_feat):\r\n loss = F.nll_loss(pred, target)\r\n mat_diff_loss = feature_transform_reguliarzer(trans_feat)\r\n\r\n total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale\r\n return total_loss\r\n", "# *_*coding:utf-8 *_*\r\nimport os\r\nimport json\r\nimport warnings\r\nimport numpy as np\r\nfrom torch.utils.data import Dataset\r\nwarnings.filterwarnings('ignore')\r\n\r\ndef pc_normalize(pc):\r\n centroid = np.mean(pc, axis=0)\r\n pc = pc - centroid\r\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\r\n pc = pc / m\r\n return pc\r\n\r\nclass PartNormalDataset(Dataset):\r\n def __init__(self,root = './data/shapenetcore_partanno_segmentation_benchmark_v0_normal', npoints=2500, split='train', class_choice=None, normal_channel=False):\r\n self.npoints = npoints\r\n self.root = root\r\n self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')\r\n self.cat = {}\r\n self.normal_channel = normal_channel\r\n\r\n\r\n with open(self.catfile, 'r') as f:\r\n for line in f:\r\n ls = line.strip().split()\r\n self.cat[ls[0]] = ls[1]\r\n self.cat = {k: v for k, v in self.cat.items()}\r\n self.classes_original = dict(zip(self.cat, range(len(self.cat))))\r\n\r\n if not class_choice is None:\r\n self.cat = {k:v for k,v in self.cat.items() if k in class_choice}\r\n # print(self.cat)\r\n\r\n self.meta = {}\r\n with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:\r\n train_ids = set([str(d.split('/')[2]) for d in json.load(f)])\r\n with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:\r\n val_ids = set([str(d.split('/')[2]) for d in json.load(f)])\r\n with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:\r\n test_ids = set([str(d.split('/')[2]) for d in json.load(f)])\r\n for item in self.cat:\r\n # print('category', item)\r\n self.meta[item] = []\r\n dir_point = os.path.join(self.root, self.cat[item])\r\n fns = sorted(os.listdir(dir_point))\r\n # print(fns[0][0:-4])\r\n if split == 'trainval':\r\n fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]\r\n elif split == 'train':\r\n fns = [fn for fn in fns if fn[0:-4] in train_ids]\r\n elif split == 'val':\r\n fns = [fn for fn in fns if fn[0:-4] in val_ids]\r\n elif split == 'test':\r\n fns = [fn for fn in fns if fn[0:-4] in test_ids]\r\n else:\r\n print('Unknown split: %s. 
Exiting..' % (split))\r\n exit(-1)\r\n\r\n # print(os.path.basename(fns))\r\n for fn in fns:\r\n token = (os.path.splitext(os.path.basename(fn))[0])\r\n self.meta[item].append(os.path.join(dir_point, token + '.txt'))\r\n\r\n self.datapath = []\r\n for item in self.cat:\r\n for fn in self.meta[item]:\r\n self.datapath.append((item, fn))\r\n\r\n self.classes = {}\r\n for i in self.cat.keys():\r\n self.classes[i] = self.classes_original[i]\r\n\r\n # Mapping from category ('Chair') to a list of int [10,11,12,13] as segmentation labels\r\n self.seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],\r\n 'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46],\r\n 'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27],\r\n 'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],\r\n 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}\r\n\r\n # for cat in sorted(self.seg_classes.keys()):\r\n # print(cat, self.seg_classes[cat])\r\n\r\n self.cache = {} # from index to (point_set, cls, seg) tuple\r\n self.cache_size = 20000\r\n\r\n\r\n def __getitem__(self, index):\r\n if index in self.cache:\r\n point_set, cls, seg = self.cache[index]\r\n else:\r\n fn = self.datapath[index]\r\n cat = self.datapath[index][0]\r\n cls = self.classes[cat]\r\n cls = np.array([cls]).astype(np.int32)\r\n data = np.loadtxt(fn[1]).astype(np.float32)\r\n if not self.normal_channel:\r\n point_set = data[:, 0:3]\r\n else:\r\n point_set = data[:, 0:6]\r\n seg = data[:, -1].astype(np.int32)\r\n if len(self.cache) < self.cache_size:\r\n self.cache[index] = (point_set, cls, seg)\r\n point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])\r\n\r\n choice = np.random.choice(len(seg), self.npoints, replace=True)\r\n # resample\r\n point_set = point_set[choice, :]\r\n seg = seg[choice]\r\n\r\n return point_set, cls, seg\r\n\r\n def __len__(self):\r\n return len(self.datapath)\r\n\r\n\r\n\r\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.nn.functional.log_softmax", "torch.nn.functional.nll_loss", "torch.nn.Linear", "torch.nn.ReLU" ], [ "numpy.array", "numpy.mean", "numpy.sum", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JinGyeSetBirdsFree/FudanOCR
[ "fd79b679044ea23fd9eb30691453ed0805d2e98b", "fd79b679044ea23fd9eb30691453ed0805d2e98b", "fd79b679044ea23fd9eb30691453ed0805d2e98b", "fd79b679044ea23fd9eb30691453ed0805d2e98b", "e6b18b0eefaf832b2eb7198f5df79e00bd4cee36" ]
[ "model/super_resolution_model/DocumentSRModel/models/srunitnet_2x_2x.py", "model/detection_model/AdvancedEAST/nms/setup.py", "model/detection_model/LSN/lib/model/test_roi_align.py", "model/detection_model/PSENet/.ipynb_checkpoints/train_ic15-checkpoint.py", "model/recognition_model/RARE/models_cap.py" ]
[ "import numpy as np\nfrom scipy.misc import imsave\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport torchvision\nfrom torchvision import models\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as Transforms\n\nfrom dataloader import TrainDataset, DevDataset, TestDataset\nfrom networks.unet import UNet, unet_weight_init\nfrom networks.hed import HED, HED_1L, hed_weight_init\nfrom networks.resnet import ResnetGenerator, Upscale4xResnetGenerator, Upscale2xResnetGenerator\nfrom networks.resnet_wdsr import WDSRResnetGenerator\nfrom networks.discriminators import NLayerDiscriminator\nfrom networks.vggfeature import VGGFeatureMap\nfrom utils.visualizer import Visualizer\nfrom utils.loss import BCE2d\nfrom utils.normalize import norm, denorm, weights_init_normal\nfrom utils.target import PSNR, SSIM, batch_compare_filter, batch_SSIM\n\n\nUSE_GPU = torch.cuda.is_available()\nNORM = 'batch'\n\nfrom scipy.misc import imsave\ndef save_img(img, save_fn=''):\n if not os.path.exists(os.path.split(save_fn)[0]):\n os.makedirs(os.path.split(save_fn)[0])\n if list(img.shape)[0] == 3:\n # save_image = img * 125.0\n save_image = img\n save_image = save_image.clamp(0, 1).numpy().transpose(1, 2, 0)\n else:\n save_image = img.squeeze().clamp(0, 1).numpy().transpose(1, 2, 0)\n\n imsave(save_fn, save_image)\n\n\nclass Model(object):\n def __init__(self, cfg):\n # parameter init\n self.env = cfg.env\n self.train_dataset = cfg.train_dataset\n self.valid_dataset = cfg.valid_dataset\n self.test_dataset = cfg.test_dataset\n self.data_dir = cfg.data_dir\n self.save_dir = cfg.save_dir\n\n self.num_threads = int(cfg.num_threads)\n self.num_epochs = int(cfg.num_epochs)\n self.save_epochs = int(cfg.save_epochs)\n self.pretrain_epochs = int(cfg.pretrain_epochs)\n self.batch_size = int(cfg.batch_size)\n self.valid_batch_size = int(cfg.valid_batch_size)\n self.test_batch_size = int(cfg.test_batch_size)\n self.plot_iter = int(cfg.plot_iter)\n self.crop_size = int(cfg.crop_size)\n self.scale_factor = int(cfg.scale_factor)\n self.lr = float(cfg.lr)\n\n def load_dataset(self, mode='train', random_scale=True, rotate=True, fliplr=True, fliptb=True):\n if mode == 'train':\n train_set = TrainDataset(os.path.join(self.data_dir, self.train_dataset),\n crop_size=self.crop_size, scale_factor=self.scale_factor,\n random_scale=random_scale, rotate=rotate, fliplr=fliplr, fliptb=fliptb)\n return DataLoader(dataset=train_set, num_workers=self.num_threads,\n batch_size=self.batch_size, shuffle=True)\n elif mode == 'valid':\n valid_set = DevDataset(os.path.join(\n self.data_dir, self.valid_dataset))\n return DataLoader(dataset=valid_set, num_workers=self.num_threads,\n batch_size=self.valid_batch_size, shuffle=True)\n elif mode == 'test':\n test_set = TestDataset(os.path.join(\n self.data_dir, self.test_dataset))\n return DataLoader(dataset=test_set, num_workers=self.num_threads,\n batch_size=self.test_batch_size, shuffle=False)\n\n def train(self, edgenetpath=None, sr2x1_path=None, sr2x2_path=None, srcnn_path=None, srresnet_path=None,\n is_fine_tune=False, random_scale=True, rotate=True, fliplr=True, fliptb=True):\n vis = Visualizer(self.env)\n\n print('================ Loading datasets =================')\n # load training dataset\n print('## Current Mode: Train')\n # train_data_loader = self.load_dataset(mode='valid')\n train_data_loader = self.load_dataset(\n mode='train', 
random_scale=random_scale, rotate=rotate, fliplr=fliplr, fliptb=fliptb)\n\n ##########################################################\n ##################### build network ######################\n ##########################################################\n print('Building Networks and initialize parameters\\' weights....')\n # init sr resnet\n # srresnet2x1 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n # norm=NORM, activation='prelu', learn_residual=True)\n # srresnet2x2 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n # norm=NORM, activation='prelu',learn_residual=True)\n srresnet2x1 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)\n srresnet2x2 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)\n srresnet2x1.apply(weights_init_normal)\n srresnet2x2.apply(weights_init_normal)\n\n # init discriminator\n discnet = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=5)\n\n # init edgenet\n edgenet = HED_1L()\n if edgenetpath is None or not os.path.exists(edgenetpath):\n raise Exception('Invalid edgenet model')\n else:\n pretrained_dict = torch.load(edgenetpath)\n model_dict = edgenet.state_dict()\n pretrained_dict = {k: v for k,\n v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n edgenet.load_state_dict(model_dict)\n\n # init vgg feature\n featuremapping = VGGFeatureMap(models.vgg19(pretrained=True))\n\n # load pretrained srresnet or just initialize\n if sr2x1_path is None or not os.path.exists(sr2x1_path):\n print('===> initialize the srresnet2x1')\n print('======> No pretrained model')\n else:\n print('======> loading the weight from pretrained model')\n pretrained_dict = torch.load(sr2x1_path)\n model_dict = srresnet2x1.state_dict()\n\n pretrained_dict = {k: v for k,\n v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n srresnet2x1.load_state_dict(model_dict)\n\n if sr2x2_path is None or not os.path.exists(sr2x2_path):\n print('===> initialize the srresnet2x2')\n print('======> No pretrained model')\n else:\n print('======> loading the weight from pretrained model')\n pretrained_dict = torch.load(sr2x2_path)\n model_dict = srresnet2x2.state_dict()\n\n pretrained_dict = {k: v for k,\n v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n srresnet2x2.load_state_dict(model_dict)\n\n # optimizer init\n # different learning rate\n lr = self.lr\n\n srresnet2x1_optimizer = optim.Adam(\n srresnet2x1.parameters(), lr=lr, betas=(0.9, 0.999))\n srresnet2x2_optimizer = optim.Adam(\n srresnet2x2.parameters(), lr=lr, betas=(0.9, 0.999))\n disc_optimizer = optim.Adam(\n discnet.parameters(), lr=lr/10, betas=(0.9, 0.999))\n\n # loss function init\n MSE_loss = nn.MSELoss()\n BCE_loss = nn.BCELoss()\n\n # cuda accelerate\n if USE_GPU:\n edgenet.cuda()\n srresnet2x1.cuda()\n srresnet2x2.cuda()\n discnet.cuda()\n featuremapping.cuda()\n MSE_loss.cuda()\n BCE_loss.cuda()\n print('\\tCUDA acceleration is available.')\n\n ##########################################################\n ##################### train network ######################\n ##########################################################\n import torchnet as tnt\n from tqdm import tqdm\n from PIL import Image\n\n # batchnorm = nn.BatchNorm2d(1).cuda()\n\n edge_avg_loss = tnt.meter.AverageValueMeter()\n total_avg_loss = tnt.meter.AverageValueMeter()\n disc_avg_loss = tnt.meter.AverageValueMeter()\n # psnr_2x_avg = tnt.meter.AverageValueMeter()\n # ssim_2x_avg = 
tnt.meter.AverageValueMeter()\n # psnr_4x_avg = tnt.meter.AverageValueMeter()\n # ssim_4x_avg = tnt.meter.AverageValueMeter()\n\n srresnet2x1.train()\n srresnet2x2.train()\n discnet.train()\n\n itcnt = 0\n for epoch in range(self.num_epochs):\n edge_avg_loss.reset()\n total_avg_loss.reset()\n disc_avg_loss.reset()\n # psnr_2x_avg.reset()\n # ssim_2x_avg.reset()\n # psnr_4x_avg.reset()\n # ssim_4x_avg.reset()\n\n # learning rate is decayed by a factor every 20 epoch\n if (epoch + 1) % 5 == 0:\n for param_group in srresnet2x1_optimizer.param_groups:\n param_group[\"lr\"] *= 0.5\n print(\"Learning rate decay for srresnet2x1: lr={}\".format(\n srresnet2x1_optimizer.param_groups[0][\"lr\"]))\n for param_group in srresnet2x2_optimizer.param_groups:\n param_group[\"lr\"] *= 0.5\n print(\"Learning rate decay for srresnet2x2: lr={}\".format(\n srresnet2x2_optimizer.param_groups[0][\"lr\"]))\n for param_group in disc_optimizer.param_groups:\n param_group[\"lr\"] *= 0.5\n print(\"Learning rate decay for discnet: lr={}\".format(\n disc_optimizer.param_groups[0][\"lr\"]))\n\n itbar = tqdm(enumerate(train_data_loader))\n for ii, (hr, lr2x, lr4x, bc2x, bc4x) in itbar:\n\n mini_batch = hr.size()[0]\n\n hr_ = Variable(hr)\n lr2x_ = Variable(lr2x)\n lr4x_ = Variable(lr4x)\n bc2x_ = Variable(bc2x)\n bc4x_ = Variable(bc4x)\n real_label = Variable(torch.ones(mini_batch))\n fake_label = Variable(torch.zeros(mini_batch))\n\n # cuda mode setting\n if USE_GPU:\n hr_ = hr_.cuda()\n lr2x_ = lr2x_.cuda()\n lr4x_ = lr4x_.cuda()\n bc2x_ = bc2x_.cuda()\n bc4x_ = bc4x_.cuda()\n real_label = real_label.cuda()\n fake_label = fake_label.cuda()\n\n # =============================================================== #\n # ================ Edge-based srresnet training ================= #\n # =============================================================== #\n sr2x_ = srresnet2x1(lr4x_)\n sr4x_ = srresnet2x2(lr2x_)\n\n '''===================== Train Discriminator ====================='''\n if epoch + 1 > self.pretrain_epochs:\n disc_optimizer.zero_grad()\n\n #===== 2x disc loss =====#\n real_decision_2x = discnet(lr2x_)\n real_loss_2x = BCE_loss(\n real_decision_2x, real_label.detach())\n\n fake_decision_2x = discnet(sr2x_.detach())\n fake_loss_2x = BCE_loss(\n fake_decision_2x, fake_label.detach())\n\n disc_loss_2x = real_loss_2x + fake_loss_2x\n\n disc_loss_2x.backward()\n disc_optimizer.step()\n\n #===== 4x disc loss =====#\n real_decision_4x = discnet(hr_)\n real_loss_4x = BCE_loss(\n real_decision_4x, real_label.detach())\n\n fake_decision_4x = discnet(sr4x_.detach())\n fake_loss_4x = BCE_loss(\n fake_decision_4x, fake_label.detach())\n\n disc_loss_4x = real_loss_4x + fake_loss_4x\n\n disc_loss_4x.backward()\n disc_optimizer.step()\n\n disc_avg_loss.add(\n (disc_loss_2x + disc_loss_4x).data.item())\n\n '''=================== Train srresnet Generator ==================='''\n edge_trade_off = [0.7, 0.2, 0.1, 0.05, 0.01, 0.3]\n if epoch + 1 > self.pretrain_epochs:\n a1, a2, a3 = 0.75, 0.1, 0.65\n else:\n a1, a2, a3 = 0.75, 0.0, 0.7\n\n if not is_fine_tune:\n #============ calculate 2x loss ==============#\n srresnet2x1_optimizer.zero_grad()\n\n #### Edgenet Loss ####\n pred = edgenet(sr2x_)\n real = edgenet(lr2x_)\n\n edge_loss_2x = BCE_loss(pred.detach(), real.detach())\n # for i in range(6):\n # edge_loss_2x += edge_trade_off[i] * \\\n # BCE_loss(pred[i].detach(), real[i].detach())\n # edge_loss = 0.7 * BCE2d(pred[0], real[i]) + 0.3 * BCE2d(pred[5], real[i])\n\n #### Content Loss ####\n content_loss_2x = MSE_loss(sr2x_, lr2x_) 
#+ 0.1*BCE_loss(1-sr2x_, 1-lr2x_)\n\n #### Perceptual Loss ####\n real_feature = featuremapping(lr2x_)\n fake_feature = featuremapping(sr2x_)\n vgg_loss_2x = MSE_loss(fake_feature, real_feature.detach())\n\n #### Adversarial Loss ####\n advs_loss_2x = BCE_loss(discnet(sr2x_), real_label) if epoch + 1 > self.pretrain_epochs else 0\n # advs_loss_2x = 0\n\n #============== loss backward ===============#\n total_loss_2x = a1 * edge_loss_2x + a2 * advs_loss_2x + \\\n a3 * content_loss_2x + (1.0 - a3) * vgg_loss_2x\n\n # total_loss_2x = 1.0 * content_loss_2x + 0.25 * vgg_loss_2x\n\n total_loss_2x.backward()\n srresnet2x1_optimizer.step()\n\n #============ calculate scores ==============#\n # psnr_2x_score_process = batch_compare_filter(\n # sr2x_.cpu().data, lr2x, PSNR)\n # psnr_2x_avg.add(psnr_2x_score_process)\n\n # ssim_2x_score_process = batch_compare_filter(\n # sr2x_.cpu().data, lr2x, SSIM)\n # ssim_2x_avg.add(ssim_2x_score_process)\n\n #============ calculate 4x loss ==============#\n if is_fine_tune:\n sr4x_ = srresnet2x2(srresnet2x1(lr4x_))\n\n srresnet2x2_optimizer.zero_grad()\n #### Edgenet Loss ####\n pred = edgenet(sr4x_)\n real = edgenet(hr_)\n\n # edge_loss_4x = 0\n edge_loss_4x = BCE_loss(pred.detach(), real.detach())\n # for i in range(6):\n # edge_loss_4x += edge_trade_off[i] * \\\n # BCE_loss(pred[i].detach(), real[i].detach())\n # edge_loss = 0.7 * BCE2d(pred[0], real[i]) + 0.3 * BCE2d(pred[5], real[i])\n\n #### Content Loss ####\n content_loss_4x = MSE_loss(sr4x_, hr_) #+ 0.1*BCE_loss(1-sr4x_, 1-hr_)\n\n #### Perceptual Loss ####\n real_feature = featuremapping(hr_)\n fake_feature = featuremapping(sr4x_)\n vgg_loss_4x = MSE_loss(fake_feature, real_feature.detach())\n\n #### Adversarial Loss ####\n advs_loss_4x = BCE_loss(discnet(sr4x_), real_label) if epoch + 1 > self.pretrain_epochs else 0\n # advs_loss_4x = 0\n\n #============== loss backward ===============#\n total_loss_4x = a1 * edge_loss_4x + a2 * advs_loss_4x + \\\n a3 * content_loss_4x + (1.0 - a3) * vgg_loss_4x\n\n # total_loss_4x = 1.0 * content_loss_4x + 0.25 * vgg_loss_4x\n\n total_loss_4x.backward()\n srresnet2x2_optimizer.step()\n\n #============ calculate scores ==============#\n # psnr_4x_score_process = batch_compare_filter(\n # sr4x_.cpu().data, hr, PSNR)\n # psnr_4x_avg.add(psnr_4x_score_process)\n\n # ssim_4x_score_process = batch_compare_filter(\n # sr4x_.cpu().data, hr, SSIM)\n # ssim_4x_avg.add(ssim_4x_score_process)\n\n if is_fine_tune:\n total_avg_loss.add(total_loss_4x.data.item())\n edge_avg_loss.add(edge_loss_4x.data.item())\n else:\n total_avg_loss.add((total_loss_2x+total_loss_4x).data.item())\n edge_avg_loss.add((edge_loss_2x+edge_loss_4x).data.item())\n if epoch + 1 > self.pretrain_epochs:\n disc_avg_loss.add((advs_loss_2x+advs_loss_4x).data.item())\n\n if (ii+1) % self.plot_iter == self.plot_iter-1:\n res = {'edge loss': edge_avg_loss.value()[0],\n 'generate loss': total_avg_loss.value()[0],\n 'discriminate loss': disc_avg_loss.value()[0]}\n vis.plot_many(res, 'Deblur net Loss')\n\n # psnr_2x_score_origin = batch_compare_filter(\n # bc2x, lr2x, PSNR)\n # psnr_4x_score_origin = batch_compare_filter(bc4x, hr, PSNR)\n # res_psnr = {'2x_origin_psnr': psnr_2x_score_origin,\n # '2x_sr_psnr': psnr_2x_score_process,\n # '4x_origin_psnr': psnr_4x_score_origin,\n # '4x_sr_psnr': psnr_4x_score_process}\n # vis.plot_many(res_psnr, 'PSNR Score')\n\n # ssim_2x_score_origin = batch_compare_filter(\n # bc2x, lr2x, SSIM)\n # ssim_4x_score_origin = batch_compare_filter(bc4x, hr, SSIM)\n # res_ssim = 
{'2x_origin_ssim': ssim_2x_score_origin,\n # '2x_sr_ssim': ssim_2x_score_process,\n # '4x_origin_ssim': ssim_4x_score_origin,\n # '4x_sr_ssim': ssim_4x_score_process}\n # vis.plot_many(res_ssim, 'SSIM Score')\n\n #======================= Output result of total training processing =======================#\n itcnt += 1\n # itbar.set_description(\"Epoch: [%2d] [%d/%d] PSNR_2x_Avg: %.6f, SSIM_2x_Avg: %.6f, PSNR_4x_Avg: %.6f, SSIM_4x_Avg: %.6f\"\n # % ((epoch + 1), (ii + 1), len(train_data_loader),\n # psnr_2x_avg.value()[0], ssim_2x_avg.value()[\n # 0],\n # psnr_4x_avg.value()[0], ssim_4x_avg.value()[0]))\n itbar.set_description(\"Epoch: [%2d] [%d/%d]\"\n % ((epoch + 1), (ii + 1), len(train_data_loader)))\n\n if (ii+1) % self.plot_iter == self.plot_iter-1:\n # test_ = deblurnet(torch.cat([y_.detach(), x_edge], 1))\n hr_edge = edgenet(hr_)\n sr2x_edge = edgenet(sr2x_)\n sr4x_edge = edgenet(sr4x_)\n\n vis.images(hr_edge.cpu().data, win='HR edge predict', opts=dict(\n title='HR edge predict'))\n vis.images(sr2x_edge.cpu().data, win='SR2X edge predict', opts=dict(\n title='SR2X edge predict'))\n vis.images(sr4x_edge.cpu().data, win='SR4X edge predict', opts=dict(\n title='SR4X edge predict'))\n\n vis.images(lr2x, win='LR2X image',\n opts=dict(title='LR2X image'))\n vis.images(lr4x, win='LR4X image',\n opts=dict(title='LR4X image'))\n vis.images(bc2x, win='BC2X image',\n opts=dict(title='BC2X image'))\n vis.images(bc4x, win='BC4X image',\n opts=dict(title='BC4X image'))\n vis.images(sr2x_.cpu().data, win='SR2X image',\n opts=dict(title='SR2X image'))\n vis.images(sr4x_.cpu().data, win='SR4X image',\n opts=dict(title='SR4X image'))\n\n vis.images(hr, win='HR image',\n opts=dict(title='HR image'))\n\n t_save_dir = 'results/train_result/'+self.train_dataset\n if not os.path.exists(t_save_dir):\n os.makedirs(t_save_dir)\n\n if (epoch + 1) % self.save_epochs == 0 and (ii+1) % 200 == 0:\n self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, epoch+1))\n self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, epoch+1))\n\n if (epoch + 1) % self.save_epochs == 0:\n self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, epoch+1))\n self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, epoch+1))\n\n # Save final trained model and results\n vis.save([self.env])\n self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, self.num_epochs))\n self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, self.num_epochs))\n\n def test(self, sr2x1_path=None, sr2x2_path=None):\n test_data_dir = os.path.join(self.data_dir, self.test_dataset)\n result_data_dir = os.path.join(self.save_dir, \"test_results\", \"2x2UnitNet_SR_\"+self.test_dataset)\n if not os.path.exists(result_data_dir):\n os.makedirs(result_data_dir)\n\n # judge whether model exists\n if not os.path.exists(sr2x1_path):\n raise Exception('sr2x1 resnet model not exists')\n if not os.path.exists(sr2x2_path):\n raise Exception('sr2x2 resnet model 
not exists')\n\n # load network params\n # srresnet2x1 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n # norm=NORM, activation='prelu', learn_residual=True)\n # srresnet2x2 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n # norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x1 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)\n srresnet2x2 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)\n srresnet2x1.load_state_dict(torch.load(sr2x1_path))\n srresnet2x2.load_state_dict(torch.load(sr2x2_path))\n\n if USE_GPU:\n srresnet2x1.cuda()\n srresnet2x2.cuda()\n\n import torchnet as tnt\n from tqdm import tqdm\n from PIL import Image\n import time\n\n psnr_4x_avg = tnt.meter.AverageValueMeter()\n ssim_4x_avg = tnt.meter.AverageValueMeter()\n\n time_avg = tnt.meter.AverageValueMeter()\n\n srresnet2x1.eval()\n srresnet2x2.eval()\n\n # processing test data\n iterbar = tqdm(os.listdir(test_data_dir))\n import cv2\n import numpy as np\n for img_name in iterbar:\n try:\n img = cv2.imread(os.path.join(test_data_dir, img_name), cv2.IMREAD_COLOR)\n img = cv2.resize(img, None, None, 0.5, 0.5, interpolation=cv2.INTER_AREA)\n\n h, w, c = img.shape[0], img.shape[1], img.shape[2]\n w_lr4x, h_lr4x = int(\n w // self.scale_factor), int(h // self.scale_factor)\n w_lr2x, h_lr2x = w_lr4x * 2, h_lr4x * 2\n w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * self.scale_factor\n\n w_num, h_num = w // self.crop_size, h // self.crop_size\n w_num += 1 if w % self.crop_size != 0 else 0\n h_num += 1 if h % self.crop_size != 0 else 0\n\n res = np.zeros((h*2, w*2, c), dtype=np.uint8)\n for i in range(w_num):\n l = i * self.crop_size\n l_new = l * 2\n r = min(l+self.crop_size, w)\n r_new = w * 2 if r == w else l_new + self.crop_size * 2\n for j in range(h_num):\n t = j * self.crop_size\n t_new = t * 2\n b = min(t+self.crop_size, h)\n b_new = h * 2 if b == h else t_new + self.crop_size * 2\n \n lr = img[t:b, l:r]\n\n lr = Transforms.ToTensor()(lr).unsqueeze(0)\n if USE_GPU:\n lr = lr.cuda()\n\n sr = srresnet2x1(lr).squeeze()\n\n res_sr = sr.cpu().data.clamp(0, 1).numpy().transpose(1, 2, 0)*255\n\n res[t_new:b_new, l_new:r_new] = res_sr\n \n cv2.imwrite(os.path.join(result_data_dir, img_name), res)\n except IOError:\n pass\n finally:\n pass\n\n\n # for img_name in iterbar:\n # try:\n # img = Image.open(os.path.join(test_data_dir, img_name)).convert(\"RGB\")\n # transform = Transforms.RandomCrop(self.crop_size)\n # img = transform(img)\n\n # w, h = img.size[0], img.size[1]\n # w_lr4x, h_lr4x = int(\n # w // self.scale_factor), int(h // self.scale_factor)\n # w_lr2x, h_lr2x = w_lr4x * 2, h_lr4x * 2\n # # w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * self.scale_factor\n\n # # transform tensor\n # # hr = img.resize((w_hr, h_hr), Image.ANTIALIAS)\n # # lr2x = img.resize((w_lr2x, h_lr2x), Image.ANTIALIAS)\n # lr4x = img.resize((w_lr4x, h_lr4x), Image.ANTIALIAS)\n # lr4x = img.resize((w_lr2x, h_lr2x), Image.ANTIALIAS)\n\n # # hr_ = Transforms.ToTensor()(hr).unsqueeze(0)\n # # lr2x_ = Transforms.ToTensor()(lr2x).unsqueeze(0)\n # lr4x_ = Transforms.ToTensor()(lr4x).unsqueeze(0)\n\n # if USE_GPU:\n # # hr_ = hr_.cuda()\n # # lr2x_ = lr2x_.cuda()\n # lr4x_ = lr4x_.cuda()\n\n # torch.cuda.synchronize()\n # start = time.time()\n\n # sr4x_ = srresnet2x2(srresnet2x1(lr4x_))\n # # sr4x_ = srresnet2x1(lr4x_)\n\n # torch.cuda.synchronize()\n # end = time.time()\n\n # time_avg.add(end-start)\n # except IOError:\n # pass\n # finally:\n # pass\n\n # # calculate PSNR & SSIM\n # 
psnr_4x_score = batch_compare_filter(\n # sr4x_.cpu().data, hr_, PSNR)\n # ssim_4x_score = batch_compare_filter(\n # sr4x_.cpu().data, hr_, SSIM)\n # psnr_4x_avg.add(psnr_4x_score)\n # ssim_4x_avg.add(ssim_4x_score)\n\n # # save image\n # save_img(sr4x_.cpu().data, os.path.join(result_data_dir, img_name))\n\n print(time_avg.value()[0])\n print(\"final PSNR score: {}\".format(psnr_4x_avg.value()[0]))\n print(\"final SSIM score: {}\".format(ssim_4x_avg.value()[0]))\n\n def test_t(self, sr2x1_1_path=None, sr2x2_1_path=None, sr2x1_2_path=None, sr2x2_2_path=None):\n test_data_dir = os.path.join(self.data_dir, self.test_dataset)\n \n sr_edge_dir = os.path.join(self.save_dir, \"show_results\", \"2x2UnitNet_Edge_SR_\"+self.test_dataset)\n if not os.path.exists(sr_edge_dir):\n os.makedirs(sr_edge_dir)\n\n sr_none_dir = os.path.join(self.save_dir, \"show_results\", \"2x2UnitNet_none_SR_\"+self.test_dataset)\n if not os.path.exists(sr_none_dir):\n os.makedirs(sr_none_dir)\n \n bc_dir = os.path.join(self.save_dir, \"show_results\", \"Bicubic_SR_\"+self.test_dataset)\n if not os.path.exists(bc_dir):\n os.makedirs(bc_dir)\n \n hr_dir = os.path.join(self.save_dir, \"show_results\", \"HR_\"+self.test_dataset)\n if not os.path.exists(hr_dir):\n os.makedirs(hr_dir)\n\n lr_dir = os.path.join(self.save_dir, \"show_results\", \"LR_\"+self.test_dataset)\n if not os.path.exists(lr_dir):\n os.makedirs(lr_dir)\n\n # judge whether model exists\n if not os.path.exists(sr2x1_1_path):\n raise Exception('sr2x1 resnet model not exists')\n if not os.path.exists(sr2x2_1_path):\n raise Exception('sr2x2 resnet model not exists')\n if not os.path.exists(sr2x1_2_path):\n raise Exception('sr2x1 resnet model not exists')\n if not os.path.exists(sr2x2_2_path):\n raise Exception('sr2x2 resnet model not exists')\n\n # load network params\n srresnet2x1_edge = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x2_edge = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x1_none = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x2_none = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x1_edge.load_state_dict(torch.load(sr2x1_1_path))\n srresnet2x2_edge.load_state_dict(torch.load(sr2x2_1_path))\n srresnet2x1_none.load_state_dict(torch.load(sr2x1_2_path))\n srresnet2x2_none.load_state_dict(torch.load(sr2x2_2_path))\n\n if USE_GPU:\n srresnet2x1_edge.cuda()\n srresnet2x2_edge.cuda()\n srresnet2x1_none.cuda()\n srresnet2x2_none.cuda()\n\n import torchnet as tnt\n from tqdm import tqdm\n from PIL import Image\n\n psnr_edge_4x_avg = tnt.meter.AverageValueMeter()\n ssim_edge_4x_avg = tnt.meter.AverageValueMeter()\n psnr_none_4x_avg = tnt.meter.AverageValueMeter()\n ssim_none_4x_avg = tnt.meter.AverageValueMeter()\n\n # srresnet2x1_edge.eval()\n # srresnet2x2_edge.eval()\n # srresnet2x1_none.eval()\n # srresnet2x2_none.eval()\n\n # processing test data\n iterbar = tqdm(os.listdir(test_data_dir))\n for img_name in iterbar:\n img = Image.open(os.path.join(test_data_dir, img_name)).convert(\"RGB\")\n transform = Transforms.RandomCrop(self.crop_size)\n img = transform(img)\n\n w, h = img.size[0], img.size[1]\n w_lr4x, h_lr4x = int(\n w // self.scale_factor), int(h // self.scale_factor)\n w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * 
self.scale_factor\n\n # transform tensor\n hr = img.resize((w_hr, h_hr), Image.ANTIALIAS)\n lr4x = img.resize((w_lr4x, h_lr4x), Image.ANTIALIAS)\n bc4x = lr4x.resize((w_hr, h_hr), Image.BICUBIC)\n\n hr_ = Transforms.ToTensor()(hr).unsqueeze(0)\n bc4x_ = Transforms.ToTensor()(bc4x).unsqueeze(0)\n lr4x_ = Transforms.ToTensor()(lr4x).unsqueeze(0)\n\n if USE_GPU:\n hr_ = hr_.cuda()\n lr4x_ = lr4x_.cuda()\n\n sr4x_edge_ = srresnet2x2_edge(srresnet2x1_edge(lr4x_))\n sr4x_none_ = srresnet2x2_none(srresnet2x1_none(lr4x_))\n\n # calculate PSNR & SSIM\n psnr_edge_4x_score = batch_compare_filter(\n sr4x_edge_.cpu().data, hr_, PSNR)\n ssim_edge_4x_score = batch_compare_filter(\n sr4x_edge_.cpu().data, hr_, SSIM)\n psnr_edge_4x_avg.add(psnr_edge_4x_score)\n ssim_edge_4x_avg.add(ssim_edge_4x_score)\n\n psnr_none_4x_score = batch_compare_filter(\n sr4x_none_.cpu().data, hr_, PSNR)\n ssim_none_4x_score = batch_compare_filter(\n sr4x_none_.cpu().data, hr_, SSIM)\n psnr_none_4x_avg.add(psnr_none_4x_score)\n ssim_none_4x_avg.add(ssim_none_4x_score)\n\n # save image\n save_img(sr4x_edge_.cpu().data, os.path.join(sr_edge_dir, img_name))\n save_img(sr4x_none_.cpu().data, os.path.join(sr_none_dir, img_name))\n save_img(bc4x_.cpu().data, os.path.join(bc_dir, img_name))\n save_img(hr_.cpu().data, os.path.join(hr_dir, img_name))\n save_img(lr4x_.cpu().data, os.path.join(lr_dir, img_name))\n\n print(\"final edge PSNR score: {}\".format(psnr_edge_4x_avg.value()[0]))\n print(\"final edge SSIM score: {}\".format(ssim_edge_4x_avg.value()[0]))\n\n print(\"final none PSNR score: {}\".format(psnr_none_4x_avg.value()[0]))\n print(\"final none SSIM score: {}\".format(ssim_none_4x_avg.value()[0]))\n\n def save_model(self, model, save_dir, model_name, mtype='pkl'):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n if mtype == 'pkl':\n save_path = os.path.join(save_dir, model_name+'.pkl')\n torch.save(model.state_dict(), save_path)\n elif mtype == 'pth':\n save_path = os.path.join(save_dir, model_name+'.pth')\n torch.save(model.state_dict(), save_path)\n", "from distutils.core import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy\n\next_module = Extension(\n \"nms\",\n sources=[\"nms.pyx\"],\n extra_compile_args=[\"-std=c++11\"],\n language=\"c++\",\n include_dirs=[numpy.get_include()]\n)\n\nsetup(ext_modules=cythonize(ext_module,\n language_level=3,\n annotate=True))\n", "from roi_align.roi_align import RoIAlign # RoIAlign module\nfrom roi_align.roi_align import CropAndResize # crop_and_resize module\nfrom torchvision import transforms\nimport torch\nimport cv2\nimport numpy as np\nfrom torch.autograd import Variable\n\ndef to_varabile(data,requires_grad,is_cuda):\n if is_cuda:\n data = data.cuda()\n data = Variable(data,requires_grad=requires_grad)\n return data\n\n# input data\nis_cuda = torch.cuda.is_available()\n# image_data = cv2.imread('/data/2019AAAI/data/ctw15/test/text_image/1002.jpg')\nimage_data = np.ones((100,100,3))\nimage_data = image_data.transpose((2, 0, 1)).astype(np.float32)\nimage_data = torch.from_numpy((image_data))\nboxes_data = torch.Tensor([[0,0,200,200],[0,0,200,200]])\nbox_index_data = torch.IntTensor([0])\nimage = to_varabile(image_data, requires_grad=True, is_cuda=is_cuda)\nimage = image.unsqueeze(0)\nprint(image.size())\nboxes = to_varabile(boxes_data, requires_grad=False, is_cuda=is_cuda)\nbox_index = to_varabile(box_index_data, requires_grad=False, is_cuda=is_cuda)\nprint(image,boxes,box_index)\n# RoIAlign layer\nroi_align = RoIAlign(7, 
7,extrapolation_value=0)\ncrops = roi_align(image, boxes, box_index)\nprint(crops)\n\n", "import sys\nimport torch\nimport argparse\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport shutil\n\nfrom torch.autograd import Variable\nfrom torch.utils import data\nimport os\n\nfrom dataset import IC15Loader\nfrom metrics import runningScore\nimport models\nfrom util import Logger, AverageMeter\nimport time\nimport util\nfrom tensorboardX import SummaryWriter\n\nwriter = SummaryWriter()\n\n\ndef ohem_single(score, gt_text, training_mask):\n pos_num = (int)(np.sum(gt_text > 0.5)) - (int)(np.sum((gt_text > 0.5) & (training_mask <= 0.5)))\n \n if pos_num == 0:\n # selected_mask = gt_text.copy() * 0 # may be not good\n selected_mask = training_mask\n selected_mask = selected_mask.reshape(1, selected_mask.shape[0], selected_mask.shape[1]).astype('float32')\n return selected_mask\n \n neg_num = (int)(np.sum(gt_text <= 0.5))\n neg_num = (int)(min(pos_num * 3, neg_num))\n \n if neg_num == 0:\n selected_mask = training_mask\n selected_mask = selected_mask.reshape(1, selected_mask.shape[0], selected_mask.shape[1]).astype('float32')\n return selected_mask\n\n neg_score = score[gt_text <= 0.5]\n neg_score_sorted = np.sort(-neg_score)\n threshold = -neg_score_sorted[neg_num - 1]\n\n selected_mask = ((score >= threshold) | (gt_text > 0.5)) & (training_mask > 0.5)\n selected_mask = selected_mask.reshape(1, selected_mask.shape[0], selected_mask.shape[1]).astype('float32')\n return selected_mask\n\ndef ohem_batch(scores, gt_texts, training_masks):\n scores = scores.data.cpu().numpy()\n gt_texts = gt_texts.data.cpu().numpy()\n training_masks = training_masks.data.cpu().numpy()\n\n selected_masks = []\n for i in range(scores.shape[0]):\n selected_masks.append(ohem_single(scores[i, :, :], gt_texts[i, :, :], training_masks[i, :, :]))\n\n selected_masks = np.concatenate(selected_masks, 0)\n selected_masks = torch.from_numpy(selected_masks).float()\n\n return selected_masks\n\ndef dice_loss(input, target, mask):\n input = torch.sigmoid(input)\n\n input = input.contiguous().view(input.size()[0], -1)\n target = target.contiguous().view(target.size()[0], -1)\n mask = mask.contiguous().view(mask.size()[0], -1)\n \n input = input * mask\n target = target * mask\n\n a = torch.sum(input * target, 1)\n b = torch.sum(input * input, 1) + 0.001\n c = torch.sum(target * target, 1) + 0.001\n d = (2 * a) / (b + c)\n dice_loss = torch.mean(d)\n return 1 - dice_loss\n\ndef cal_text_score(texts, gt_texts, training_masks, running_metric_text):\n training_masks = training_masks.data.cpu().numpy()\n pred_text = torch.sigmoid(texts).data.cpu().numpy() * training_masks\n pred_text[pred_text <= 0.5] = 0\n pred_text[pred_text > 0.5] = 1\n pred_text = pred_text.astype(np.int32)\n gt_text = gt_texts.data.cpu().numpy() * training_masks\n gt_text = gt_text.astype(np.int32)\n running_metric_text.update(gt_text, pred_text)\n score_text, _ = running_metric_text.get_scores()\n return score_text\n\ndef cal_kernel_score(kernels, gt_kernels, gt_texts, training_masks, running_metric_kernel):\n mask = (gt_texts * training_masks).data.cpu().numpy()\n kernel = kernels[:, -1, :, :]\n gt_kernel = gt_kernels[:, -1, :, :]\n pred_kernel = torch.sigmoid(kernel).data.cpu().numpy()\n pred_kernel[pred_kernel <= 0.5] = 0\n pred_kernel[pred_kernel > 0.5] = 1\n pred_kernel = (pred_kernel * mask).astype(np.int32)\n gt_kernel = gt_kernel.data.cpu().numpy()\n gt_kernel = (gt_kernel * mask).astype(np.int32)\n 
running_metric_kernel.update(gt_kernel, pred_kernel)\n score_kernel, _ = running_metric_kernel.get_scores()\n return score_kernel\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n running_metric_text = runningScore(2)\n running_metric_kernel = runningScore(2)\n\n end = time.time()\n for batch_idx, (imgs, gt_texts, gt_kernels, training_masks) in enumerate(train_loader):\n data_time.update(time.time() - end)\n\n imgs = Variable(imgs.cuda())\n gt_texts = Variable(gt_texts.cuda())\n gt_kernels = Variable(gt_kernels.cuda())\n training_masks = Variable(training_masks.cuda())\n\n outputs = model(imgs)\n texts = outputs[:, 0, :, :]\n kernels = outputs[:, 1:, :, :]\n\n selected_masks = ohem_batch(texts, gt_texts, training_masks)\n selected_masks = Variable(selected_masks.cuda())\n\n loss_text = criterion(texts, gt_texts, selected_masks)\n \n loss_kernels = []\n mask0 = torch.sigmoid(texts).data.cpu().numpy()\n mask1 = training_masks.data.cpu().numpy()\n selected_masks = ((mask0 > 0.5) & (mask1 > 0.5)).astype('float32')\n selected_masks = torch.from_numpy(selected_masks).float()\n selected_masks = Variable(selected_masks.cuda())\n for i in range(6):\n kernel_i = kernels[:, i, :, :]\n gt_kernel_i = gt_kernels[:, i, :, :]\n loss_kernel_i = criterion(kernel_i, gt_kernel_i, selected_masks)\n loss_kernels.append(loss_kernel_i)\n loss_kernel = sum(loss_kernels) / len(loss_kernels)\n \n loss = 0.7 * loss_text + 0.3 * loss_kernel\n \n losses.update(loss.item(), imgs.size(0))\n \n if batch_idx % 100 == 0:\n writer.add_scalar('loss_text', loss_text, batch_idx)\n writer.add_scalar('loss_kernel', loss_kernel, batch_idx)\n writer.add_scalar('total_loss', loss, batch_idx)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n score_text = cal_text_score(texts, gt_texts, training_masks, running_metric_text)\n score_kernel = cal_kernel_score(kernels, gt_kernels, gt_texts, training_masks, running_metric_kernel)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if batch_idx % 20 == 0:\n output_log = '({batch}/{size}) Batch: {bt:.3f}s | TOTAL: {total:.0f}min | ETA: {eta:.0f}min | Loss: {loss:.4f} | Acc_t: {acc: .4f} | IOU_t: {iou_t: .4f} | IOU_k: {iou_k: .4f}'.format(\n batch=batch_idx + 1,\n size=len(train_loader),\n bt=batch_time.avg,\n total=batch_time.avg * batch_idx / 60.0,\n eta=batch_time.avg * (len(train_loader) - batch_idx) / 60.0,\n loss=losses.avg,\n acc=score_text['Mean Acc'],\n iou_t=score_text['Mean IoU'],\n iou_k=score_kernel['Mean IoU'])\n print(output_log)\n sys.stdout.flush()\n\n return (losses.avg, score_text['Mean Acc'], score_kernel['Mean Acc'], score_text['Mean IoU'], score_kernel['Mean IoU'])\n\ndef adjust_learning_rate(args, optimizer, epoch):\n global state\n if epoch in args.schedule:\n args.lr = args.lr * 0.1\n for param_group in optimizer.param_groups:\n param_group['lr'] = args.lr\n\ndef save_checkpoint(state, checkpoint='checkpoint', filename='checkpoint.pth.tar'):\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n\ndef main(args):\n if args.checkpoint == '':\n args.checkpoint = \"checkpoints/ic15_%s_bs_%d_ep_%d\"%(args.arch, args.batch_size, args.n_epoch)\n if args.pretrain:\n if 'synth' in args.pretrain:\n args.checkpoint += \"_pretrain_synth\"\n else:\n args.checkpoint += \"_pretrain_ic17\"\n\n print ('checkpoint path: %s'%args.checkpoint)\n print ('init lr: %.8f'%args.lr)\n print ('schedule: ', args.schedule)\n 
sys.stdout.flush()\n\n if not os.path.isdir(args.checkpoint):\n os.makedirs(args.checkpoint)\n\n kernel_num = 7\n min_scale = 0.4\n start_epoch = 0\n\n data_loader = IC15Loader(is_transform=True, img_size=args.img_size, kernel_num=kernel_num, min_scale=min_scale)\n train_loader = torch.utils.data.DataLoader(\n data_loader,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=3,\n drop_last=True,\n pin_memory=True)\n\n if args.arch == \"resnet50\":\n model = models.resnet50(pretrained=False, num_classes=kernel_num)\n elif args.arch == \"resnet101\":\n model = models.resnet101(pretrained=True, num_classes=kernel_num)\n elif args.arch == \"resnet152\":\n model = models.resnet152(pretrained=True, num_classes=kernel_num)\n \n model = torch.nn.DataParallel(model).cuda()\n \n if hasattr(model.module, 'optimizer'):\n optimizer = model.module.optimizer\n else:\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.99, weight_decay=5e-4)\n\n title = 'icdar2015'\n if args.pretrain:\n print('Using pretrained model.')\n assert os.path.isfile(args.pretrain), 'Error: no checkpoint file found!'\n print(args.pretrain)\n checkpoint = torch.load(args.pretrain)\n state = model.state_dict()\n for key in state.keys():\n if key in checkpoint.keys():\n state[key] = checkpoint[key]\n model.load_state_dict(state)\n #model.load_state_dict(checkpoint['state_dict'])\n logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)\n logger.set_names(['Learning Rate', 'Train Loss','Train Acc.', 'Train IOU.'])\n elif args.resume:\n print('Resuming from checkpoint.')\n assert os.path.isfile(args.resume), 'Error: no checkpoint file found!'\n checkpoint = torch.load(args.resume)\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)\n else:\n print('Training from scratch.')\n logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)\n logger.set_names(['Learning Rate', 'Train Loss','Train Acc.', 'Train IOU.'])\n\n for epoch in range(start_epoch, args.n_epoch):\n adjust_learning_rate(args, optimizer, epoch)\n print('\\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.n_epoch, optimizer.param_groups[0]['lr']))\n\n train_loss, train_te_acc, train_ke_acc, train_te_iou, train_ke_iou = train(train_loader, model, dice_loss, optimizer, epoch)\n \n if epoch % 10 == 0:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'lr': args.lr,\n 'optimizer' : optimizer.state_dict(),\n }, checkpoint=args.checkpoint)\n\n logger.append([optimizer.param_groups[0]['lr'], train_loss, train_te_acc, train_te_iou])\n logger.close()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Hyperparams')\n parser.add_argument('--arch', nargs='?', type=str, default='resnet50')\n parser.add_argument('--img_size', nargs='?', type=int, default=640, \n help='Height of the input image')\n parser.add_argument('--n_epoch', nargs='?', type=int, default=60, \n help='# of the epochs')\n parser.add_argument('--schedule', type=int, nargs='+', default=[20, 40],\n help='Decrease learning rate at these epochs.')\n parser.add_argument('--batch_size', nargs='?', type=int, default=8, \n help='Batch Size')\n parser.add_argument('--lr', nargs='?', type=float, default=1e-3, \n help='Learning Rate')\n parser.add_argument('--resume', nargs='?', type=str, default=None, \n help='Path to previous saved model 
to restart from')\n parser.add_argument('--pretrain', nargs='?', type=str, default='/workspace/mnt/group/general-reg/denglei/code/PSENet-master/models/pre_train/resnet50-19c8e357.pth', \n help='Path to previous saved model to restart from')\n parser.add_argument('--checkpoint', default='/workspace/mnt/group/general-reg/denglei/code/PSENet-master/models/weights_LSVT', type=str, metavar='PATH',\n help='path to save checkpoint (default: checkpoint)')\n args = parser.parse_args()\n\n main(args)\n", "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.init as init\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\nfrom torch.nn.parameter import Parameter\r\nimport math\r\n\r\nfrom component.stn import SpatialTransformer\r\ncapsule_dim = 2\r\n\r\nclass Relu_Caps(nn.Module):\r\n def __init__(self, num_C, num_D, theta=0.001, eps=0.0001):\r\n super(Relu_Caps, self).__init__()\r\n self.num_C = num_C\r\n self.num_D = num_D\r\n self.theta = theta\r\n self.eps = eps\r\n\r\n def forward(self, x):\r\n x_caps = x.view(x.shape[0], self.num_C, self.num_D, x.shape[2], x.shape[3])\r\n x_length = torch.sqrt(torch.sum(x_caps * x_caps, dim=2))\r\n x_length = torch.unsqueeze(x_length, 2)\r\n\r\n x_caps = F.relu(x_length - self.theta) * x_caps / (x_length + self.eps)\r\n # x_caps = F.relu(x_length - self.theta) * x_caps\r\n # print(x_caps)\r\n # print(F.relu(x_length - self.theta).size())\r\n # print(F.relu(x_length - self.theta))\r\n # if F.relu(x_length - self.theta):\r\n # pass\r\n # else:\r\n # x_caps = 0 * x_caps\r\n\r\n x = x_caps.view(x.shape[0], -1, x.shape[2], x.shape[3])\r\n return x\r\n\r\nclass Caps_BN(nn.Module):\r\n '''\r\n Input variable N*CD*H*W\r\n First perform normal BN without learnable affine parameters, then apply a C group convolution to perform per-capsule\r\n linear transformation\r\n '''\r\n\r\n def __init__(self, num_C, num_D):\r\n super(Caps_BN, self).__init__()\r\n self.BN = nn.BatchNorm2d(num_C * num_D)\r\n self.conv = nn.Conv2d(num_C * num_D, num_C * num_D, 1, groups=num_C)\r\n\r\n # eye = torch.FloatTensor(num_C, num_D, num_D).copy_(torch.eye(num_D)).view(num_C * num_D, num_D,\r\n # 1, 1)\r\n # self.conv.weight.data.copy_(eye)\r\n # self.conv.bias.data.zero_()\r\n\r\n def forward(self, x):\r\n output = self.BN(x)\r\n output = self.conv(output)\r\n\r\n return output\r\n\r\n\r\nclass Caps_MaxPool(nn.Module):\r\n '''\r\n Input variable N*CD*H*W\r\n First get the argmax indices of capsule lengths, then tile the indices D time and apply the tiled indices to capsules\r\n '''\r\n\r\n def __init__(self, num_C, num_D, kernel_size, stride=None, padding=0, dilation=1):\r\n super(Caps_MaxPool, self).__init__()\r\n self.num_C = num_C\r\n self.num_D = num_D\r\n self.maxpool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding, dilation=dilation, return_indices=True)\r\n\r\n def forward(self, x):\r\n B = x.shape[0]\r\n H, W = x.shape[2:]\r\n x_caps = x.view(B, self.num_C, self.num_D, H, W)\r\n x_length = torch.sum(x_caps * x_caps, dim=2)\r\n x_length_pool, indices = self.maxpool(x_length)\r\n H_pool, W_pool = x_length_pool.shape[2:]\r\n indices_tile = torch.unsqueeze(indices, 2).expand(-1, -1, self.num_D, -1, -1).contiguous()\r\n indices_tile = indices_tile.view(B, self.num_C * self.num_D, -1)\r\n x_flatten = x.view(B, self.num_C * self.num_D, -1)\r\n output = torch.gather(x_flatten, 2, indices_tile).view(B, self.num_C * self.num_D, H_pool, W_pool)\r\n\r\n return output\r\n\r\n\r\nclass Relu_Adpt(nn.Module):\r\n def __init__(self, num_C, num_D, eps=0.0001):\r\n 
super(Relu_Adpt, self).__init__()\r\n self.num_C = num_C\r\n self.num_D = num_D\r\n self.eps = eps\r\n\r\n self.theta = Parameter(torch.Tensor(1, self.num_C, 1, 1, 1))\r\n self.theta.data.fill_(0.)\r\n\r\n def forward(self, x):\r\n x_caps = x.view(x.shape[0], self.num_C, self.num_D, x.shape[2], x.shape[3])\r\n x_length = torch.sqrt(torch.sum(x_caps * x_caps, dim=2))\r\n x_length = torch.unsqueeze(x_length, 2)\r\n x_caps = F.relu(x_length - self.theta) * x_caps / (x_length + self.eps)\r\n x = x_caps.view(x.shape[0], -1, x.shape[2], x.shape[3])\r\n return x\r\n\r\nclass Caps_Conv(nn.Module):\r\n def __init__(self, in_C, in_D, out_C, out_D, kernel_size, stride=1, padding=0, dilation=1, bias=False):\r\n super(Caps_Conv, self).__init__()\r\n self.in_C = in_C\r\n self.in_D = in_D\r\n self.out_C = out_C\r\n self.out_D = out_D\r\n self.conv_D = nn.Conv2d(in_C * in_D, in_C * out_D, 1, groups=in_C, bias=False)\r\n self.conv_C = nn.Conv2d(in_C, out_C, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)\r\n\r\n m = self.conv_D.kernel_size[0] * self.conv_D.kernel_size[1] * self.conv_D.out_channels\r\n self.conv_D.weight.data.normal_(0, math.sqrt(2. / m))\r\n n = self.conv_C.kernel_size[0] * self.conv_C.kernel_size[1] * self.conv_C.out_channels\r\n self.conv_C.weight.data.normal_(0, math.sqrt(2. / n))\r\n if bias:\r\n self.conv_C.bias.data.zero_()\r\n\r\n def forward(self, x):\r\n x = self.conv_D(x)\r\n x = x.view(x.shape[0], self.in_C, self.out_D, x.shape[2], x.shape[3])\r\n x = torch.transpose(x, 1, 2).contiguous()\r\n x = x.view(-1, self.in_C, x.shape[3], x.shape[4])\r\n x = self.conv_C(x)\r\n x = x.view(-1, self.out_D, self.out_C, x.shape[2], x.shape[3])\r\n x = torch.transpose(x, 1, 2).contiguous()\r\n x = x.view(-1, self.out_C * self.out_D, x.shape[3], x.shape[4])\r\n\r\n return x\r\nclass Squash(nn.Module):\r\n def __init__(self, num_C, num_D, eps=0.0001):\r\n super(Squash, self).__init__()\r\n self.num_C = num_C\r\n self.num_D = num_D\r\n self.eps = eps\r\n\r\n def forward(self, x):\r\n x_caps = x.view(x.shape[0], self.num_C, self.num_D, x.shape[2], x.shape[3])\r\n x_length = torch.sqrt(torch.sum(x_caps * x_caps, dim=2))\r\n x_length = torch.unsqueeze(x_length, 2)\r\n x_caps = x_caps * x_length / (1 + self.eps + x_length * x_length)\r\n x = x_caps.view(x.shape[0], -1, x.shape[2], x.shape[3])\r\n return x\r\n\r\nclass RARE(nn.Module):\r\n\r\n def __init__(self, opt):\r\n nn.Module.__init__(self)\r\n\r\n from alphabet.alphabet import Alphabet\r\n self.n_class = len(Alphabet(opt.ADDRESS.ALPHABET))\r\n self.opt = opt\r\n\r\n # self.stn = SpatialTransformer(self.opt)\r\n self.cnn = self.getCNN_cap()\r\n self.rnn = self.getEncoder()\r\n # n_class,hidden_size,num_embedding,input_size\r\n # self.attention = Attention(self.n_class,256, 128,256)\r\n self.attention = Attention(256, 256, self.n_class, 128)\r\n\r\n\r\n # Spatial transformer localization-network\r\n self.localization = nn.Sequential(\r\n nn.Conv2d(1, 8, kernel_size=7),\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True),\r\n nn.Conv2d(8, 10, kernel_size=5),\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True)\r\n )\r\n\r\n # Regressor for the 3 * 2 affine matrix\r\n self.fc_loc = nn.Sequential(\r\n nn.Linear(10 * 4 * 21, 32),\r\n nn.ReLU(True),\r\n nn.Linear(32, 3 * 2)\r\n )\r\n\r\n # Initialize the weights/bias with identity transformation\r\n self.fc_loc[2].weight.data.fill_(0)\r\n self.fc_loc[2].bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0])\r\n\r\n def stn(self, x):\r\n xs = self.localization(x)\r\n # print(\"size:\", 
xs.size())\r\n xs = xs.view(-1, 10 * 4 * 21)\r\n theta = self.fc_loc(xs)\r\n theta = theta.view(-1, 2, 3)\r\n\r\n grid = F.affine_grid(theta, x.size())\r\n x = F.grid_sample(x, grid)\r\n\r\n return x\r\n\r\n def getCNN_cap(self):\r\n return nn.Sequential(\r\n\r\n nn.Conv2d(1, 64, 3, 1, 1),\r\n nn.ReLU(True),\r\n\r\n nn.MaxPool2d(2, 2),\r\n\r\n nn.Conv2d(64, 128, 3, 1, 1),\r\n nn.ReLU(True),\r\n\r\n nn.MaxPool2d(2, 2),\r\n\r\n nn.Conv2d(128, 256, 3, 1, 1),\r\n nn.BatchNorm2d(256),\r\n nn.ReLU(True),\r\n\r\n Caps_Conv(256, 1, 256, capsule_dim, 3, 1, 1),\r\n Relu_Adpt(256, capsule_dim),\r\n\r\n Caps_MaxPool(256, capsule_dim, (2, 2), (2, 1), (0, 1)),\r\n\r\n Caps_Conv(256, capsule_dim, 512, capsule_dim, 3, 1, 1),\r\n # Squash(512, capsule_dim),\r\n Caps_BN(512, capsule_dim),\r\n Relu_Adpt(512, capsule_dim),\r\n\r\n Caps_Conv(512, capsule_dim, 512, capsule_dim, 3, 1, 1),\r\n # Squash(512, capsule_dim),\r\n # Caps_BN(512, capsule_dim),\r\n Relu_Adpt(512, capsule_dim),\r\n\r\n Caps_MaxPool(512, capsule_dim, (2, 2), (2, 1), (0, 1)),\r\n\r\n Caps_Conv(512, capsule_dim, 512, 1, 2, 1, 0),\r\n # Squash(512, capsule_dim),\r\n Caps_BN(512, 1),\r\n )\r\n\r\n def getCNN(self):\r\n\r\n '''cnn'''\r\n nc = self.opt.IMAGE.IMG_CHANNEL\r\n '''\r\n nm: chanel number\r\n ks: kernel size\r\n ps: padding size\r\n ss: stride size\r\n '''\r\n nm = [64, 128, 256, 256, 512, 512, 512]\r\n ks = [3, 3, 3, 3, 3, 3, 2]\r\n ps = [1, 1, 1, 1, 1, 1, 0]\r\n ss = [1, 1, 1, 1, 1, 1, 1]\r\n\r\n cnn = nn.Sequential()\r\n\r\n def convRelu(i, batchNormalization=False, leakyRelu=False):\r\n nIn = nc if i == 0 else nm[i - 1]\r\n nOut = nm[i]\r\n cnn.add_module('conv{0}'.format(i),\r\n nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))\r\n if batchNormalization:\r\n cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))\r\n\r\n if leakyRelu:\r\n cnn.add_module('relu{0}'.format(i), nn.LeakyReLU(0.2, inplace=True))\r\n else:\r\n cnn.add_module('relu{0}'.format(i), nn.ReLU(True))\r\n\r\n # 32 * 100\r\n convRelu(0, False)\r\n # 32 * 100\r\n cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))\r\n # 16 * 50\r\n convRelu(1, False)\r\n # 16 * 50\r\n cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))\r\n # 8 * 25\r\n convRelu(2, True)\r\n convRelu(3, False)\r\n # 8 * 25\r\n cnn.add_module('pooling{0}'.format(2), nn.MaxPool2d((2, 2), (2, 1), (0, 1)))\r\n # # 4 * 27\r\n convRelu(4, True)\r\n convRelu(5, False)\r\n # 4 * 27\r\n cnn.add_module('pooling{0}'.format(3), nn.MaxPool2d((2, 2), (2, 1), (0, 1)))\r\n # 2 * 29\r\n convRelu(6, True)\r\n # 1 * ?\r\n # 也就是说,当图片的高为32时,经过卷积层之后,输出的特征图维度的高将变为1\r\n\r\n print(\"Initializing cnn net weights...\")\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n init.kaiming_normal_(m.weight.data)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n\r\n return cnn\r\n\r\n def getEncoder(self):\r\n\r\n rnn = nn.Sequential(\r\n BLSTM(512, 256, 256),\r\n BLSTM(256, 256, 256)\r\n )\r\n return rnn\r\n\r\n # image, length, text, text_rev, test\r\n def forward(self, input, text_length, text, text_rev, test=False):\r\n\r\n # input = self.stn(input)\r\n result = self.cnn(input)\r\n # (bs,512,1,5)\r\n\r\n # result = result.view(result.size(0), int(result.size(1) / capsule_dim), capsule_dim, result.size(2), result.size(3))\r\n # result = torch.sqrt(torch.sum(result * result, dim=2))\r\n # print(result.size())\r\n # result = result.squeeze(2)\r\n\r\n # print('hi', result.size())\r\n B, C, H, W = result.size()\r\n assert H == 1, 'The height of the input image 
must be 1.'\r\n result = result.squeeze(2)\r\n result = result.permute(2, 0, 1)\r\n\r\n result = self.rnn(result)\r\n '''feature, text_length, test sign'''\r\n # result = self.attention(result,text,text_length, test)\r\n result = self.attention(result, text_length, text, test)\r\n return result\r\n\r\n\r\nclass AttentionCell(nn.Module):\r\n def __init__(self, input_size, hidden_size, num_embeddings=128, CUDA=True):\r\n super(AttentionCell, self).__init__()\r\n self.i2h = nn.Linear(input_size, hidden_size, bias=False)\r\n self.h2h = nn.Linear(hidden_size, hidden_size)\r\n self.score = nn.Linear(hidden_size, 1, bias=False)\r\n self.rnn = nn.GRUCell(input_size + num_embeddings, hidden_size)\r\n self.hidden_size = hidden_size\r\n self.input_size = input_size\r\n self.num_embeddings = num_embeddings\r\n # self.fracPickup = fracPickup(CUDA=CUDA)\r\n\r\n def forward(self, prev_hidden, feats, cur_embeddings, test=False):\r\n nT = feats.size(0)\r\n nB = feats.size(1)\r\n nC = feats.size(2)\r\n hidden_size = self.hidden_size\r\n\r\n feats_proj = self.i2h(feats.view(-1, nC))\r\n prev_hidden_proj = self.h2h(prev_hidden).view(1, nB, hidden_size).expand(nT, nB, hidden_size).contiguous().view(\r\n -1, hidden_size)\r\n emition = self.score(F.tanh(feats_proj + prev_hidden_proj).view(-1, hidden_size)).view(nT, nB)\r\n\r\n alpha = F.softmax(emition, 0) # nB * nT\r\n\r\n if not test:\r\n # alpha_fp = self.fracPickup(alpha.unsqueeze(1).unsqueeze(2)).squeeze()\r\n context = (feats * alpha.view(nT, nB, 1).expand(nT, nB, nC)).sum(0).squeeze(0) # nB * nC\r\n if len(context.size()) == 1:\r\n context = context.unsqueeze(0)\r\n context = torch.cat([context, cur_embeddings], 1)\r\n cur_hidden = self.rnn(context, prev_hidden)\r\n return cur_hidden, alpha\r\n else:\r\n context = (feats * alpha.view(nT, nB, 1).expand(nT, nB, nC)).sum(0).squeeze(0) # nB * nC\r\n if len(context.size()) == 1:\r\n context = context.unsqueeze(0)\r\n context = torch.cat([context, cur_embeddings], 1)\r\n cur_hidden = self.rnn(context, prev_hidden)\r\n return cur_hidden, alpha\r\n\r\n\r\nclass Attention(nn.Module):\r\n def __init__(self, input_size, hidden_size, num_classes, num_embeddings=128, CUDA=True):\r\n super(Attention, self).__init__()\r\n self.attention_cell = AttentionCell(input_size, hidden_size, num_embeddings, CUDA=CUDA)\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.generator = nn.Linear(hidden_size, num_classes)\r\n self.char_embeddings = Parameter(torch.randn(num_classes + 1, num_embeddings))\r\n self.num_embeddings = num_embeddings\r\n self.num_classes = num_classes\r\n self.cuda = CUDA\r\n\r\n # targets is nT * nB\r\n def forward(self, feats, text_length, text, test=False):\r\n\r\n nT = feats.size(0)\r\n nB = feats.size(1)\r\n nC = feats.size(2)\r\n hidden_size = self.hidden_size\r\n input_size = self.input_size\r\n assert (input_size == nC)\r\n assert (nB == text_length.numel())\r\n\r\n num_steps = text_length.data.max()\r\n num_labels = text_length.data.sum()\r\n\r\n if not test:\r\n\r\n targets = torch.zeros(nB, num_steps + 1).long()\r\n if self.cuda:\r\n targets = targets.cuda()\r\n start_id = 0\r\n\r\n for i in range(nB):\r\n targets[i][1:1 + text_length.data[i]] = text.data[start_id:start_id + text_length.data[i]] + 1\r\n start_id = start_id + text_length.data[i]\r\n targets = Variable(targets.transpose(0, 1).contiguous())\r\n\r\n output_hiddens = Variable(torch.zeros(num_steps, nB, hidden_size).type_as(feats.data))\r\n hidden = Variable(torch.zeros(nB, 
hidden_size).type_as(feats.data))\r\n\r\n for i in range(num_steps):\r\n cur_embeddings = self.char_embeddings.index_select(0, targets[i])\r\n hidden, alpha = self.attention_cell(hidden, feats, cur_embeddings, test)\r\n output_hiddens[i] = hidden\r\n\r\n new_hiddens = Variable(torch.zeros(num_labels, hidden_size).type_as(feats.data))\r\n b = 0\r\n start = 0\r\n\r\n for length in text_length.data:\r\n new_hiddens[start:start + length] = output_hiddens[0:length, b, :]\r\n start = start + length\r\n b = b + 1\r\n\r\n probs = self.generator(new_hiddens)\r\n return {\r\n 'result': probs\r\n }\r\n\r\n else:\r\n\r\n hidden = Variable(torch.zeros(nB, hidden_size).type_as(feats.data))\r\n targets_temp = Variable(torch.zeros(nB).long().contiguous())\r\n probs = Variable(torch.zeros(nB * num_steps, self.num_classes))\r\n if self.cuda:\r\n targets_temp = targets_temp.cuda()\r\n probs = probs.cuda()\r\n\r\n for i in range(num_steps):\r\n cur_embeddings = self.char_embeddings.index_select(0, targets_temp)\r\n hidden, alpha = self.attention_cell(hidden, feats, cur_embeddings, test)\r\n hidden2class = self.generator(hidden)\r\n probs[i * nB:(i + 1) * nB] = hidden2class\r\n _, targets_temp = hidden2class.max(1)\r\n targets_temp += 1\r\n\r\n probs = probs.view(num_steps, nB, self.num_classes).permute(1, 0, 2).contiguous()\r\n probs = probs.view(-1, self.num_classes).contiguous()\r\n probs_res = Variable(torch.zeros(num_labels, self.num_classes).type_as(feats.data))\r\n b = 0\r\n start = 0\r\n\r\n for length in text_length.data:\r\n probs_res[start:start + length] = probs[b * num_steps:b * num_steps + length]\r\n start = start + length\r\n b = b + 1\r\n\r\n return {\r\n 'result': probs_res\r\n }\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass BLSTM(nn.Module):\r\n '''双向循环神经网络'''\r\n\r\n def __init__(self, nIn, nHidden, nOut):\r\n nn.Module.__init__(self)\r\n\r\n self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True, dropout=0.3)\r\n self.linear = nn.Linear(nHidden * 2, nOut)\r\n\r\n def forward(self, input):\r\n '''The size of input must be [T,B,C]'''\r\n T, B, C = input.size()\r\n result, _ = self.rnn(input)\r\n result = result.view(T * B, -1)\r\n result = self.linear(result)\r\n result = result.view(T, B, -1)\r\n return result\r\n" ]
[ [ "torch.ones", "torch.load", "scipy.misc.imsave", "torch.zeros", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "torch.cuda.is_available", "numpy.zeros", "torch.nn.MSELoss", "torch.autograd.Variable" ], [ "numpy.get_include" ], [ "torch.Tensor", "torch.from_numpy", "numpy.ones", "torch.cuda.is_available", "torch.IntTensor", "torch.autograd.Variable" ], [ "torch.mean", "torch.sigmoid", "torch.load", "torch.sum", "torch.utils.data.DataLoader", "numpy.sort", "torch.from_numpy", "numpy.concatenate", "torch.nn.DataParallel", "numpy.sum", "torch.save" ], [ "torch.nn.functional.softmax", "torch.transpose", "torch.cat", "torch.zeros", "torch.sum", "torch.FloatTensor", "torch.nn.functional.tanh", "torch.randn", "torch.nn.functional.relu", "torch.nn.GRUCell", "torch.nn.Sequential", "torch.nn.Conv2d", "torch.unsqueeze", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "torch.nn.Module.__init__", "torch.nn.LSTM", "torch.Tensor", "torch.nn.MaxPool2d", "torch.nn.functional.grid_sample", "torch.gather", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ahesford/habis-tools
[ "82f82b99fa18452697404100edcf83bd03d35abc", "82f82b99fa18452697404100edcf83bd03d35abc" ]
[ "habis/formats.py", "habis/mpfilter.py" ]
[ "'''\nRoutines for manipulating HABIS data file formats.\n'''\n\n# Copyright (c) 2015 Andrew J. Hesford. All rights reserved.\n# Restrictions are listed in the LICENSE file distributed with this package.\n\nimport mmap\nimport numpy as np\nimport os\nimport struct\n\nfrom itertools import repeat\n\nfrom collections import OrderedDict\n\nfrom functools import reduce, partial\n\nimport warnings\n\nclass ArgparseLoader(object):\n\t'''\n\tA factory to load arguments provided to argparse.ArgumentParser using a\n\tprovided loader function with a defined set of options.\n\t'''\n\tdef __init__(self, loader, *args, **kwargs):\n\t\t'''\n\t\tCreate a callable that accepts a single string argument and,\n\t\twhen called, invokes the provided loader function with the\n\t\tstring as the first argument. All other positional and keyword\n\t\targuments are stored and passed to the loader following the\n\t\tstring.\n\t\t'''\n\t\tif not callable(loader):\n\t\t\traise TypeError('Argument \"loader\" must be callable')\n\n\t\t# Retain a reference to the loader\n\t\tself._loader = loader\n\n\t\t# Retain the mode and a copy of the arguments\n\t\tself._args = tuple(args)\n\t\tself._kwargs = kwargs\n\n\n\tdef __call__(self, string):\n\t\t'''\n\t\tInvoke the loader associated with this instance, passing string\n\t\tas the first argument and all associated positional and keyword\n\t\targuments thereafter.\n\n\t\tAny error encountered will be converted to an\n\t\targparse.ArgumentTypeError.\n\t\t'''\n\t\tfrom argparse import ArgumentTypeError\n\n\t\ttry:\n\t\t\treturn self._loader(string, *self._args, **self._kwargs)\n\t\texcept Exception as err:\n\t\t\tmessage = f'failed to load {string}: {err}'\n\t\t\traise ArgumentTypeError(message)\n\n\n# Warnings and errors related to WaveformSet I/O\nclass WaveformSetIOWarning(UserWarning): pass\nclass WaveformSetIOError(Exception): pass\n\ndef strict_int(x):\n\tix = int(x)\n\tif ix != x:\n\t\traise ValueError('Argument must be integer-compatible')\n\treturn ix\n\n\ndef strict_nonnegative_int(x, positive=False):\n\tx = strict_int(x)\n\tif positive and x <= 0:\n\t\traise ValueError('Argument must be positive')\n\telif x < 0:\n\t\traise ValueError('Argument must be nonnegative')\n\treturn x\n\n\ndef renderAndLoadYaml(data, **kwargs):\n\t'''\n\tAttempt to render the string data as a Mako template with kwargs passed\n\tto the Mako renderer with strict_undefined=True. Parse the rendered\n\tresult as YAML using yaml.safe_load.\n\n\tIf the Mako template engine cannot be imported, the data is parsed as\n\tpure YAML. Specifying kwargs when Mako cannot be imported raises a\n\tTypeError.\n\t'''\n\tfrom yaml import safe_load\n\n\ttry:\n\t\tfrom mako.template import Template\n\texcept ImportError:\n\t\tif kwargs:\n\t\t\traise TypeError('Extra keyword arguments '\n\t\t\t\t\t'require Mako template engine')\n\t\treturn safe_load(data)\n\telse:\n\t\ttmpl = Template(text=data, strict_undefined=True)\n\t\treturn safe_load(tmpl.render(**kwargs))\n\n\ndef loadmatlist(files, *a, **k):\n\t'''\n\tA convenience function to produce the ordered dictionary\n\n\t\tOrderedDict(sorted(kv for f in files\n\t\t\t\tfor kv in loadkeymat(f, *a, **k).items()))\n\n\tIf files is a string instead of any other iterable, it will be replaced\n\twith glob.glob(files) before being inserted into the above constructor.\n\n\tWhen files is a string, a special keyword argument, forcematch, may be\n\tprovided. 
This argument will be stripped from the kwargs dictionary k\n\tand, when True, will cause an IOError to be raised if the glob matches\n\tno files. Otherwise, if forcematch is omitted or False, a glob that\n\tmatches no files will cause an empty map to be returned.\n\t'''\n\tif isinstance(files, str):\n\t\tfrom glob import glob\n\t\tfiles = glob(files)\n\t\tforcematch = k.pop('forcematch', False)\n\t\tif forcematch and not files: raise IOError('No matches for glob \"files\"')\n\n\treturn OrderedDict(sorted(kv for f in files\n\t\tfor kv in loadkeymat(f, *a, **k).items()))\n\n\ndef loadkeymat(f, scalar=None, dtype=None, nkeys=None):\n\t'''\n\tA convenience function that will attempt to load a mapping from f using\n\tloadz_keymat or (if loadz_keymat fails) loadtxt_keymat. The optional\n\targuments scalar and dtype, if not None, are passed as kwargs to either\n\tload function.\n\n\tIf nkeys is not None, it will be used to verify the cardinality of keys\n\tin a mapping returned by a successful call to loadz_keymat or passed as\n\tan argument to loadtxt_keymat.\n\t'''\n\t# Build optional kwargs\n\tkwargs = { }\n\tif scalar is not None: kwargs['scalar'] = scalar\n\tif dtype is not None: kwargs['dtype'] = dtype\n\n\ttry:\n\t\tmapping = loadz_keymat(f, **kwargs)\n\texcept (ValueError, IOError):\n\t\tif nkeys is not None: kwargs['nkeys'] = nkeys\n\t\treturn loadtxt_keymat(f, **kwargs)\n\n\tif nkeys is not None and len(mapping):\n\t\tkey = next(iter(mapping.keys()))\n\n\t\ttry: nk = len(key)\n\t\texcept TypeError: nk = 1\n\n\t\tif nkeys != nk:\n\t\t\traise ValueError('Cardinality of keys in mapping does not match nkeys parameter')\n\n\treturn mapping\n\n\ndef savez_keymat(f, mapping, sortrows=True, compressed=False, comment=None):\n\t'''\n\tStores mapping, which maps one or more integers to one or more\n\tnumerical values, into f (which may be a string providing a file name,\n\tor an open file-like object) using numpy.savez (if compressed is\n\tFalse) or numpy.savez_compressed (if compressed is True).\n\n\tAll keys must contain the same number of integers. Each value in the\n\tmapping may consist of an arbitrary number of numeric values.\n\n\tIf sortrows is True, the data will be stored in an order determined by\n\tsorted(mapping.keys()). Otherwise, the row order is either arbitrary or\n\tenforced by the input map (e.g., an OrderedDict).\n\n\tThe saved npz file contains three arrays: 'keys', an N-by-M integer\n\tarray such that each row specifies an M-integer key in the input\n\tmapping; 'values', which stores the values of the mapping flattened\n\taccording to the order of 'keys', and 'lengths', which specifies the\n\tlength of the value array for each associated key. That is,\n\n\t\tmapping[keys[i]] = values[start:start+lengths[i]],\n\n\twhere start = sum(lengths[j] for 0 <= j < i).\n\n\tIf the lengths of the value lists for all keys are the same, the\n\t'lengths' array may be just a scalar value, in which case 'lengths[i]'\n\tshould be interpreted as '([lengths] * len(keys))[i]'.\n\n\tIf comment is not None, it should be a string that will be stored as an\n\textra array, called 'comment', in the output file. 
The comment will be\n\tignored when loading the file.\n\t'''\n\t# Make sure any comment is a string\n\tif comment is not None: exargs = { 'comment': str(comment) }\n\telse: exargs = { }\n\n\tkeys = sorted(mapping.keys()) if sortrows else list(mapping.keys())\n\n\t# Build the length array and flattened value array\n\tlengths, values = [ ], [ ]\n\tfor k in keys:\n\t\tv = mapping[k]\n\n\t\ttry:\n\t\t\tlengths.append(len(v))\n\t\t\tvalues.extend(v)\n\t\texcept TypeError:\n\t\t\tlengths.append(1)\n\t\t\tvalues.append(v)\n\n\tlengths = np.array(lengths)\n\tvalues = np.array(values)\n\n\t# Collapse lengths to scalar if possible\n\ttry: lv = lengths[0]\n\texcept IndexError: lv = 0\n\tif np.all(lengths == lv):\n\t\tlengths = np.array(lv)\n\n\n\t# Verify the value array\n\tif not np.issubdtype(values.dtype, np.number):\n\t\traise TypeError('Values in mapping must be numeric')\n\n\t# Verify the key array\n\tkeys = np.array(keys)\n\tif not np.issubdtype(keys.dtype, np.integer) or keys.ndim > 2:\n\t\traise TypeError('Keys in mapping consist of one more integers and must have consistent cardinality')\n\n\tsavez = np.savez_compressed if compressed else np.savez\n\tsavez(f, keys=keys, values=values, lengths=lengths, **exargs)\n\n\ndef loadz_keymat(*args, **kwargs):\n\t'''\n\tLoad and return, using numpy.load(*args, **kwargs), a mapping (created\n\twith savez_keymat) from one or more integers to one or more numerical\n\tvalues.\n\n\tIf the number of elements in every value array is 1, setting an\n\toptional keyword argument scalar (True by default) to False will\n\tpreserve the values as 1-element Numpy arrays. Otherwise, 1-element\n\tNumpy arrays will be collapsed to scalars. The scalar keyword argument\n\tis stripped from the kwargs and is not passed to numpy.load.\n\n\tThe data types of the value arrays can be forced by specifying an\n\toptional keyword argument dtype. 
The dtype argument will be stripped\n\tfrom the kwargs and is not passed to numpy.load.\n\n\tThe returned mapping is an OrderedDict that preserves the ordering of\n\tkeys in the input file.\n\n\tIf the loaded file does not contain a valid mapping in the style\n\tprepared by savez_keymat, a ValueError will be raised.\n\n\tIf the file contains a \"comment\" key, it will be silently ignored.\n\t'''\n\t# Pull specialty kwargs\n\tscalar = kwargs.pop('scalar', True)\n\tdtype = kwargs.pop('dtype', None)\n\n\ttry:\n\t\t# Load the file\n\t\twith np.load(*args, **kwargs) as data:\n\t\t\ttry:\n\t\t\t\tfiles = set(data.keys())\n\n\t\t\t\t# Ignore a comment in the file\n\t\t\t\ttry: files.remove('comment')\n\t\t\t\texcept KeyError: pass\n\n\t\t\t\t# Make sure all other fields are recognized\n\t\t\t\tif files != { 'keys', 'values', 'lengths' }: raise ValueError\n\t\t\texcept (AttributeError, ValueError):\n\t\t\t\traise ValueError('Unrecognized data structure in input')\n\n\t\t\tkeys = data['keys']\n\t\t\tvalues = data['values']\n\t\t\tlengths = data['lengths']\n\texcept AttributeError:\n\t\traise ValueError('Invalid file format')\n\n\t# Convert the data type if desired\n\tif dtype is not None:\n\t\tvalues = values.astype(dtype)\n\n\tif not np.issubdtype(keys.dtype, np.integer) or not 0 < keys.ndim < 3:\n\t\traise ValueError('Invalid mapping key structure')\n\n\tif not np.issubdtype(lengths.dtype, np.integer) or lengths.ndim > 1:\n\t\traise ValueError('Invalid mapping length structure')\n\n\tif not np.issubdtype(values.dtype, np.number) or values.ndim != 1:\n\t\traise ValueError('Invalid mapping value structure')\n\n\tif lengths.ndim == 1 and len(lengths) != len(keys):\n\t\traise ValueError('Mapping lengths and keys do not have equal lengths')\n\n\tnvals = np.sum(lengths) if lengths.ndim == 1 else (lengths * len(keys))\n\tif len(values) != nvals:\n\t\traise ValueError('Mapping values do not have appropriate lengths')\n\n\tif scalar:\n\t\t# Determine whether the mapped values can be collapsed to scalars\n\t\tif lengths.ndim == 0:\n\t\t\tscalar = lengths == 1\n\t\telse:\n\t\t\tscalar = (lengths.shape[0] > 0 and\n\t\t\t\t\tall(lv == 1 for lv in lengths))\n\n\t# Collapse 1-element keys to scalars\n\ttry: keys = keys.squeeze(axis=1)\n\texcept ValueError: pass\n\n\tif keys.ndim == 2:\n\t\t# Convert a list of key values to a tuple of Python scalars\n\t\tkeys = [ tuple(k.tolist()) for k in keys ]\n\telse:\n\t\t# Collapse a single key value to a single Python scalar\n\t\tkeys = [ k.tolist() for k in keys ]\n\n\tmapping = OrderedDict()\n\tstart = 0\n\n\tfor key, lv in zip(keys, lengths if lengths.ndim == 1 else repeat(lengths)):\n\t\tmapping[key] = values[start] if scalar else values[start:start+lv]\n\t\tstart += lv\n\n\treturn mapping\n\n\ndef loadtxt_keymat(*args, **kwargs):\n\t'''\n\tLoads a textual Numpy matrix by calling numpy.loadtxt(*args, **kwargs),\n\tthen converts the output to an OrderedDict mapping integers in some\n\tpositive number of leading columns to Numpy arrays composed of the\n\tremaining columns. The ouput dictionary preserves the ordering of rows\n\tin the input file.\n\n\tIf the number of remaining columns is 1, setting an optional keyword\n\targument scalar (default: True) to False will preserve 1-element Numpy\n\tarrays as the values of the dictionary. Otherwise, 1-element Numpy\n\tarrays in the dictionary values will be collapsed to scalars. 
The\n\tscalar keyword argument is stripped from kwargs and is not passed to\n\tnumpy.loadtxt.\n\n\tThe dimensionality of the text matrix will be forced to 2 by adding\n\tndmin=2 to the kwargs. Therefore, this value should not be specified in\n\targs or kwargs.\n\n\tAn optional keyword argument, nkeys (default: 1), will be stripped from\n\tkwargs to determine the number of leading columns to use as keys. If\n\tnkeys is 1, the keys will be single integers. For nkeys > 1, the keys\n\twill be tuples of integers.\n\t'''\n\t# Pull speciality kwargs\n\tnkeys = strict_nonnegative_int(kwargs.pop('nkeys', 1), positive=True)\n\tscalar = kwargs.pop('scalar', True)\n\n\t# Ensure the dimensionality is correctly specified\n\tkwargs['ndmin'] = 2\n\tmat = np.loadtxt(*args, **kwargs)\n\n\t_, ncol = mat.shape\n\n\tif nkeys >= ncol:\n\t\traise ValueError('Number of key columns must be less than number of columns in matrix')\n\n\tdef kvmaker(g):\n\t\tk = tuple(strict_int(gv) for gv in g[:nkeys])\n\t\tv = g[nkeys:]\n\t\tif len(k) < 2: k = k[0]\n\t\tif scalar and len(v) < 2: v = v[0]\n\t\treturn k, v\n\n\treturn OrderedDict(kvmaker(g) for g in mat)\n\n\ndef savetxt_keymat(*args, **kwargs):\n\t'''\n\tStores a dictionary mapping integers to sequences as a textual Numpy\n\tmatrix using numpy.savetxt(*args, **kwargs), where the keys become the\n\tleading columns of the matrix and the remaining columns are populated\n\tby the corresponding values.\n\n\tIf a format is specified as the 'fmt' argument to savetxt, it must\n\taccount for the extra columns populated by the keys.\n\n\tIf kwargs contains a 'sortrows' argument, the Boolean value (defaulting\n\tto True) for the argument determines whether the mapping is sorted by\n\tkeys prior to output. Without sorting, the row order is either\n\tarbitrary or enforced by the input map (e.g., an OrderedDict). 
This\n\targument is not forwarded to savetxt.\n\t'''\n\t# Pull the map\n\tif len(args) > 1:\n\t\tx = args[1]\n\telse:\n\t\tx = kwargs.pop('X')\n\n\tsortrows = kwargs.pop('sortrows', True)\n\n\tdef aslist(x):\n\t\ttry: return list(x)\n\t\texcept TypeError: return list([x])\n\n\trows = iter(x.items()) if not sortrows else sorted(x.items())\n\n\t# Convert the dictionary to a list of lists\n\tmat = [ aslist(k) + aslist(v) for k, v in rows ]\n\n\t# Overwrite the input argument for the matrix\n\tif len(args) > 1:\n\t\targs = tuple(a if i != 1 else mat for i, a in enumerate(args))\n\telse:\n\t\tkwargs['X'] = mat\n\n\tnp.savetxt(*args, **kwargs)\n\n\ndef findenumfiles(dir, prefix='.*?', suffix='', ngroups=1):\n\t'''\n\tFind all files in the directory dir with a name matching the regexp\n\tr'^<PREFIX>(-([0-9]+)){ngroups}<SUFFIX>$', where <PREFIX> is replaced\n\twith an optional prefix and <SUFFIX> is replaced with an optional\n\tsuffix to restrict the search, and return a list of tuples in which the\n\tfirst item is the name and subsequent entries are the matched integers\n\t(which will number ngroups) in left-to-right order.\n\t'''\n\tfrom os.path import join\n\tfrom re import compile as recomp\n\n\tif ngroups < 1:\n\t\traise ValueError('At least one number group must be specified')\n\n\t# Build the number-matching portion\n\tnumstr = '-([0-9]+)' * ngroups\n\t# Enumerate the matching groups (0 is the whole matching string)\n\tgrpidx = tuple(range(ngroups + 1))\n\t# Build the regexp and filter the list of files in the directory\n\tregexp = recomp(r'^%s%s%s$' % (prefix, numstr, suffix))\n\t# When converting matched groups to integers, discard the whole-string group\n\treturn [tuple([join(dir, f)] + [int(g) for g in m.group(*grpidx)[1:]])\n\t\t\tfor f in os.listdir(dir) for m in [regexp.match(f)] if m]\n\n\ndef specreptype():\n\t'''\n\tReturns a numpy data type consisting of a 64-bit complex component,\n\tlabeled 'val', which stores the magnitude of a spectral component and a\n\t64-bit integer, labeled 'idx', which stores the component's FFT index.\n\t'''\n\treturn np.dtype([('val', np.complex64), ('idx', np.int64)])\n\n\ndef splitspecreps(a):\n\t'''\n\tBreak a record array a of concatenated spectral representations, with\n\tdtype habis.formats.specreptype(), into a list of record arrays\n\tcorresponding to each group of spectral representations in the original\n\tarray. The number of records in the first group (output[0]) is\n\tspecified by n[0] = (a[0]['idx'] + 1), with output[0] = a[:n[0]].\n\n\tThe number of records in a subsequent group (output[i]) is given by\n\n\t\tn[i] = (a[sum(n[:i-1])]['idx'] + 1),\n\n\twith output[i] = a[sum(n[:i-1]):sum(n[:i])].\n\t'''\n\tstart = 0\n\toutput = []\n\twhile start < len(a):\n\t\tnvals = a[start]['idx'] + 1\n\t\tif nvals < 1: raise ValueError('Spectral representation counts must be positive')\n\t\tgrp = a[start:start+nvals]\n\t\tif len(grp) < nvals: raise ValueError('Could not read specified number of records')\n\t\toutput.append(a[start:start+nvals])\n\t\tstart += nvals\n\treturn output\n\n\ndef countspecreps(f):\n\t'''\n\tFor a file f that contains sequence of spectral representations, return\n\tthe number of components in each group within the sequence. 
Thus, if A\n\trepresents the array of habis.formats.specreptype() records listed in the\n\tfile f, the output array n will have\n\n\t\tn[0] = (A[0]['idx'] + 1), and\n\t\tn[i] = (A[sum(n[:i-1])]['idx'] + 1).\n\t'''\n\tdtype = specreptype()\n\t# Open the file and determine its size\n\tinfile = open(f, 'rb')\n\tinfile.seek(0, os.SEEK_END)\n\tfend = infile.tell()\n\tinfile.seek(0, os.SEEK_SET)\n\t# Scan through the file to pick up all of the counts\n\tn = []\n\twhile (infile.tell() < fend):\n\t\t# Read the header record and add it to the list\n\t\tnrec = np.fromfile(infile, dtype=dtype, count=1)[0]['idx']\n\t\tn.append(nrec + 1)\n\t\t# Skip over the records for this group\n\t\tinfile.seek(nrec * dtype.itemsize, os.SEEK_CUR)\n\n\treturn n\n\n\ndef repreducer(n):\n\t'''\n\tThis is a factory function that returns a reducer function, suitable\n\tfor use in readfiresequence and readfirecapture, which selects only\n\trows whose repetition index matches the specified integer n.\n\t'''\n\tdef reducefunc(mat): return mat[mat[:,1].astype(int) == n]\n\treturn reducefunc\n\n\ndef readfirecapture(f, reducer=None):\n\t'''\n\tRead the capture of a single HABIS fire sequence (with any number of\n\ttransmit repetitions) in CSV format. The file has 4 header lines and is\n\tcomma-delimited. The format of each line is a sequence of integers\n\n\t\tchannel, repetition, samples...\n\n\twhere samples are in the range [-8192,8192). Channel values are indexed\n\tfrom zero.\n\n\tThe data is sorted first by channel and then by repetition index before\n\tprocessing.\n\n\tThe return value is a tuple (output, channels, repetitions), where\n\toutput is 3-D array of the form output[i,j,k], where i is the receive\n\tchannel index, j is the repetition, and k is the sample index. Every\n\treceive channel must contain the same number of repetitions or a\n\tValueError will be raised. 
The list channels contains elements that\n\tindicate the channel indices identified in the file, such that\n\tchannels[i] is the listed channel index for slice output[i,:,:].\n\tThe list repetitions is similarly defined such that repetitions[j] is\n\tthe listed repetition index for slice output[:,j,:].\n\n\tIf reducer is not None, it should be a callable that takes as input the\n\traw array data read from f and returns a filtered version of the data\n\tthat will be processed as if it were the raw data read from the file.\n\t'''\n\tfrom pandas import read_csv\n\t# Read the data and use the reducer filter if appropriate\n\tdata = read_csv(f, skiprows=4, header=None).values\n\t# If reducer is None, a TypeError is raised; just ignore it\n\ttry: data = reducer(data)\n\texcept TypeError: pass\n\n\t# Sort the data according to channel and repetition\n\tidx = sorted((d[0], d[1], i) for i, d in enumerate(data[:,:2]))\n\tdata = data[[v[-1] for v in idx]]\n\t# Count the channels and repetitions\n\tdef counter(x, y):\n\t\t\"Count the channel and repetition in a result dictionary tuple\"\n\t\ttry: x[0][y[0]] += 1\n\t\texcept KeyError: x[0][y[0]] = 1\n\t\ttry: x[1][y[1]] += 1\n\t\texcept KeyError: x[1][y[1]] = 1\n\t\treturn x\n\tchannels, repetitions = reduce(counter, idx, ({}, {}))\n\t# Ensure that all channels have the same repetition count\n\tif len(set(channels.values())) != 1:\n\t\traise ValueError('All channels must have the same number of repetitions')\n\tif len(set(repetitions.values())) != 1:\n\t\traise ValueError('Each channel must have the same set of repetition indices')\n\n\t# Strip out the channel and repetition indices\n\tchannels = sorted(channels.keys())\n\trepetitions = sorted(repetitions.keys())\n\n\tnchan = len(channels)\n\tnreps = len(repetitions)\n\tnsamps = data.shape[-1] - 2\n\n\treturn data[:,2:].reshape((nchan, nreps, nsamps)), channels, repetitions\n\n\ndef readfiresequence(fmt, findx, reducer=None):\n\t'''\n\tRead a series of HABIS fire captures whose names are given by the\n\tPython format string fmt. The string fmt is passed to the format\n\tfunction with each value in the sequence findx to produce a unique\n\tfilename. The output arrays of readfirecapture() are collected, in\n\tsequence, and concatenated along a new first axis.\n\n\tThe channel and repetition indices returned by readfirecapture() are\n\tignored. 
However, because np.concatenate() is used to produce the\n\tconcatenated output, every readfirecapture() array must have the same\n\tshape.\n\n\tThe reducer is passed to readfirecapture for processing per-fire data.\n\t'''\n\tdata = [readfirecapture(fmt.format(f), reducer=reducer)[0][np.newaxis,:,:,:]\n\t\t\tfor f in findx]\n\treturn np.concatenate(data, axis=0)\n\n\nclass TxGroupIndex(tuple):\n\t'''\n\tA class to encapsulate and type-check transmit-index pairs.\n\t'''\n\tdef __new__(cls, lidx, gidx):\n\t\t'''\n\t\tCreate a new TxGroupIndex with local index lidx and\n\t\tgroup index gidx.\n\t\t'''\n\t\tlidx = strict_nonnegative_int(lidx)\n\t\tgidx = strict_nonnegative_int(gidx)\n\t\treturn tuple.__new__(cls, (lidx, gidx))\n\t@property\n\tdef idx(self): return self[0]\n\t@property\n\tdef grp(self): return self[1]\n\n\tdef signForTx(self, transmission, group):\n\t\t'''\n\t\tReturn the sign (-1, 0, 1) of the given transmission\n\t\tnumber and group for this transmit and group index.\n\t\t'''\n\t\t# If the groups don't match, the sign is zero\n\t\tif group != self.grp: return 0\n\n\t\t# Count number of common bits in transmission and idx\n\t\ttxcom = strict_nonnegative_int(transmission) & self.idx\n\t\tcount = 0\n\t\twhile txcom:\n\t\t\ttxcom &= txcom - 1\n\t\t\tcount += 1\n\n\t\t# Sign is +1 for even number of common bits\n\t\treturn 1 - 2 * (count % 2)\n\n\nclass TxGroupConfiguration(tuple):\n\t'''\n\tA class to encapsulate and type-check transmit-group configurations.\n\t'''\n\tdef __new__(cls, count, size):\n\t\t'''\n\t\tCreate a new TxGroupConfiguration.\n\t\t'''\n\t\tcount = strict_nonnegative_int(count)\n\t\tsize = strict_nonnegative_int(size)\n\t\treturn tuple.__new__(cls, (count, size))\n\n\t@property\n\tdef count(self): return self[0]\n\t@property\n\tdef size(self): return self[1]\n\t@property\n\tdef maxtx(self): return self[0] * self[1]\n\n\nclass RxChannelHeader(tuple):\n\t'''\n\tA class to encapsulate and type-check receive-channel headers\n\tin WaveformSet files.\n\t'''\n\tdef __new__(cls, idx, pos, win, txgrp=None):\n\t\t'''\n\t\tCreate a new header for receive channel idx,\n\t\telement location pos = (px, py, pz), and data window\n\t\twin = (start, length). 
The transmit group txgrp may\n\t\teither be None or (index, group).\n\t\t'''\n\t\tfrom .sigtools import Window\n\t\tidx = strict_nonnegative_int(idx)\n\t\tpx, py, pz = pos\n\t\tpos = tuple(float(p) for p in (px, py, pz))\n\t\t# Force the window start to be nonnegative\n\t\twin = Window(win, nonneg=True)\n\t\tif txgrp is not None: txgrp = TxGroupIndex(*txgrp)\n\t\treturn tuple.__new__(cls, (idx, pos, win, txgrp))\n\t@property\n\tdef idx(self): return self[0]\n\t@property\n\tdef pos(self): return self[1]\n\t@property\n\tdef win(self): return self[2]\n\t@property\n\tdef txgrp(self): return self[3]\n\n\tdef copy(self, **kwargs):\n\t\t\"Copy the header, optionally replacing certain properties.\"\n\t\tkeys = ['idx', 'pos', 'win', 'txgrp']\n\t\tprops = dict((key, kwargs.pop(key, getattr(self, key))) for key in keys)\n\t\tif len(kwargs):\n\t\t\traise TypeError(\"Unrecognized keyword '%s'\" % (next(iter(kwargs.keys())),))\n\t\treturn type(self)(**props)\n\n\nclass WaveformSet(object):\n\t'''\n\tA class to encapsulate a (possibly multi-facet) set of pulse-echo\n\tmeasurements from a single target.\n\t'''\n\t# A bidirectional mapping between typecodes and Numpy dtype names\n\tfrom pycwp.util import bidict\n\ttypecodes = bidict({b'I2': 'int16', b'I4': 'int32', b'I8': 'int64', b'F2': 'float16',\n\t\t\tb'F4': 'float32', b'F8': 'float64', b'C4': 'complex64', b'C8': 'complex128'})\n\n\t@staticmethod\n\tdef _get_open(f=None, compression=None):\n\t\t'''\n\t\tReturn the appropriate open function to handle optionally\n\t\tcompressed files and a Boolean that is True iff compression was\n\t\tdetected or requested.\n\n\t\tIf f is not None, it should be the name of an existing file.\n\t\tThe python-magic module will be used to determine whether\n\t\tgzip.open, bz2.open or the regular open should be used to read\n\t\tthe file. The \"compression\" argument in this case is ignored.\n\n\t\tIf f is None, then compression should be one of None, 'gzip' or\n\t\t'bz2'.\n\t\t'''\n\t\timport bz2, gzip\n\t\topeners = { 'bz2': bz2.open, 'gzip': gzip.open, '': open }\n\n\t\tif not f:\n\t\t\tcompression = (compression or '').strip().lower()\n\t\t\terrmsg = 'Value of compression must be None, \"gzip\" or \"bz2\"'\n\t\telse:\n\t\t\ttry: import magic\n\t\t\texcept ImportError: mime = ''\n\t\t\telse: mime = magic.Magic(mime=True).from_file(f).lower()\n\n\t\t\tcompression = { 'application/x-gzip': 'gzip',\n\t\t\t\t\t'application/x-bzip2': 'bz2' }.get(mime, '')\n\t\t\terrmsg = 'Unable to determine file compression scheme'\n\n\t\ttry: return (openers[compression], compression != '')\n\t\texcept KeyError: raise ValueError(errmsg)\n\n\n\t@classmethod\n\tdef fromwaveform(cls, wave, copy=False, hdr=None, rid=0, tid=0, f2c=0):\n\t\t'''\n\t\tCreate a new WaveformSet object with a single transmit index\n\t\tand a single receive index with a sample count and data type\n\t\tdefined by the provided Waveform wave. The sole waveform record\n\t\twill be populated with wave.\n\n\t\tIf copy is False, the record in the WaveformSet will, whenever\n\t\tpossible, capture a reference to the waveform data instead of\n\t\tmaking a copy. If copy is True, a copy will always be made.\n\n\t\tIf hdr is not None, it should be a receive-channel header that\n\t\twill be used for the single receive-channel record in the\n\t\toutput WaveformSet. 
The value of hdr.win will be overwritten\n\t\twith wave.datawin, and the value of rid will be ignored.\n\n\t\tIf hdr is None, a default header\n\n\t\t\t(rid, [0., 0., 0.], wave.datawin)\n\n\t\twill be used.\n\n\t\tThe parameter tid should be a single nonnegative integer that\n\t\tspecifies the transmit index to assign to the Waveform.\n\n\t\tThe parameter f2c should be a single nonnegative integer that\n\t\tspecifies the fire-to-capture delay to encode in the set.\n\t\t'''\n\t\t# Create the set\n\t\twset = cls(1, tid, wave.nsamp, f2c, wave.dtype)\n\n\t\tif hdr is None:\n\t\t\t# Create a default header\n\t\t\thdr = RxChannelHeader(rid, [0.]*3, wave.datawin)\n\t\telse:\n\t\t\t# Ensure hdr is RxChannelHeader, then set datawin\n\t\t\thdr = RxChannelHeader(*hdr).copy(win=wave.datawin)\n\n\t\twset.setrecord(hdr, wave.getsignal(wave.datawin), copy)\n\t\treturn wset\n\n\n\t@classmethod\n\tdef empty_like(cls, wset, with_context=True):\n\t\t'''\n\t\tCreate a new instance of WaveformSet configured exactly as\n\t\twset, except without any waveform records.\n\n\t\tIf with_context is True, the dictionary wset.context will be\n\t\tcopied (shallowly) into the created WaveformSet. Otherwise, the\n\t\tcontext of the created WaveformSet will be empty\n\t\t'''\n\t\tnwset = cls(wset.ntx, wset.txstart, wset.nsamp, wset.f2c, wset.dtype, wset.txgrps)\n\t\tif with_context: nwset.context = wset.context.copy()\n\t\telse: nwset.context = { }\n\t\treturn nwset\n\n\n\tdef __init__(self, ntx=0, txstart=0, nsamp=4096, f2c=0,\n\t\t\tdtype=np.dtype('int16'), txgrps=None):\n\t\t'''\n\t\tCreate an empty WaveformSet object that embodies acquisitions\n\t\tof a set of waveforms from a total of ntx transmission indices (0-based)\n\t\tstarting from index txstart. Each acquisition starts after a\n\t\tfire-to-capture delay of f2c samples and persists for nsamp\n\t\tsamples. Waveform arrays are stored with the specified Numpy\n\t\tdtype.\n\n\t\tIf txgrps is specified, it should be a TxGroupConfiguration\n\t\tobject or a tuple of the form (count, size) that specifies the\n\t\tnumber of transmit groups into which transmissions are\n\t\tsubdivided, and the number of elements in each group.\n\t\t'''\n\t\t# Record the waveform dtype\n\t\tself._dtype = np.dtype(dtype)\n\n\t\t# Prepopulate properties that will be validated later\n\t\tself._f2c = 0\n\t\tself._nsamp = 0\n\t\tself._ntx = 0\n\t\tself._txstart = 0\n\t\tself._txgrps = None\n\n\t\t# Create an empty, ordered record dictionary\n\t\t# Needed for validation of other properties\n\t\tself._records = OrderedDict()\n\n\t\t# Create an empty group map\n\t\tself._groupmap = { }\n\n\t\t# Assign validated properties\n\t\tself.nsamp = nsamp\n\t\tself.f2c = f2c\n\n\t\t# Build and validate the transmit-channel mapping\n\t\tself.ntx = ntx\n\t\tself.txstart = txstart\n\n\t\t# Initialize the group configuration as specified\n\t\tself.txgrps = txgrps\n\n\t\t# Extra scan context can be read from a file header and is\n\t\t# passed on when writing compatible versions, but is never\n\t\t# inherently interpreted\n\t\tself.context = { }\n\n\n\t@classmethod\n\tdef _verify_file_version(cls, version, write=False):\n\t\t'''\n\t\tEnsure that the provided version matches one supported by the\n\t\tWaveformSet class. If version is unsupported, a ValueError is\n\t\traised. 
Otherwise, just return the version tuple.\n\t\t'''\n\t\ttry:\n\t\t\tmajor, minor = version\n\t\t\tmajor = strict_nonnegative_int(major)\n\t\t\tminor = strict_nonnegative_int(minor)\n\t\texcept (TypeError, ValueError):\n\t\t\traise ValueError('Version format is not recognized')\n\n\t\tif major != 1: raise ValueError('Unsupported major version')\n\n\t\tif not write:\n\t\t\t# Support all currently defined formats for reading\n\t\t\tif not (0 <= minor < 7):\n\t\t\t\traise ValueError('Unsupported minor version for reading')\n\t\t\treturn (major, minor)\n\n\t\t# Only version-6 writes are supported\n\t\tif minor != 6:\n\t\t\traise ValueError('Unsupported minor version for writing')\n\n\t\treturn major, minor\n\n\n\tdef store(self, f, append=False, ver=(1,6), compression=None):\n\t\t'''\n\t\tWrite the WaveformSet object to the data file in f (either a\n\t\tname or a file-like object that allows writing).\n\n\t\tIf append is True, the file-level header is not written. An\n\t\tunopened file is opened for appends instead of truncating an\n\t\texisting file. It is the caller's responsibility to assure that\n\t\tan existing file header is consistent with records written by\n\t\tthis method in append mode.\n\n\t\tThe compression argument should be None, 'gzip' or 'bz2'. If\n\t\tcompression is not None, f is a string and append is False, the\n\t\tfile will be opened as a gzip.GzipFile (for 'gzip') or\n\t\tbz2.BZ2File (for 'bz2'). It is a ValueError to specify a\n\t\tnon-None value for compression and a string for f when append\n\t\tmode is True. When f is not a string, the value of compression\n\t\tis ignored.\n\n\t\t** NOTE **\n\t\tBecause the WaveformSet may map some input file for waveform\n\t\tarrays after calling load(), calling store() with the same file\n\t\tused to load() may cause unexpected behavior.\n\t\t'''\n\t\t# Open the file if it is not open\n\t\tif isinstance(f, str):\n\t\t\topener, compressed = self._get_open(None, compression)\n\t\t\tif compressed and append:\n\t\t\t\traise ValueError('Append mode with compression is not supported')\n\t\t\tf = opener(f, ('ab' if append else 'wb'))\n\n\t\t# Verify that the output version is supported\n\t\tmajor, minor = self._verify_file_version(ver, write=True)\n\n\t\t# A missing transmit-group configuration takes the special value (0,0)\n\t\ttry: gcount, gsize = self.txgrps\n\t\texcept (TypeError, ValueError): gcount, gsize = 0, 0\n\n\t\tif not append:\n\t\t\t# Encode the magic number and file version\n\t\t\thbytes = struct.pack('<4s2I', b'WAVE', major, minor)\n\n\t\t\t# Encode temperature values\n\t\t\ttemps = self.context.get('temps', [float('nan')]*2)\n\t\t\thbytes += np.asarray(temps, dtype=np.float32).tobytes()\n\n\t\t\t# Encode the datatype\n\t\t\ttypecode = self.typecodes.inverse[np.dtype(self.dtype).name][0]\n\t\t\thbytes += struct.pack('<2s', typecode)\n\n\t\t\t# Encode transmission parameters\n\t\t\thbytes += struct.pack('<4I2HI', self.f2c, self.nsamp,\n\t\t\t\t\tself.nrx, self.ntx, gcount, gsize, self.txstart)\n\n\t\t\ttry:\n\t\t\t\t# Make sure TGC is a 1-D array\n\t\t\t\ttgc = np.asarray(self.context['tgc'], dtype=np.float32).squeeze()\n\t\t\texcept KeyError:\n\t\t\t\t# Header contains no TGC records\n\t\t\t\thbytes += struct.pack('<I', 0)\n\t\t\telse:\n\t\t\t\tif tgc.ndim != 1:\n\t\t\t\t\traise ValueError('TGC must be a 1-D array of floats')\n\t\t\t\t# Encode the TGC record count ahead of the values\n\t\t\t\thbytes += struct.pack('<I', len(tgc))\n\t\t\t\thbytes += tgc.tobytes()\n\n\t\t\tf.write(hbytes)\n\n\t\t# Write each record in turn\n\t\tfor idx in sorted(self.rxidx):\n\t\t\thdr, waveforms = 
self._get_record_raw(idx)\n\n\t\t\tif idx != hdr.idx:\n\t\t\t\traise ValueError('Record index does not match receive-channel index')\n\n\t\t\tpx, py, pz = hdr.pos\n\t\t\tws, wl = hdr.win\n\n\t\t\t# Without a transmit-group configuration, use (0,0)\n\t\t\ttry: li, gi = hdr.txgrp\n\t\t\texcept (TypeError, ValueError): li, gi = 0, 0\n\n\t\t\t# Encode the receive-channel header\n\t\t\thbytes = struct.pack('<3I3f2I', idx, li, gi, px, py, pz, ws, wl)\n\n\t\t\tf.write(hbytes)\n\t\t\t# Encode the waveform data\n\t\t\twbytes = waveforms.tobytes()\n\t\t\tf.write(wbytes)\n\t\t\tf.flush()\n\n\n\t@staticmethod\n\tdef _funpack(f, fmt):\n\t\t'''\n\t\tRead from the file pointer f (using f.read) the appropriate\n\t\tnumber of bytes to unpack the struct described by the format\n\t\tstring fmt.\n\n\t\tThe file must already be open. Any exception is caught and\n\t\tconverted into a WaveformSetIOError.\n\t\t'''\n\t\ttry:\n\t\t\tsz = struct.calcsize(fmt)\n\t\t\treturn struct.unpack(fmt, f.read(sz))\n\t\texcept Exception as err:\n\t\t\traise WaveformSetIOError(f'Failure to unpack bytes: {err}')\n\n\n\t@staticmethod\n\tdef _npunpack(f, dtype, count):\n\t\t'''\n\t\tRead from the file pointer f (using f.read) the appropriate\n\t\tnumber of bytes to build a 1-D Numpy array of the specified\n\t\ttype and count. The count must be nonnegative. If count is 0,\n\t\tthe returned array will be empty.\n\n\t\tThe file must already be open. Any exception raised by the I/O\n\t\tand Numpy bytes-to-array conversion is caught and converted\n\t\tinto a WaveformSetIOError.\n\t\t'''\n\t\tif count < 0:\n\t\t\traise ValueError(f'Cannot read {count} items into Numpy array')\n\t\telif count < 1:\n\t\t\treturn np.array([], dtype=dtype)\n\n\t\tdtype = np.dtype(dtype)\n\n\t\ttry:\n\t\t\trbytes = f.read(dtype.itemsize * count)\n\t\t\treturn np.frombuffer(rbytes, dtype, count)\n\t\texcept Exception as err:\n\t\t\traise WaveformSetIOError(f'Failure to read array: {err}')\n\n\n\t@classmethod\n\tdef load(cls, f, force_dtype=None, allow_duplicates=False,\n\t\t\tskip_zero_length=True, warn_on_error=True,\n\t\t\theader_only=False, stream_mode=False):\n\t\t'''\n\t\tCreate a WaveformSet object with the data in f, a file-like\n\t\tobject or string specifying a file name. If f is a file-like\n\t\tobject, parsing starts from the current file position.\n\n\t\tIn general, any error will cause a WaveformSetIOError exception\n\t\tto be raised.\n\n\t\tEach block of waveform data is memory-mapped (except when\n\t\tstream_mode is True; see below) from the source file. This\n\t\tmapping is copy-on-write; changes do not persist.\n\n\t\tIf force_dtype is not None, and the data type of records stored\n\t\tin the file is not equal to force_dtype, each record block will\n\t\tbe converted to the data type given by force_dtype.\n\n\t\tIf allow_duplicates is False, file parsing will halt the first\n\t\ttime a header is encountered for a receive-channel index\n\t\tpreviously encountered in the file. 
If allow_duplicates is\n\t\tTrue, each receive-channel record will replace any previously\n\t\tencountered records for the same channel index.\n\n\t\tRecords for which the data block has zero length will be read\n\t\tbut not stored in the WaveformSet object if skip_zero_length is\n\t\tTrue; if it is False, the empty record will be stored.\n\n\t\t** NOTE: If allow_duplicates is False, encountering multiple\n\t\trecords for the same receive-channel index will terminate\n\t\tparsing even if one or more of the duplicate records has zero\n\t\tlength and skip_zero_length is True.\n\n\t\tIt is an error if the number of parsed receive-channel records\n\t\tdoes not equal the number of records encoded in the file\n\t\theader. If warn_on_error is True, this error will cause a\n\t\twarning to be issued. Otherwise, a WaveformSetIOError will be\n\t\traised in case this error is encountered.\n\n\t\tIf header_only is True, the contents of the WaveformSet header\n\t\twill be read from the file, but processing will stop\n\t\tbefore records are read and stored in the WaveformSet instance.\n\t\tNo file-length checks are performed to determine whether the\n\t\tfile contents are valid (beyond the ability to parse the\n\t\theader), and no indication of the receive channels encoded in\n\t\tthe file will be available.\n\n\t\tWhen header_only is False, this method returns the WaveformSet\n\t\tinstance. When header_only is True, this method returns the\n\t\tWaveformSet and the value of the \"nrx\" property encoded in the\n\t\tfile.\n\n\t\tIf stream_mode is True, the waveform data will not be\n\t\tmemory-mapped, but will be copied into locally controlled\n\t\tmemory. Furthermore, seeks will not be performed on the input,\n\t\tmaking this mode suitable for compressed input. 
(When f is an\n\t\talready-open file object, this method will not attempt to detect\n\t\tcompression, so f should be a GzipFile, BZ2File or similar\n\t\tinstance if inline decompression is desired.)\n\t\t'''\n\t\t# Open the file if it is not open\n\t\tif isinstance(f, str):\n\t\t\topener, compressed = cls._get_open(f)\n\t\t\tf = opener(f, mode='rb')\n\t\t\t# Force stream mode for compressed input\n\t\t\tif compressed: stream_mode = True\n\n\t\t# Convenience: attach the file to funpack and npunpack\n\t\tfunpack = partial(cls._funpack, f)\n\t\tnpunpack = partial(cls._npunpack, f)\n\n\t\t# Read the magic number and file version\n\t\ttry:\n\t\t\tmagic, major, minor = funpack('<4s2I')\n\t\t\tif magic != b'WAVE': raise WaveformSetIOError\n\t\texcept WaveformSetIOError:\n\t\t\traise WaveformSetIOError('Unable to identify WAVE header')\n\n\t\ttry: major, minor = cls._verify_file_version((major, minor))\n\t\texcept ValueError as err:\n\t\t\traise WaveformSetIOError(f'Unsupported WAVE format: {err}')\n\n\t\t# Create some empty context\n\t\tcontext = { }\n\n\t\tif minor > 4:\n\t\t\t# Read temperature context\n\t\t\ttry: context['temps'] = npunpack('float32', 2)\n\t\t\texcept WaveformSetIOError as err:\n\t\t\t\traise WaveformSetIOError(f'Invalid temperature: {err}')\n\n\t\t# Read the type code for this file\n\t\ttry:\n\t\t\ttypecode = funpack('<2s')[0]\n\t\t\tdtype = np.dtype(cls.typecodes[typecode])\n\t\texcept (WaveformSetIOError, KeyError) as err:\n\t\t\traise WaveformSetIOError(f'Invalid typecode: {err}')\n\n\t\tif force_dtype is not None:\n\t\t\t# Force a dtype conversion, if necessary\n\t\t\tforce_dtype = np.dtype(force_dtype)\n\t\t\tif force_dtype == dtype: force_dtype = None\n\n\t\t# Parse common transmission parameters\n\t\tf2c, nsamp, nrx, ntx = funpack('<4I')\n\n\t\t# By default, start the transmission indexing at 0\n\t\ttxstart = 0\n\t\t# Clear any group configuration for now\n\t\ttxgrps = None\n\n\t\tif minor > 1:\n\t\t\t# Read the group configuration\n\t\t\tcount, size = funpack('<2H')\n\t\t\t# Make sure both values are sensible integers\n\t\t\tcount = strict_nonnegative_int(count)\n\t\t\tsize = strict_nonnegative_int(size)\n\n\t\t\t# Only configure transmit groups if the count is positive\n\t\t\tif count > 0:\n\t\t\t\t# Default group size, if unspecified, is 10240 / count\n\t\t\t\tif size == 0:\n\t\t\t\t\tsize = 10240 // count\n\t\t\t\t\tif size * count != 10240:\n\t\t\t\t\t\tmsg = f'Unable to infer size for {count} groups'\n\t\t\t\t\t\traise WaveformSetIOError(msg)\n\n\t\t\t\ttxgrps = count, size\n\n\t\t\t# For version (1,4) and above, read an explicit txstart\n\t\t\tif minor >= 4: txstart = funpack('<I')[0]\n\n\t\t\t# Minor versions below 6 used fixed 256-value TGC records\n\t\t\tif minor < 6: rcount = 256\n\t\t\telse: rcount = funpack('<I')[0]\n\n\t\t\tif rcount:\n\t\t\t\ttry: tgc = npunpack('float32', rcount)\n\t\t\t\texcept WaveformSetIOError as err:\n\t\t\t\t\tmsg = f'Unable to read {rcount} TGC values: {err}'\n\t\t\t\t\traise WaveformSetIOError(msg)\n\t\t\t\t# For minor versions < 6, don't keep all-zero TGC\n\t\t\t\tif minor > 5 or np.count_nonzero(tgc):\n\t\t\t\t\tcontext['tgc'] = tgc\n\t\telif minor == 0:\n\t\t\t# Version 0 uses an explicit 1-based transmit-index list\n\t\t\ttry: txidx = npunpack('uint32', ntx) - 1\n\t\t\texcept WaveformSetIOError as err:\n\t\t\t\tmsg = f'Tx list must contain {ntx} values: {err}'\n\t\t\t\traise WaveformSetIOError(msg)\n\n\t\t# Now create the empty object and associate context\n\t\twset = cls(ntx=ntx, txstart=txstart, nsamp=nsamp, f2c=f2c,\n\t\t\t\tdtype=(force_dtype or dtype), 
txgrps=txgrps)\n\t\twset.context = context\n\n\t\t# Skip processing of records in header_only mode\n\t\tif header_only: return wset, nrx\n\n\t\tif not stream_mode:\n\t\t\t# Use a single Python mmap buffer for backing data\n\t\t\t# (Map starts at file start; remember current location)\n\t\t\tfsrec = f.tell()\n\t\t\tbuf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_COPY)\n\t\t\tf.seek(fsrec)\n\n\t\t# For (1, 2) files, keep a running index tally\n\t\tidx = -1\n\n\t\t# If the set isn't configured for transmit groups,\n\t\t# ignore any group spec in the receive-channel headers\n\t\tusegrps = (wset.txgrps is not None)\n\n\t\t# Keep track of duplicate records, if necessary\n\t\tif not allow_duplicates:\n\t\t\tencountered = set()\n\n\t\t# Parse through the specified number of receive records\n\t\t# As a special case, when nrx is zero, read all possible records\n\t\twhile nrx == 0 or wset.nrx < nrx:\n\t\t\tif minor == 2:\n\t\t\t\t# Update running index\n\t\t\t\tidx += 1\n\t\t\telse:\n\t\t\t\t# Read a global channel index\n\t\t\t\t# Correct 1-based indexing in early versions\n\t\t\t\ttry: idx = funpack('<I')[0] - int(minor < 2)\n\t\t\t\texcept WaveformSetIOError: break\n\n\t\t\t# Read element position and data window parameters\n\t\t\tif minor > 1:\n\t\t\t\t# Also read transmission group configuration\n\t\t\t\ttry: i, g, px, py, pz, ws, wl = funpack('<2I3f2I')\n\t\t\t\texcept WaveformSetIOError: break\n\n\t\t\t\ttxgrp = (i, g) if usegrps else None\n\t\t\t\tif minor == 2:\n\t\t\t\t\t# Correct an off-by-one window specification bug\n\t\t\t\t\tif wl == nsamp and ws == 1: ws = 0\n\t\t\telse:\n\t\t\t\ttry: px, py, pz, ws, wl = funpack('<3f2I')\n\t\t\t\texcept WaveformSetIOError: break\n\t\t\t\ttxgrp = None\n\n\t\t\t# Build the channel header\n\t\t\thdr = (idx, (px, py, pz), (ws, wl), txgrp)\n\n\t\t\tif not allow_duplicates:\n\t\t\t\tif idx in encountered:\n\t\t\t\t\tmsg = f'Parsing terminated at duplicate record {idx}'\n\t\t\t\t\twarnings.warn(WaveformSetIOWarning(msg))\n\t\t\t\t\t# Avoid detecting junk after duplicate header\n\t\t\t\t\tif not stream_mode: fsrec = f.tell()\n\t\t\t\t\tbreak\n\t\t\t\tencountered.add(idx)\n\n\t\t\t# Determine the shape of the waveform\n\t\t\twaveshape = (ntx, wl)\n\n\t\t\tif not stream_mode:\n\t\t\t\t# Return a view into the map\n\t\t\t\tfsmap = f.tell()\n\n\t\t\t\ttry:\n\t\t\t\t\twavemap = np.ndarray(waveshape,\n\t\t\t\t\t\t\tdtype=dtype, buffer=buf,\n\t\t\t\t\t\t\torder='C', offset=fsmap)\n\t\t\t\texcept TypeError: break\n\n\t\t\t\t# Skip to next header and update next record offset\n\t\t\t\tf.seek(fsmap + wavemap.nbytes)\n\t\t\t\tfsrec = f.tell()\n\t\t\telse:\n\t\t\t\t# Read into a new array\n\t\t\t\tnvals = waveshape[0] * waveshape[1]\n\n\t\t\t\ttry: wavemap = npunpack(dtype, nvals).reshape(waveshape, order='C')\n\t\t\t\texcept WaveformSetIOError: break\n\n\t\t\tif not skip_zero_length or wavemap.nbytes != 0:\n\t\t\t\tif force_dtype is not None:\n\t\t\t\t\twmap = wavemap.astype(force_dtype)\n\t\t\t\telse: wmap = wavemap\n\t\t\t\t# Add the record to the set\n\t\t\t\twset.setrecord(hdr, wmap, copy=False)\n\n\t\tif not stream_mode and f.tell() != fsrec:\n\t\t\twarnings.warn(WaveformSetIOWarning('Junk at end of file'))\n\n\t\tif nrx and wset.nrx != nrx:\n\t\t\terr = f'Header specifies {nrx} records, but read {wset.nrx}'\n\t\t\tif warn_on_error: warnings.warn(WaveformSetIOWarning(err))\n\t\t\telse: raise WaveformSetIOError(err)\n\n\t\treturn wset\n\n\n\t@property\n\tdef rxidx(self):\n\t\t'''\n\t\tReturn a list of receive-channel indices in file order.\n\t\t'''\n\t\treturn 
list(self._records.keys())\n\n\n\t@property\n\tdef txgrps(self):\n\t\t'''\n\t\tReturn the (count, size) of transmit groups, or None for no grouping.\n\t\t'''\n\t\treturn self._txgrps\n\n\n\[email protected]\n\tdef txgrps(self, grps):\n\t\t'''\n\t\tSet the group count and size. Removes any existing groupmap\n\t\tproperty.\n\t\t'''\n\t\tif grps == self._txgrps: return\n\n\t\tif self.nrx > 0:\n\t\t\traise ValueError('Cannot change transmit-group configuration with existing records')\n\n\t\tif grps is None:\n\t\t\tself._txgrps = None\n\t\t\tself.groupmap = None\n\t\t\treturn\n\n\t\ttry:\n\t\t\tgrps = TxGroupConfiguration(*grps)\n\t\texcept (TypeError, ValueError):\n\t\t\traise ValueError('Parameter must be None or (count, size) tuple')\n\n\t\tif grps.maxtx < self.ntx:\n\t\t\traise ValueError('Implied maximum transmission count is less than number of recorded transmissions')\n\t\tif grps.maxtx <= self.txstart:\n\t\t\traise ValueError('Implied maximum transmission count is less than starting transmission index')\n\n\t\tself._txgrps = grps\n\t\tself.groupmap = None\n\n\n\t@property\n\tdef txstart(self):\n\t\t'''\n\t\tReturn the first transmission index in the records.\n\t\t'''\n\t\treturn self._txstart\n\n\n\[email protected]\n\tdef txstart(self, txstart):\n\t\t'''\n\t\tSet the first transmission index in the records, which must be\n\t\ta nonnegative integer within the transmission range implied by\n\t\tthe group configuration in self.txgrps.\n\t\t'''\n\t\tif txstart == self._txstart: return\n\n\t\ttxstart = strict_nonnegative_int(txstart)\n\n\t\ttry:\n\t\t\tmaxtx = self.txgrps.maxtx\n\t\texcept AttributeError:\n\t\t\tpass\n\t\telse:\n\t\t\tif txstart >= maxtx:\n\t\t\t\traise ValueError('Parameter txstart exceeds maxtx of transmit-group configuration')\n\n\t\tself._txstart = txstart\n\n\n\t@property\n\tdef txidx(self):\n\t\t'''\n\t\tReturn a generator of transmit-channel indices in file order.\n\t\t'''\n\t\ttxstart = self.txstart\n\t\ttxgrps = self.txgrps\n\n\t\ttry:\n\t\t\tmaxtx = self.txgrps.maxtx\n\t\texcept AttributeError:\n\t\t\tfor i in range(txstart, txstart + self.ntx):\n\t\t\t\tyield i\n\t\telse:\n\t\t\tfor i in range(txstart, txstart + self.ntx):\n\t\t\t\tyield i % maxtx\n\n\n\[email protected]\n\tdef txidx(self, txidx):\n\t\t'''\n\t\tChecks the provided list for sequential ordering of the input\n\t\tsequence txidx and, if the check is satisfied, assigns\n\t\tself.txstart and self.ntx accordingly.\n\n\t\tIf the indices are not sequential, but self.txgrps is None, the\n\t\ttxgrp configuration and self.groupmap will be set to map\n\t\ttransmit indices 0 through len(txidx) - 1 to the elements of\n\t\ttxidx.\n\t\t'''\n\t\ttxidx = list(txidx)\n\n\t\ttry: txstart = txidx[0]\n\t\texcept IndexError:\n\t\t\tself.ntx = 0\n\t\t\tself.txstart = 0\n\t\t\treturn\n\n\t\ttry:\n\t\t\tmaxtx = self.txgrps.maxtx\n\t\texcept AttributeError:\n\t\t\tdef nextval(x): return (x + 1)\n\t\telse:\n\t\t\tdef nextval(x): return (x + 1) % maxtx\n\n\t\tlast = txstart\n\t\tsequential = True\n\n\t\tfor nv in txidx[1:]:\n\t\t\tlast = nextval(last)\n\t\t\tif nv != last:\n\t\t\t\tsequential = False\n\t\t\t\tbreak\n\n\t\tdef atomic_set(txstart, ntx):\n\t\t\t# Record the old txstart to ensure atomicity\n\t\t\totxstart = self.txstart\n\t\t\tself.txstart = txstart\n\n\t\t\ttry: self.ntx = ntx\n\t\t\texcept:\n\t\t\t\t# Restore the old txstart before failing\n\t\t\t\tself.txstart = otxstart\n\t\t\t\traise\n\n\t\tif not sequential:\n\t\t\tif self.txgrps is not None:\n\t\t\t\traise ValueError('Indices must be sequential or wrap 
when txgrps is defined')\n\t\t\t# Set txgrp configuration to remap out-of-sequence indices\n\t\t\tatomic_set(0, len(txidx))\n\t\t\tself.txgrps = (self.ntx, 1)\n\t\t\tself.groupmap = { txi: (0, i) for i, txi in enumerate(txidx) }\n\t\telse:\n\t\t\tatomic_set(txstart, len(txidx))\n\n\n\t@property\n\tdef ntx(self):\n\t\t'''\n\t\tReturn the number of transmissions per receive channel.\n\t\t'''\n\t\treturn self._ntx\n\n\n\[email protected]\n\tdef ntx(self, ntx):\n\t\t'''\n\t\tSet the number of transmissions per receive channel.\n\t\t'''\n\t\t# Take no action if the count hasn't changed\n\t\tif ntx == self._ntx: return\n\n\t\t# Don't attempt to change the transmit count with existing records\n\t\tif self.nrx > 0:\n\t\t\traise ValueError('Cannot change number of transmissions with existing records')\n\n\t\ttry:\n\t\t\tif ntx > self.txgrps.maxtx:\n\t\t\t\traise ValueError('Number of transmissions must not exceed maxtx implied by transmit-group configuration')\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\tself._ntx = strict_nonnegative_int(ntx)\n\n\n\t@property\n\tdef nrx(self):\n\t\t'''\n\t\tReturn the number of receive channels in this waveform set.\n\t\t'''\n\t\treturn len(self._records)\n\n\n\t@property\n\tdef dtype(self):\n\t\t'''\n\t\tReturn the datatype used to store waveforms.\n\t\t'''\n\t\treturn self._dtype\n\n\n\[email protected]\n\tdef dtype(self, value):\n\t\t'''\n\t\tSet the datatype used to store waveforms.\n\t\t'''\n\t\tif self._dtype == value: return\n\n\t\tif self.nrx > 0:\n\t\t\traise ValueError('Cannot change datatype with existing records')\n\t\tself._dtype = np.dtype(value)\n\n\n\t@property\n\tdef nsamp(self):\n\t\t'''\n\t\tReturn the total number of samples collected in the acquisitions.\n\t\t'''\n\t\treturn self._nsamp\n\n\n\[email protected]\n\tdef nsamp(self, nsamp):\n\t\t'''\n\t\tSet the total number of samples in the acquisition window.\n\t\tEnsure existing records don't fall outside of the window.\n\t\t'''\n\t\tif self._nsamp == nsamp: return\n\n\t\t# Force the new value to be a nonnegative integer\n\t\tnsamp = strict_nonnegative_int(nsamp)\n\n\t\t# Check all existing records to ensure their windows don't\n\t\t# extend past the new acquisition window\n\t\tfor hdr, wforms in self.allrecords():\n\t\t\tstart, length = hdr.win\n\t\t\tif start + length > nsamp:\n\t\t\t\traise ValueError('Acquisition window fails to contain stored waveforms')\n\n\t\t# Set the new value\n\t\tself._nsamp = nsamp\n\n\n\t@property\n\tdef f2c(self):\n\t\t'''\n\t\tReturn the fire-to-capture delay in 20-MHz samples.\n\t\t'''\n\t\treturn self._f2c\n\n\n\[email protected]\n\tdef f2c(self, val):\n\t\t'''\n\t\tSet the fire-to-capture delay in 20-MHz samples.\n\t\t'''\n\t\tif self._f2c == val: return\n\t\tself._f2c = strict_nonnegative_int(val)\n\n\n\t@property\n\tdef groupmap(self):\n\t\t'''\n\t\tAccess a copy of the map from global element indices to\n\t\ttuples (local index, group index) that govern firing order.\n\t\t'''\n\t\treturn dict(self._groupmap)\n\n\n\[email protected]\n\tdef groupmap(self, grpmap):\n\t\t'''\n\t\tCheck the provided mapping from global element indices to\n\t\t(local index, group index) for consistency and assign the map\n\t\tto this instance.\n\n\t\tSet grpmap to None or an object with 0 len() to clear the map.\n\t\t'''\n\t\tif grpmap is None or len(grpmap) < 1:\n\t\t\tself._groupmap = { }\n\t\t\treturn\n\n\t\tif self.txgrps is None:\n\t\t\traise ValueError('Cannot set a group map without a txgrps configuration for the WaveformSet')\n\n\t\t# Make sure the map is valid and 
consistent with txgrp configuration\n\t\tngrpmap = { }\n\t\tfor k, v in grpmap.items():\n\t\t\tki = strict_nonnegative_int(k)\n\t\t\tvi, vg = [strict_nonnegative_int(vl) for vl in v]\n\t\t\tif vi >= self.txgrps.size:\n\t\t\t\traise ValueError('Local index in group map exceeds txgrp size')\n\t\t\tif vg >= self.txgrps.count:\n\t\t\t\traise ValueError('Group index in group map exceeds txgrp count')\n\t\t\tngrpmap[ki] = (vi, vg)\n\n\t\t# Check any local receive-channels for consistency\n\t\tfor hdr in self.allheaders():\n\t\t\tif ngrpmap.get(hdr.idx, hdr.txgrp) != hdr.txgrp:\n\t\t\t\traise ValueError('Group map does not match receive-channel record at index %d' % hdr.idx)\n\n\t\tself._groupmap = ngrpmap\n\n\n\tdef element2tx(self, elt, unfold=True):\n\t\t'''\n\t\tConvert an element index elt into a transmission index. If no\n\t\ttransmit-group configuration exists, this is *ALWAYS* the\n\t\tidentity map.\n\n\t\tWhen a transmit-group configuration exists, self.groupmap is\n\t\tfirst checked for a transmit index for elt. If the groupmap\n\t\tdoes not exist or fails to specify the necessary index, the\n\t\ttxgrp configuration for a receive-channel record for index elt\n\t\t(if one exists) is used.\n\n\t\tIf unfold is True, the transmission index is a scalar value\n\t\tthat directly indexes rows in record arrays. If unfold is\n\t\tFalse, the transmission index is a pair (locidx, grpnum) that\n\t\tmaps to the unfolded index, t, by\n\n\t\t\tt = locidx + grpnum * self.txgrps.size.\n\t\t'''\n\t\telt = strict_nonnegative_int(elt)\n\n\t\ttry: gcount, gsize = self.txgrps\n\t\texcept TypeError: return elt\n\n\t\ttry:\n\t\t\ttxgrp = self._groupmap[elt]\n\t\texcept KeyError:\n\t\t\ttry: txgrp = self.getheader(elt).txgrp\n\t\t\texcept KeyError:\n\t\t\t\traise KeyError('Could not find map record for receive channel %d' % elt)\n\n\t\ttry:\n\t\t\tidx, grp = txgrp\n\t\texcept (TypeError, ValueError) as e:\n\t\t\traise ValueError('Unable to unpack invalid txgrp for channel %d' % elt)\n\n\t\treturn (grp * gsize + idx) if unfold else (idx, grp)\n\n\n\tdef tx2row(self, tid):\n\t\t'''\n\t\tConvert a transmit-channel index into a waveform-array row index.\n\t\t'''\n\t\t# Ensure that the argument is properly bounded\n\t\ttid = strict_nonnegative_int(tid)\n\n\t\ttxstart = self.txstart\n\n\t\ttry: maxtx = self.txgrps.maxtx\n\t\texcept AttributeError: maxtx = None\n\n\t\tif maxtx is not None:\n\t\t\tif tid >= maxtx:\n\t\t\t\traise ValueError('Argument tid exceeds self.txgrps.maxtx')\n\t\t\t# Shift low values to account for wraparound\n\t\t\tif tid < txstart: tid += maxtx\n\n\t\t# Shift relative to start\n\t\ttid -= self.txstart\n\n\t\t# Ensure the bounds are sensible\n\t\tif not 0 <= tid < self.ntx:\n\t\t\traise ValueError('Transmit index is not contained in this file')\n\t\treturn tid\n\n\n\tdef _get_record_raw(self, rid):\n\t\t'''\n\t\tReturn the raw (header, data) record for a given receive\n\t\tchannel rid, with only sanity checks on rid.\n\t\t'''\n\t\treturn self._records[strict_nonnegative_int(rid)]\n\n\n\tdef getheader(self, rid):\n\t\t'''\n\t\tReturn the channel header for receive channel rid.\n\t\t'''\n\t\treturn self._get_record_raw(rid)[0]\n\n\n\tdef getrecord(self, rid, tid=None, window=None, dtype=None, maptids=False):\n\t\t'''\n\t\tReturn a (header, waveforms) record for the receive channel\n\t\twith channel index rid. 
If window is None and dtype is None,\n\t\tthe waveforms data array is a view of the internal\n\t\tcopy-on-write memory map.\n\n\t\tIf tid is not None, it should be a scalar integer or an\n\t\titerable of integers that represent transmit channel indices to\n\t\tpull from the waveform array. When tid is a scalar, a 1-D array\n\t\tis returned to represent the samples for the specified\n\t\ttransmission. When tid is an iterable (even of length 1), a 2-D\n\t\tarray is returned with transmit indices along the rows (in the\n\t\torder specified by tid) and waveform samples along the columns.\n\t\tWhen tid is None, self.txidx is assumed.\n\n\t\tIf window is not None, it should be a tuple (start, length)\n\t\tthat specifies the first sample and length of the temporal\n\t\twindow over which the waveforms are interpreted. Even if window\n\t\tmatches the internal window in the header, a copy of the\n\t\twaveform array will be made.\n\n\t\tIf dtype is not None, the output copy of the waveforms in the\n\t\trecord will be cast to this datatype.\n\n\t\tIf exactly one of window or dtype is None, the corresponding\n\t\tvalue from the record will be used.\n\n\t\tTo force a copy without knowing or changing the window and\n\t\tdtype, pass dtype=0.\n\n\t\tIf maptids is True, any indices specified in tid will be\n\t\tconverted from an element index to a transmission index using\n\t\tself.element2tx().\n\t\t'''\n\t\t# Grab receive record, copy header to avoid corruption\n\t\thdr, waveforms = self._get_record_raw(rid)\n\n\t\tif maptids and tid is not None:\n\t\t\t# Map the transmit indices to element indices\n\t\t\ttry:\n\t\t\t\ttid = self.element2tx(tid)\n\t\t\texcept TypeError:\n\t\t\t\ttid = [self.element2tx(t) for t in tid]\n\n\t\ttry:\n\t\t\ttcidx = self.tx2row(tid)\n\t\t\tsingletx = True\n\t\texcept TypeError:\n\t\t\tsingletx = False\n\t\t\tif tid is None:\n\t\t\t\ttcidx = list(range(self.ntx))\n\t\t\telse:\n\t\t\t\ttcidx = [self.tx2row(t) for t in tid]\n\n\t\tif window is None:\n\t\t\tif dtype is None:\n\t\t\t\t# With no type override, just return a view\n\t\t\t\treturn hdr, waveforms[tcidx,:]\n\t\t\telse:\n\t\t\t\t# Force a type conversion and copy\n\t\t\t\tif dtype == 0:\n\t\t\t\t\tdtype = waveforms.dtype\n\t\t\t\treturn hdr, waveforms[tcidx,:].astype(dtype, copy=True)\n\n\t\t# Handle a specific data window\n\t\tfrom .sigtools import Window\n\t\twindow = Window(window)\n\n\t\t# Handle unspecified data types\n\t\tif dtype is None or dtype == 0:\n\t\t\tdtype = waveforms.dtype\n\n\t\t# Create an output array to store the results\n\t\toshape = (1 if singletx else len(tcidx), window.length)\n\t\toutput = np.zeros(oshape, dtype=dtype)\n\n\t\ttry:\n\t\t\t# Figure out the overlapping sample window\n\t\t\t# Raises TypeError if overlap() returns None\n\t\t\tfrom pycwp.cutil import overlap\n\t\t\tostart, istart, wlen = overlap(window, hdr.win)\n\t\t\toend, iend = ostart + wlen, istart + wlen\n\n\t\t\t# Copy portion of waveforms overlapping the window\n\t\t\toutput[:,ostart:oend] = waveforms[tcidx,istart:iend]\n\t\texcept TypeError: pass\n\n\t\t# For a scalar tid, collapse the 2-D array\n\t\tif singletx: output = output[0]\n\n\t\t# Override the window in the header copy\n\t\treturn hdr.copy(win=window), output\n\n\n\tdef getwaveform(self, rid, tid, *args, cyclic=False, **kwargs):\n\t\t'''\n\t\tReturn, as one or more habis.sigtools.Waveform objects, the\n\t\twaveform(s) recorded at receive-channel index rid from the\n\t\t(scalar or iterable of) transmission(s) tid.\n\n\t\tIf tid is a scalar, a single Waveform object is 
returned.\n\t\tOtherwise, if tid is an iterable or None (which pulls all\n\t\ttransmissions), a list of Waveform objects is returned.\n\n\t\tThe Waveform time reference is the global time reference. In\n\t\tother words, the Waveform is created from the raw record, then\n\t\tshifted by self.f2c. If the shift moves the data window past\n\t\tthe end of the window (0, self.nsamp), some of the data will be\n\t\tclipped. To instead cyclically wrap any samples that would be\n\t\tclipped, pass cyclic=True to this method.\n\n\t\tExtra args and kwargs are passed through to getrecord().\n\t\t'''\n\t\tfrom .sigtools import Waveform\n\t\t# Grab the relevant row of the record\n\t\thdr, wform = self.getrecord(rid, tid, *args, **kwargs)\n\n\t\t# Wrap a single desired signal in a Waveform object\n\t\tif np.ndim(wform) == 1:\n\t\t\twave = Waveform(self.nsamp, wform, hdr.win.start)\n\t\t\twave = wave.shift(self.f2c, cyclic=cyclic)\n\t\t\treturn wave\n\t\telse:\n\t\t\twarr = [ ]\n\t\t\tfor w in wform:\n\t\t\t\twave = Waveform(self.nsamp, w, hdr.win.start)\n\t\t\t\twave = wave.shift(self.f2c, cyclic=cyclic)\n\t\t\t\twarr.append(wave)\n\t\t\treturn warr\n\n\n\tdef delrecord(self, rid):\n\t\t'''\n\t\tDelete the waveform record for the receive-channel index rid.\n\t\t'''\n\t\tdel self._records[strict_nonnegative_int(rid)]\n\n\n\tdef clearall(self):\n\t\t'''\n\t\tDelete all waveform records in the set.\n\t\t'''\n\t\t# Just create a new record dictionary\n\t\tself._records = OrderedDict()\n\n\n\tdef setrecord(self, hdr, waveforms=None, copy=True):\n\t\t'''\n\t\tSave a waveform record consisting of the provided header and\n\t\twaveform array. If a record for the receive channel specified\n\t\tin the header already exists, it will be overwritten.\n\t\tOtherwise, the record will be created.\n\n\t\tIf the header specifies None for txgrp, but the WaveformSet\n\t\ttransmit-group configuration is not None, any groupmap\n\t\tassociated with the WaveformSet will be searched for a matching\n\t\treceive-channel index to create a matching txgrp. No other\n\t\tautomatic txgrp manipulation is attempted.\n\n\t\tThe waveform array must either be a Numpy ndarray or None. When\n\t\twaveforms takes the special value None, a new, all-zero\n\t\twaveform array is created (regardless of the value of copy).\n\n\t\tIf copy is False, the record will store a reference to the\n\t\twaveform array if the types are compatible. 
If copy is True, a\n\t\tlocal copy of the waveform array, cast to this set's dtype,\n\t\twill always be made.\n\t\t'''\n\t\thdr = RxChannelHeader(*hdr)\n\n\t\tif self.txgrps is not None:\n\t\t\t# Ensure consistency with the group configuration\n\t\t\tif hdr.txgrp is None:\n\t\t\t\t# Check the group map for a matching record\n\t\t\t\ttry:\n\t\t\t\t\ttxgrp = self.element2tx(hdr.idx, unfold=False)\n\t\t\t\texcept (KeyError, TypeError):\n\t\t\t\t\traise ValueError('Record is missing required txgrp configuration')\n\t\t\t\telse:\n\t\t\t\t\thdr = hdr.copy(txgrp=txgrp)\n\t\t\telif hdr.txgrp.grp >= self.txgrps.count:\n\t\t\t\traise ValueError('Record group number too large')\n\t\t\telif hdr.txgrp.idx >= self.txgrps.size:\n\t\t\t\traise ValueError('Record local index too large')\n\t\t\telse:\n\t\t\t\t# Ensure consistency with the groupmap\n\t\t\t\ttry:\n\t\t\t\t\trgrp = self.groupmap[hdr.idx]\n\t\t\t\texcept (TypeError, KeyError):\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tif rgrp != hdr.txgrp:\n\t\t\t\t\t\traise ValueError('Record txgrp does not match groupmap')\n\t\telif hdr.txgrp is not None:\n\t\t\traise ValueError('Record contains inappropriate txgrp configuration')\n\n\t\t# Check that the header bounds make sense\n\t\tif hdr.win.end > self.nsamp:\n\t\t\traise ValueError('Waveform sample window exceeds acquisition window duration')\n\n\t\tif waveforms is None:\n\t\t\t# Create an all-zero waveform array\n\t\t\twshape = (self.ntx, hdr.win.length)\n\t\t\twaveforms = np.zeros(wshape, dtype=self.dtype)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tif copy or waveforms.dtype != self.dtype:\n\t\t\t\t\t# Make a copy of the waveform in proper format\n\t\t\t\t\traise TypeError('Conversion of dtypes required')\n\t\t\texcept (AttributeError, TypeError):\n\t\t\t\twaveforms = np.array(waveforms, dtype=self.dtype)\n\n\t\t\t# Pad 0-d and 1-d waveforms to 2-d\n\t\t\tif waveforms.ndim < 2:\n\t\t\t\twaveforms = waveforms[[None] * (2 - waveforms.ndim)]\n\n\t\t\t# Check the proper shape of the provided array\n\t\t\tntx, nsamp = waveforms.shape\n\t\t\tif ntx != self.ntx:\n\t\t\t\traise ValueError('Waveform array does not match transmission count for set')\n\t\t\tif nsamp != hdr.win.length:\n\t\t\t\traise ValueError('Waveform array does not match sample count specified in header')\n\n\t\t# Add or replace the record\n\t\tself._records[hdr.idx] = (hdr, waveforms)\n\n\n\tdef allrecords(self, *args, **kwargs):\n\t\t'''\n\t\tReturn a generator that fetches each record, in channel-index\n\t\torder, using self.getrecord(rid, window, dtype).\n\t\t'''\n\t\tfor rid in sorted(self.rxidx):\n\t\t\tyield self.getrecord(rid, *args, **kwargs)\n\n\n\tdef allheaders(self):\n\t\t'''\n\t\tReturn a generator that fetches, in channel-index order, only\n\t\tthe receive-channel record headers.\n\t\t'''\n\t\tfor rid in sorted(self.rxidx):\n\t\t\tyield self.getheader(rid)\n", "'''\nRoutines for distributed filtering of Numpy arrays in an MPI environment.\n'''\n\n# Copyright (c) 2017 Andrew J. Hesford. 
All rights reserved.\n# Restrictions are listed in the LICENSE file distributed with this package.\n\nimport numpy as np\nimport scipy.ndimage\n\ndef parshare(n, overlap, rank, size):\n\t'''\n\tFor a sequence of n values, return as (start, end) the indices that\n\tdefine the rank-th of size chunks of the sequence that overlap by\n\toverlap items.\n\t'''\n\t# Find the size and start of non-overlapping portions\n\tshare, srem = n // size, n % size\n\tstart = rank * share + min(rank, srem)\n\tif rank < srem: share += 1\n\n\t# Extend ends into the overlap region\n\treturn max(0, start - overlap), min(n, start + share + overlap)\n\n\ndef gathersize(n, nrec, size):\n\t'''\n\tFor a sequence of n * nrec items represented as n contiguous blocks of\n\tnrec items each, return as (starts, shares) lists of the starting\n\tindices (into the flat list) and item counts for each of size chunks.\n\t'''\n\tshare, srem = n // size, n % size\n\tstarts, shares = zip(*((nrec * (share * i + min(i, srem)),\n\t\t\t\tnrec * (share + int(i < srem))) for i in range(size)))\n\treturn starts, shares\n\n\ndef parfilter(name, comm=None):\n\t'''\n\tFor a given named filter, construct an MPI-distributed version of the\n\tfilter that divides the workload along the first axis of the filtered\n\timage among all ranks in the MPI communicator comm (MPI.COMM_WORLD by\n\tdefault).\n\n\tDistributed slices of the image will overlap along the first axis by a\n\t\"pad\" to mitigate boundary artifacts that would otherwise result from\n\tslicing. The wrapped MPI function supports an optional keyword-only\n\t\"npad\" argument that specifies the width of the overlap. If \"npad\" is\n\tnot provided, the filter must support a \"footprint\" or \"size\" argument\n\t(if both are provided, footprint is preferred) and the value of \"npad\"\n\twill be half the footprint or size.\n\n\tThe filter that will be parallelized is selected from scipy.ndimage if\n\tsuch a function exists in that module, or else from pycwp.filter.\n\n\tThe return value is a wrapper function with the same signature as the\n\toriginal filter except for the addition of the optional keyword-only\n\t\"npad\" argument to indicate overlap of distributed slices. The \"npad\"\n\targument is consumed by the wrapper and will not be passed to the\n\twrapped filter. 
All other arguments are passed to the wrapped filter.\n\tThe function will handle distribution of the input array (which should\n\tbe the entire array on each process) and accumulation of the result\n\t(which will be the same array on each process).\n\t'''\n\ttry: filt = getattr(scipy.ndimage, name)\n\texcept AttributeError:\n\t\timport pycwp.filter\n\t\tfilt = getattr(pycwp.filter, name)\n\n\tfrom mpi4py import MPI\n\tif comm is None: comm = MPI.COMM_WORLD\n\n\tdef filterfunc(a, size=None, footprint=None, *args, npad=None, **kwargs):\n\t\t# Determine the necessary overlap in slicing\n\t\tif npad is not None:\n\t\t\tnpad = int(npad)\n\t\telif footprint is not None:\n\t\t\tfootprint = np.asarray(footprint)\n\t\t\tnpad = footprint.shape[0] // 2\n\t\t\tkwargs['footprint'] = footprint\n\t\telif size is not None:\n\t\t\ttry: npad = size[0] // 2\n\t\t\texcept TypeError: npad = size // 2\n\t\t\tkwargs['size'] = size\n\t\telse: raise TypeError('One of \"size\", \"footprint\" or \"npad\" is required')\n\n\t\t# Make sure the array is an array\n\t\ta = np.asarray(a)\n\n\t\t# Pull the local portion with appropriate padding for filtering\n\t\tlidx, hidx = parshare(a.shape[0], npad, comm.rank, comm.size)\n\n\t\t# Filter and update the local portion of a\n\t\tlfa = filt(a[lidx:hidx], **kwargs)\n\n\t\t# Make sure that the output array is compatible\n\t\tb = np.zeros(a.shape[:1] + lfa.shape[1:], dtype=np.float64, order='C')\n\t\tb[lidx:hidx] = lfa\n\n\t\t# Gather local contributions everywhere\n\t\tnrec = int(np.prod(b.shape[1:]))\n\t\tstarts, shares = gathersize(b.shape[0], nrec, comm.size)\n\t\tcomm.Allgatherv(MPI.IN_PLACE, [b, shares, starts, MPI.DOUBLE])\n\n\t\t# Return the entire result\n\t\treturn b\n\n\treturn filterfunc\n" ]
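A minimal sketch (added for illustration; not part of either repository in this dump) of how parshare() tiles a sequence into overlapping per-rank chunks. The function body is copied from the module above so the demo runs without MPI; the n, overlap, rank and size values are made up:

def parshare(n, overlap, rank, size):
	# Find the size and start of the non-overlapping portions
	share, srem = n // size, n % size
	start = rank * share + min(rank, srem)
	if rank < srem: share += 1
	# Extend the ends into the overlap region
	return max(0, start - overlap), min(n, start + share + overlap)

# For n=10 items, 3 ranks and overlap=1 this prints:
#   0 (0, 5)
#   1 (3, 8)
#   2 (6, 10)
# The non-overlapping interiors [0,4), [4,7) and [7,10) tile the sequence,
# and each chunk is padded by one item across interior boundaries.
for rank in range(3):
	print(rank, parshare(10, 1, rank, 3))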
[ [ "pandas.read_csv", "numpy.fromfile", "numpy.asarray", "numpy.issubdtype", "numpy.ndarray", "numpy.dtype", "numpy.all", "numpy.concatenate", "numpy.frombuffer", "numpy.ndim", "numpy.count_nonzero", "numpy.savetxt", "numpy.load", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.loadtxt" ], [ "numpy.asarray", "numpy.zeros", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jgerardin/covid-chicago
[ "c2b91fdb42eece413e6fb0f6cee019357b96e00d" ]
[ "data_processing/exceeding_capacity_1.py" ]
[ "print('Importing packages...')\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport datetime as dt\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.dates as mdates\nimport datetime\n#sns.set(color_codes=True)\nimport matplotlib as mpl\nmpl.rcParams['pdf.fonttype'] = 42\nimport statistics as st\nsns.set_style('whitegrid', {'axes.linewidth' : 0.5})\nfrom statsmodels.distributions.empirical_distribution import ECDF\nimport scipy\nimport gc\n\ncolumn_list = ['scen_num', 'reopening_multiplier_4']\nfor ems_region in range(1,12):\n column_list.append('hosp_det_EMS-' + str(ems_region))\n column_list.append('hosp_det_cumul_EMS-' + str(ems_region))\n column_list.append('detected_cumul_EMS-' + str(ems_region))\n\n#Specify paths to trajectories. For this run, all trajectories were temporarily stored in the same folder.\n\nprint('Reading trajectories...')\nsub1 = pd.read_csv('trajectoriesDat_1.csv', usecols=column_list) #0.08 - 0.09\nprint('Trajectory 1 read.')\nsub2 = pd.read_csv('trajectoriesDat_2.csv', usecols=column_list) #0.10 - 0.115\nprint('Trajectory 2 read.')\nsub3 = pd.read_csv('trajectoriesDat_3.csv', usecols=column_list) #0.087 - 0.10\nprint('Trajectory 3 read.')\nsub4 = pd.read_csv('trajectoriesDat_08.csv', usecols=column_list) # 0.08 - 0.10\nsub4['scen_num'] = sub4['scen_num'].values + 1000\nprint('Trajectory 4 read.')\nsub5 = pd.read_csv('trajectoriesDat_300.csv', usecols=column_list) #0.1 - 0.11\nsub5['scen_num'] = sub5['scen_num'].values + 2000\nprint('Trajectory 5 read.')\nsub6 = pd.read_csv('trajectoriesDat_600.csv', usecols=column_list) #0.115 - 0.13\nsub6['scen_num'] = sub6['scen_num'].values + 2000\nprint('Trajectory 6 read.')\nsub7 = pd.read_csv('trajectoriesDat_1000.csv', usecols=column_list) #0.13 - 0.15\nsub7['scen_num'] = sub7['scen_num'].values + 2000\nprint('Trajectory 7 read.')\nsub8 = pd.read_csv('trajectoriesDat_15.csv', usecols=column_list) #0.13 - 0.15\nsub8['scen_num'] = sub8['scen_num'].values + 3000\nprint('Trajectory 8 read.')\n\n###loop here\nfor region in ['NE', 'NC', 'CE', 'SO']:\n for capacity in ['high', 'low']:\n for metric in ['det', 'hosp']: #current implementation only allows tracking new_detected and new_hosp.\n boink = []\n\n ### Region\n\n #hospital_capacity = 1907\n #NE 4919 8609 12299\n #NC 1089 1907 2724\n #CE 856 1498 2140\n #SO 640 1121 1601\n\n ### Metric to assess:\n if metric == 'det':\n notif = 'new_det_' + region\n if metric == 'hosp':\n notif = 'new_hosp_det_' + region\n\n ### Simulation Dates to Examine\n lower_limit = 145\n upper_limit = 225\n grain = 1\n\n prob_over_array = []\n range_1 = np.arange(0, 25, 0.01)\n\n ### Capacity\n ### Which trajectories to use for each capacity were determined by hand.\n if region == 'NE':\n if capacity == 'low':\n hospital_capacity = 4919\n trajectories = pd.concat([sub1, sub3, sub4]).reset_index()\n elif capacity == 'high':\n hospital_capacity = 8609\n trajectories = pd.concat([sub1, sub2, sub3]).reset_index()\n elif region == 'NC':\n if capacity == 'low':\n hospital_capacity = 1089\n trajectories = pd.concat([sub4, sub5, sub6, sub7]).reset_index()\n elif capacity == 'high':\n hospital_capacity = 1907\n trajectories = pd.concat([sub5, sub6, sub7]).reset_index()\n elif region == 'CE':\n if capacity == 'low':\n hospital_capacity = 856\n trajectories = pd.concat([sub5, sub6, sub7]).reset_index()\n elif capacity == 'high':\n hospital_capacity = 1498\n trajectories = sub8 #pd.concat([sub5, sub6, sub7, sub8]).reset_index() ##need new\n elif region == 'SO':\n if capacity == 'low':\n 
hospital_capacity = 640\n trajectories = pd.concat([sub1, sub2, sub3]).reset_index()\n elif capacity == 'high':\n hospital_capacity = 1121\n trajectories = pd.concat([sub5, sub6, sub7]).reset_index()\n\n #NE Region\n\n trajectories['hosp_det_NE'] = trajectories['hosp_det_EMS-11'] + \\\n trajectories['hosp_det_EMS-10'] + \\\n trajectories['hosp_det_EMS-9'] + \\\n trajectories['hosp_det_EMS-8'] + \\\n trajectories['hosp_det_EMS-7']\n\n trajectories['hosp_det_cumul_NE'] = trajectories['hosp_det_cumul_EMS-11'] + \\\n trajectories['hosp_det_cumul_EMS-10'] + \\\n trajectories['hosp_det_cumul_EMS-9'] + \\\n trajectories['hosp_det_cumul_EMS-8'] + \\\n trajectories['hosp_det_cumul_EMS-7']\n\n trajectories['detected_cumul_NE'] = trajectories['detected_cumul_EMS-11'] + \\\n trajectories['detected_cumul_EMS-10'] + \\\n trajectories['detected_cumul_EMS-9'] + \\\n trajectories['detected_cumul_EMS-8'] + \\\n trajectories['detected_cumul_EMS-7']\n\n #NC Region\n\n trajectories['hosp_det_NC'] = trajectories['hosp_det_EMS-1'] + trajectories['hosp_det_EMS-2'] \n trajectories['hosp_det_cumul_NC'] = trajectories['hosp_det_cumul_EMS-1'] + trajectories['hosp_det_cumul_EMS-2'] \n trajectories['detected_cumul_NC'] = trajectories['detected_cumul_EMS-1'] + trajectories['detected_cumul_EMS-2']\n\n #CE Region\n\n trajectories['hosp_det_CE'] = trajectories['hosp_det_EMS-3'] + trajectories['hosp_det_EMS-6'] \n trajectories['hosp_det_cumul_CE'] = trajectories['hosp_det_cumul_EMS-3'] + trajectories['hosp_det_cumul_EMS-6'] \n trajectories['detected_cumul_CE'] = trajectories['detected_cumul_EMS-3'] + trajectories['detected_cumul_EMS-6']\n\n #SO Region\n\n trajectories['hosp_det_SO'] = trajectories['hosp_det_EMS-4'] + trajectories['hosp_det_EMS-5'] \n trajectories['hosp_det_cumul_SO'] = trajectories['hosp_det_cumul_EMS-4'] + trajectories['hosp_det_cumul_EMS-5'] \n trajectories['detected_cumul_SO'] = trajectories['detected_cumul_EMS-4'] + trajectories['detected_cumul_EMS-5']\n\n print('Region: ' + region)\n print('Capacity: ' + str(capacity))\n print('Metric: ' + str(notif))\n thresh = []\n p_array = []\n dates_array = []\n over_array = []\n no_array = []\n days_array = np.arange(lower_limit,upper_limit, grain)\n for notif_period in days_array:\n trajectories_new = trajectories\n unique_scen = np.array(list(set(trajectories_new['scen_num'].values)))\n overflow_date = []\n max_date = []\n #notif = 'new_detected'\n overflow_traj = []\n traj = []\n non_overflow_traj = []\n overflow_scens = []\n non_overflow_scens = []\n non_overflow_crit_day = []\n overflow_crit_day = []\n overflow_week = []\n overflow_prior_week = []\n non_overflow_week = []\n non_overflow_prior_week = []\n crit_day = []\n week = []\n week_prior = []\n crit = notif_period\n for scen in unique_scen:\n new = trajectories_new[(trajectories_new['scen_num'] == scen)].reset_index()\n new['new_hosp_det_NE'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_NE'].values))\n new['new_det_NE'] = np.append(np.array([0.0]), np.diff(new['detected_cumul_NE'].values))\n new['new_hosp_det_NC'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_NC'].values))\n new['new_det_NC'] = np.append(np.array([0.0]), np.diff(new['detected_cumul_NC'].values))\n new['new_hosp_det_CE'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_CE'].values))\n new['new_det_CE'] = np.append(np.array([0.0]), np.diff(new['detected_cumul_CE'].values))\n new['new_hosp_det_SO'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_SO'].values))\n new['new_det_SO'] = 
np.append(np.array([0.0]), np.diff(new['detected_cumul_SO'].values))\n hosp = new['hosp_det_' + region].values #new['hosp_det'].values\n i = 0\n traj.append(hosp)\n while (hosp[i] < hospital_capacity) & (i < len(hosp)-1):\n i += 1\n crit_day.append(i)\n if i == len(hosp) - 1:\n non_overflow_traj.append(hosp)\n non_overflow_scens.append(scen)\n\n #crit_day.append(i)\n non_overflow_week.append(np.mean(new[notif].values[crit-7:crit]))\n non_overflow_prior_week.append(np.mean(new[notif].values[crit-14:crit-7]))\n else:\n overflow_traj.append(hosp)\n overflow_scens.append(scen)\n\n #crit_day.append(i)\n overflow_week.append(np.mean(new[notif].values[crit-7:crit]))\n overflow_prior_week.append(np.mean(new[notif].values[crit-14:crit-7]))\n overflow_week = np.array(overflow_week)\n overflow_prior_week = np.array(overflow_prior_week)\n non_overflow_week = np.array(non_overflow_week)\n non_overflow_prior_week = np.array(non_overflow_prior_week) \n overflow_date = np.array(overflow_date)\n max_date = np.array(max_date)\n week = np.array(week)\n crit_day = np.array(crit_day)\n week_prior = np.array(week_prior)\n boink.append(np.mean(week/week_prior))\n over = overflow_week/overflow_prior_week\n no = non_overflow_week/non_overflow_prior_week\n #ecdf_over = ECDF(over)\n #ecdf_no = ECDF(no)\n #prob_over = np.cumsum(ecdf_no(range_1)-ecdf_over(range_1))/np.sum(ecdf_no(range_1)-ecdf_over(range_1))\n #print('Mean Over: ' + str(np.mean(over)))\n #print('Mean No: ' + str(np.mean(no)))\n if np.mean(over) > np.mean(no):\n p_over = scipy.stats.norm.pdf(range_1, np.mean(over), np.std(np.append(over,no, axis=0)))\n p_no = scipy.stats.norm.pdf(range_1, np.mean(no), np.std(np.append(over,no, axis=0)))\n prob_over = p_over/(p_over+p_no)\n prob_over_array.append(prob_over)\n over_array.append(np.median(over))\n no_array.append(np.median(no))\n #thresh.append((np.median(over) + np.median(no))/2)\n stat, p = scipy.stats.ttest_ind(over,no)\n p_array.append(p)\n dates_array.append(dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(crit)))\n print(crit)\n over_array = np.array(over_array)\n no_array = np.array(no_array)\n print('done')\n\n #trace fig\n full_dates_array = []\n for ni in np.arange(0,370,1):\n full_dates_array.append(dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(ni)))\n plt.figure(figsize=(10,6))\n for traject in overflow_traj:\n if (len(traject) == len(full_dates_array)):\n plt.plot(full_dates_array, traject, color='r', alpha=0.1)\n for traject in non_overflow_traj:\n if (len(traject) == len(full_dates_array)):\n plt.plot(full_dates_array, traject, color='b', alpha=0.1)\n #plt.yscale('log')\n plt.hlines(hospital_capacity, xmin=dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(0)), xmax=dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(ni)))\n plt.xlim([dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(0)), dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(ni))])\n #plt.vlines(np.median(crit_day[crit_day != 369]),ymin=1,ymax=30000, linestyle='dashed', alpha=0.4)\n plt.ylabel(region + ' Hospitalized', fontsize=14)\n formatter = mdates.DateFormatter(\"%m-%y\")\n ax = plt.gca()\n ax.xaxis.set_major_formatter(formatter)\n #ax.xaxis.set_major_locator(mdates.DayLocator(interval=7))\n ax.xaxis.set_major_locator(mdates.MonthLocator())\n #plt.xlabel('Simulation Day', fontsize=14)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n #plt.savefig('sims_2.png', dpi=200)\n #plt.savefig('sims_2.pdf')\n print('Proportion of sims that do 
not exceed: ' + str(np.sum(crit_day == 369)/(len(trajectories)/370)))\n print('Number of trajectories: ' + str(len(trajectories)/370))\n\n\n #p-value fig\n plt.figure(figsize=(10,6))\n plt.plot(dates_array, p_array)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n ax = plt.gca()\n formatter = mdates.DateFormatter(\"%m-%d\")\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_major_locator(mdates.DayLocator(interval=7))\n #ax.xaxis.set_major_locator(mdates.MonthLocator())\n plt.yscale('log')\n plt.ylabel('Significance of Difference Between\\nOverflow Scenarios and Non-Overflow Scenarios\\n(p-value of t-test)', fontsize=14)\n plt.savefig('p_val_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.png', dpi=200)\n plt.savefig('p_val_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.pdf')\n pd.DataFrame({'date':dates_array, 'p_val':p_array}).to_csv('p_val_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.csv')\n\n\n #Threshold fig\n thresh_0 = .05\n thresh_1 = .20\n thresh_2 = .50\n thresh_3 = .80\n thresh_4 = .95\n thresh_0_array = []\n thresh_1_array = []\n thresh_2_array = []\n thresh_3_array = []\n thresh_4_array = []\n count = 0\n for prob_array in prob_over_array:\n i = 0\n while prob_array[i] < thresh_0:\n i += 1\n thresh_0_array.append(i)\n i = 0\n while prob_array[i] < thresh_1:\n i += 1\n thresh_1_array.append(i)\n i = 0\n while prob_array[i] < thresh_2:\n i += 1\n thresh_2_array.append(i)\n i = 0\n while prob_array[i] < thresh_3:\n i += 1\n thresh_3_array.append(i)\n i = 0\n while prob_array[i] < thresh_4:\n i += 1\n thresh_4_array.append(i)\n count += 1\n print(count)\n thresh_0_array = np.array(thresh_0_array)\n thresh_1_array = np.array(thresh_1_array)\n thresh_2_array = np.array(thresh_2_array)\n thresh_3_array = np.array(thresh_3_array)\n thresh_4_array = np.array(thresh_4_array)\n\n plt.figure(figsize=(10,6))\n\n plt.plot(dates_array, 100*(range_1[thresh_4_array]-1), alpha=1.0, color='r', label='95% chance of exceeding capacity')\n plt.plot(dates_array, 100*(range_1[thresh_3_array]-1), alpha=0.75, color='r', label='80% chance of exceeding capacity')\n plt.plot(dates_array, 100*(range_1[thresh_2_array]-1), alpha=1.0, color='k', linestyle='dashed', label='50% chance of exceeding capacity')\n plt.plot(dates_array, 100*(range_1[thresh_1_array]-1), alpha=0.50, color='r', label='20% chance of exceeding capacity')\n plt.plot(dates_array, 100*(range_1[thresh_0_array]-1), alpha=0.25, color='r', label='5% chance of exceeding capacity')\n #plt.axvline(dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(193)))\n ax = plt.gca()\n formatter = mdates.DateFormatter(\"%m-%d\")\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_major_locator(mdates.DayLocator(interval=7))\n overflows_occur = 175\n alpha = 0.02\n for ele in np.sort(crit_day[crit_day != 369].copy()):\n plt.fill_between(x=[dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(ele)), dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(upper_limit+5))], y1=-30, y2=120, color='k', alpha=alpha, hatch='/', linewidth=0) #label='scenarios begin to exceed capacity'\n #plt.fill_between(x=[dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(overflows_occur)), dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(205))], y1=-30, y2=120, color='k', alpha=0.05, hatch='/', linewidth=0) #label='scenarios begin to exceed capacity'\n #plt.fill_between(x=[dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(overflows_occur+2)), 
dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(205))], y1=-30, y2=120, color='k', alpha=0.05, hatch='/', linewidth=0) #label='scenarios begin to exceed capacity'\n plt.xlim([dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(145)),dt.datetime(month=10, day=1, year=2020)])\n plt.ylim([-30,100])\n plt.ylabel('Threshold % change in\\n' + notif + '\\nfrom previous week', fontsize=14)\n plt.xlabel('Date of Assessment', fontsize=14)\n plt.legend(fontsize=12)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n #plt.savefig('overflow_prob_draft_2.png', dpi=200)\n #plt.savefig('overflow_prob_draft_2.pdf')\n plt.savefig('overflow_prob_draft_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.png', dpi=200)\n plt.savefig('overflow_prob_draft_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.pdf')" ]
[ [ "matplotlib.pyplot.legend", "pandas.DataFrame", "matplotlib.pyplot.plot", "numpy.mean", "matplotlib.dates.DayLocator", "matplotlib.pyplot.gca", "pandas.read_csv", "numpy.arange", "numpy.diff", "matplotlib.dates.MonthLocator", "matplotlib.pyplot.figure", "matplotlib.dates.DateFormatter", "pandas.concat", "matplotlib.pyplot.ylim", "numpy.median", "numpy.append", "numpy.array", "numpy.sum", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.yscale", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "scipy.stats.ttest_ind" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
conradjones/ngraph-bridge
[ "042011e6653b3ac0983511cf6604f9881cc6ee4b", "042011e6653b3ac0983511cf6604f9881cc6ee4b", "042011e6653b3ac0983511cf6604f9881cc6ee4b" ]
[ "test/python/test_tanhgrad.py", "examples/mnist/mnist_deep_simplified_distributed.py", "test/python/test_ngraph_serialize_flag.py" ]
[ "# ==============================================================================\n# Copyright 2018-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"nGraph TensorFlow bridge AvgPoolBackprop operation test\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops.gen_math_ops import tanh_grad\nfrom common import NgraphTest\n\n\nclass TestTanhGradOp(NgraphTest):\n\n def test_tanhgrad_2d(self):\n y = constant_op.constant(\n self.generate_random_numbers(30, 1.0, 10.0), shape=[10, 3])\n y_delta = constant_op.constant(\n self.generate_random_numbers(30, 0.0, 10.0), shape=[10, 3])\n\n out = tanh_grad(y, y_delta)\n\n def run_test(sess):\n return sess.run(out)\n\n assert np.allclose(\n self.with_ngraph(run_test), self.without_ngraph(run_test))\n\n def test_tanhgrad_3d(self):\n y = constant_op.constant(\n self.generate_random_numbers(60, 5.0, 30.0), shape=[10, 3, 2])\n y_delta = constant_op.constant(\n self.generate_random_numbers(60, 10.0, 40.0), shape=[10, 3, 2])\n\n out = tanh_grad(y, y_delta)\n\n def run_test(sess):\n return sess.run(out)\n\n assert np.allclose(\n self.with_ngraph(run_test), self.without_ngraph(run_test))\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# This file is derived from\n# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist_deep.py\n# with changes by Intel using Horovod.\n#\n# Copyright 2017-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A simplified deep MNIST classifier using convolutional layers.\nThis script has the following changes when compared to mnist_deep.py:\n1. no dropout layer (which disables the rng op)\n2. no truncated normal initialization (which disables the while op)\n\nSee extensive documentation at\nhttps://www.tensorflow.org/get_started/mnist/pros\n\"\"\"\n# Disable linter warnings to maintain consistency with tutorial.\n# pylint: disable=invalid-name\n# pylint: disable=g-bad-import-order\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport tempfile\nimport getpass\nimport time\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport tensorflow as tf\nimport ngraph_bridge\nimport horovod.tensorflow as hvd\n\nFLAGS = None\n\n\ndef deepnn(x):\n \"\"\"deepnn builds the graph for a deep net for classifying digits.\n\n Args:\n x: an input tensor with the dimensions (N_examples, 784), where 784 is the\n number of pixels in a standard MNIST image.\n\n Returns:\n A tuple (y, a scalar placeholder). y is a tensor of shape (N_examples, 10), with values\n equal to the logits of classifying the digit into one of 10 classes (the\n digits 0-9). The scalar placeholder is meant for the probability of dropout. 
Since we don't\n use a dropout layer in this script, this placeholder is of no relevance and acts as a dummy.\n \"\"\"\n # Reshape to use within a convolutional neural net.\n # Last dimension is for \"features\" - there is only one here, since images are\n # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.\n with tf.name_scope('reshape'):\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n\n # First convolutional layer - maps one grayscale image to 32 feature maps.\n with tf.name_scope('conv1'):\n W_conv1 = weight_variable([5, 5, 1, 32], \"W_conv1\")\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # Pooling layer - downsamples by 2X.\n with tf.name_scope('pool1'):\n h_pool1 = max_pool_2x2(h_conv1)\n\n # Second convolutional layer -- maps 32 feature maps to 64.\n with tf.name_scope('conv2'):\n W_conv2 = weight_variable([5, 5, 32, 64], \"W_conv2\")\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n # Second pooling layer.\n with tf.name_scope('pool2'):\n h_pool2 = max_pool_2x2(h_conv2)\n\n # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image\n # is down to 7x7x64 feature maps -- maps this to 1024 features.\n with tf.name_scope('fc1'):\n W_fc1 = weight_variable([7 * 7 * 64, 1024], \"W_fc1\")\n b_fc1 = bias_variable([1024])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Map the 1024 features to 10 classes, one for each digit\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([1024, 10], \"W_fc2\")\n b_fc2 = bias_variable([10])\n\n # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2\n return y_conv, tf.placeholder(tf.float32)\n\n\ndef conv2d(x, W):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n \"\"\"max_pool_2x2 downsamples a feature map by 2X.\"\"\"\n return tf.nn.max_pool(\n x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef weight_variable(shape, name):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n weight_var = tf.get_variable(name, shape)\n return weight_var\n\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef train_mnist_cnn(FLAGS):\n # Config\n config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False,\n inter_op_parallelism_threads=1)\n config_ngraph_enabled = ngraph_bridge.update_config(config)\n\n # Note: Additional configuration option to boost performance is to set the\n # following environment for the run:\n # OMP_NUM_THREADS=44 KMP_AFFINITY=granularity=fine,scatter\n # The OMP_NUM_THREADS number should correspond to the number of\n # cores in the system\n\n # Import data\n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n\n # Create the model\n x = tf.placeholder(tf.float32, [None, 784])\n\n # Define loss and optimizer\n y_ = tf.placeholder(tf.float32, [None, 10])\n\n # Build the graph for the deep net\n y_conv, keep_prob = deepnn(x)\n\n with tf.name_scope('loss'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n labels=y_, logits=y_conv)\n cross_entropy = tf.reduce_mean(cross_entropy)\n\n # add distributed wrapper to \"adam_optimizer\"\n opt = hvd.DistributedOptimizer(tf.train.AdamOptimizer(1e-4))\n 
global_step = tf.contrib.framework.get_or_create_global_step()\n with tf.name_scope('distributed_optimizer'):\n train_step = opt.minimize(cross_entropy, global_step=global_step)\n\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n correct_prediction = tf.cast(correct_prediction, tf.float32)\n accuracy = tf.reduce_mean(correct_prediction)\n tf.summary.scalar('Training accuracy', accuracy)\n tf.summary.scalar('Loss function', cross_entropy)\n\n graph_location = \"/tmp/\" + getpass.getuser(\n ) + \"/tensorboard-logs/mnist-convnet\"\n print('Saving graph to: %s' % graph_location)\n\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(graph_location)\n train_writer.add_graph(tf.get_default_graph())\n\n saver = tf.train.Saver()\n train_loops = FLAGS.train_loop_count\n num_test_images = FLAGS.test_image_count\n hooks = [\n # Horovod: BroadcastGlobalVariablesHook broadcasts initial variable states\n # from rank 0 to all other processes. This is necessary to ensure consistent\n # initialization of all workers when training is started with random weights\n # or restored from a checkpoint.\n hvd.BroadcastGlobalVariablesHook(0),\n # Horovod: adjust number of steps based on number of ranks.\n #tf.train.StopAtStepHook(train_loops // hvd.size())\n tf.train.StopAtStepHook(train_loops)\n ]\n\n with tf.train.MonitoredTrainingSession(\n hooks=hooks, config=config_ngraph_enabled) as sess:\n\n step = 0\n start = time.time()\n\n loss_values = []\n test_accuracy = []\n while not sess.should_stop():\n batch = mnist.train.next_batch(FLAGS.batch_size)\n sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})\n step += 1\n if step % 10 == 0:\n t = time.time()\n if hvd.rank() == 0:\n print('step %d training accuracy %g %g sec to evaluate' %\n (step,\n sess.run(\n accuracy, feed_dict={\n x: batch[0],\n y_: batch[1]\n }), time.time() - t))\n t = time.time()\n _, summary, loss = sess.run([train_step, merged, cross_entropy],\n feed_dict={\n x: batch[0],\n y_: batch[1],\n keep_prob: 0.5\n })\n loss_values.append(loss)\n if hvd.rank() == 0:\n print('step %d, loss %g, %g sec for training step' %\n (step, loss, time.time() - t))\n train_writer.add_summary(summary, step)\n\n if step == (train_loops // hvd.size() - 1) and hvd.rank() == 0:\n x_test = mnist.test.images[:num_test_images]\n y_test = mnist.test.labels[:num_test_images]\n print('test accuracy: ',\n sess.run(accuracy, feed_dict={\n x: x_test,\n y_: y_test\n }))\n test_accuracy.append(accuracy)\n\n print(\"Training finished. 
Running test\")\n saver.save(sess, FLAGS.model_dir)\n return loss_values, test_accuracy\n\n\ndef main(_):\n train_mnist_cnn(FLAGS)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--data_dir',\n type=str,\n default='/tmp/tensorflow/mnist/input_data',\n help='Directory where input data is stored')\n\n parser.add_argument(\n '--train_loop_count',\n type=int,\n default=1000,\n help='Number of training iterations')\n\n parser.add_argument('--batch_size', type=int, default=50, help='Batch Size')\n\n parser.add_argument(\n '--test_image_count',\n type=int,\n default=None,\n help=\"Number of test images to evaluate on\")\n\n parser.add_argument(\n '--model_dir',\n type=str,\n default='./mnist_trained/',\n help='enter model dir')\n\n FLAGS, unparsed = parser.parse_known_args()\n hvd.init()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n", "# ==============================================================================\n# Copyright 2019-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pytest for a simple run on model testing framework\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\nimport platform\nimport os\n\nimport tensorflow as tf\nimport numpy as np\nimport re\n\nfrom common import NgraphTest\nimport ngraph_bridge\n\n\nclass TestNgraphSerialize(NgraphTest):\n\n def test_ng_serialize_to_json(self):\n initial_contents = set(os.listdir())\n xshape = (3, 4, 5)\n x = tf.placeholder(tf.float32, shape=xshape)\n out = tf.nn.l2_loss(tf.abs(x))\n values = np.random.rand(*xshape)\n\n config = ngraph_bridge.update_config(tf.ConfigProto())\n ngraph_enable_serialize = os.environ.pop('NGRAPH_ENABLE_SERIALIZE',\n None)\n os.environ['NGRAPH_ENABLE_SERIALIZE'] = '1'\n ngraph_bridge.enable()\n with tf.Session(config=config) as sess:\n out = sess.run((out), feed_dict={x: values})\n os.environ.pop('NGRAPH_ENABLE_SERIALIZE', None)\n if ngraph_enable_serialize is not None:\n os.environ['NGRAPH_ENABLE_SERIALIZE'] = \\\n ngraph_enable_serialize\n\n final_contents = set(os.listdir())\n assert (len(final_contents) - len(initial_contents) == 1)\n new_files = final_contents.difference(initial_contents)\n flname = new_files.pop()\n assert (flname.startswith('tf_function_') and flname.endswith('json'))\n os.remove(flname)\n" ]
[ [ "tensorflow.python.ops.gen_math_ops.tanh_grad" ], [ "tensorflow.get_variable", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.max_pool", "tensorflow.cast", "tensorflow.train.AdamOptimizer", "tensorflow.get_default_graph", "tensorflow.train.MonitoredTrainingSession", "tensorflow.summary.scalar", "tensorflow.nn.conv2d", "tensorflow.Variable", "tensorflow.ConfigProto", "tensorflow.name_scope", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.app.run", "tensorflow.matmul", "tensorflow.train.StopAtStepHook", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.constant", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.contrib.framework.get_or_create_global_step" ], [ "tensorflow.placeholder", "tensorflow.ConfigProto", "numpy.random.rand", "tensorflow.Session", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
apoorvanand/Deep-Virtual-Try-On
[ "56d536d46913afb8504ad3336697f2adf7dc965c" ]
[ "lib/geometric_matching_multi_gpu.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom torchvision import models\nimport os\nimport torch.nn.functional as F\nimport numpy as np\nimport sys\n\nsys.path.append('..')\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('Linear') != -1:\n init.normal(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, 0.02)\n init.constant_(m.bias.data, 0.0)\n\n\ndef weights_init_xavier(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.xavier_normal_(m.weight.data, gain=0.02)\n elif classname.find('Linear') != -1:\n init.xavier_normal_(m.weight.data, gain=0.02)\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, 0.02)\n init.constant_(m.bias.data, 0.0)\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, 0.02)\n init.constant_(m.bias.data, 0.0)\n\n\ndef init_weights(net, init_type='normal'):\n print('initialization method [%s]' % init_type)\n if init_type == 'normal':\n net.apply(weights_init_normal)\n elif init_type == 'xavier':\n net.apply(weights_init_xavier)\n elif init_type == 'kaiming':\n net.apply(weights_init_kaiming)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n\nclass FeatureExtraction(nn.Module):\n def __init__(self, input_nc, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(FeatureExtraction, self).__init__()\n downconv = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1)\n model = [downconv, nn.ReLU(True), norm_layer(ngf)]\n for i in range(n_layers):\n in_ngf = 2**i * ngf if 2**i * ngf < 512 else 512\n out_ngf = 2**(i+1) * ngf if 2**i * ngf < 512 else 512\n downconv = nn.Conv2d(in_ngf, out_ngf, kernel_size=4, stride=2, padding=1)\n model += [downconv, nn.ReLU(True)]\n model += [norm_layer(out_ngf)]\n model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]\n model += [norm_layer(512)]\n model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]\n \n self.model = nn.Sequential(*model)\n init_weights(self.model, init_type='normal')\n\n def forward(self, x):\n return self.model(x)\n\nclass FeatureL2Norm(torch.nn.Module):\n def __init__(self):\n super(FeatureL2Norm, self).__init__()\n\n def forward(self, feature):\n epsilon = 1e-6\n norm = torch.pow(torch.sum(torch.pow(feature,2),1)+epsilon,0.5).unsqueeze(1).expand_as(feature)\n return torch.div(feature,norm)\n \nclass FeatureCorrelation(nn.Module):\n def __init__(self):\n super(FeatureCorrelation, self).__init__()\n \n def forward(self, feature_A, feature_B):\n b,c,h,w = feature_A.size()\n # reshape features for matrix multiplication\n feature_A = feature_A.transpose(2,3).contiguous().view(b,c,h*w)\n feature_B = feature_B.view(b,c,h*w).transpose(1,2)\n # perform matrix mult.\n feature_mul = torch.bmm(feature_B,feature_A)\n correlation_tensor = feature_mul.view(b,h,w,h*w).transpose(2,3).transpose(1,2)\n return correlation_tensor\n \nclass FeatureRegression(nn.Module):\n def __init__(self, input_nc=512,output_dim=6, use_cuda=True):\n super(FeatureRegression, self).__init__()\n 
self.conv = nn.Sequential(\n nn.Conv2d(input_nc, 512, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Conv2d(512, 256, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n )\n self.linear = nn.Linear(64 * 4 * 3, output_dim)\n self.tanh = nn.Tanh()\n # if use_cuda:\n # self.conv.cuda()\n # self.linear.cuda()\n # self.tanh.cuda()\n\n def forward(self, x):\n x = self.conv(x)\n x = x.reshape(x.size(0), -1)\n x = self.linear(x)\n x = self.tanh(x)\n return x\n\nclass AffineGridGen(nn.Module):\n def __init__(self, out_h=256, out_w=192, out_ch = 3):\n super(AffineGridGen, self).__init__() \n self.out_h = out_h\n self.out_w = out_w\n self.out_ch = out_ch\n \n def forward(self, theta):\n theta = theta.contiguous()\n batch_size = theta.size()[0]\n out_size = torch.Size((batch_size,self.out_ch,self.out_h,self.out_w))\n return F.affine_grid(theta, out_size)\n \nclass TpsGridGen(nn.Module):\n def __init__(self, out_h=256, out_w=192, use_regular_grid=True, grid_size=3, reg_factor=0, use_cuda=True):\n super(TpsGridGen, self).__init__()\n self.out_h, self.out_w = out_h, out_w\n self.reg_factor = reg_factor\n self.use_cuda = use_cuda\n\n # create grid in numpy\n self.grid = np.zeros([self.out_h, self.out_w, 3], dtype=np.float32)\n # sampling grid with dim-0 coords (Y)\n self.grid_X,self.grid_Y = np.meshgrid(np.linspace(-1,1,out_w),np.linspace(-1,1,out_h))\n # grid_X,grid_Y: size [1,H,W,1,1]\n self.grid_X = torch.FloatTensor(self.grid_X).unsqueeze(0).unsqueeze(3)\n self.grid_Y = torch.FloatTensor(self.grid_Y).unsqueeze(0).unsqueeze(3)\n if use_cuda:\n self.grid_X = self.grid_X.cuda()\n self.grid_Y = self.grid_Y.cuda()\n\n # initialize regular grid for control points P_i\n if use_regular_grid:\n axis_coords = np.linspace(-1,1,grid_size)\n self.N = grid_size*grid_size\n P_Y,P_X = np.meshgrid(axis_coords,axis_coords)\n P_X = np.reshape(P_X,(-1,1)) # size (N,1)\n P_Y = np.reshape(P_Y,(-1,1)) # size (N,1)\n P_X = torch.FloatTensor(P_X)\n P_Y = torch.FloatTensor(P_Y)\n self.P_X_base = P_X.clone()\n self.P_Y_base = P_Y.clone()\n self.Li = self.compute_L_inverse(P_X,P_Y).unsqueeze(0)\n self.P_X = P_X.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)\n self.P_Y = P_Y.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)\n if use_cuda:\n self.P_X = self.P_X.cuda()\n self.P_Y = self.P_Y.cuda()\n self.P_X_base = self.P_X_base.cuda()\n self.P_Y_base = self.P_Y_base.cuda()\n\n def forward(self, theta):\n gpu_id = theta.get_device()\n self.grid_X = self.grid_X.to(gpu_id)\n self.grid_Y = self.grid_Y.to(gpu_id)\n self.P_X = self.P_X.to(gpu_id)\n self.P_Y = self.P_Y.to(gpu_id)\n self.P_X_base = self.P_X_base.to(gpu_id)\n self.P_Y_base = self.P_Y_base.to(gpu_id)\n self.Li = self.Li.to(gpu_id) \n warped_grid = self.apply_transformation(theta,torch.cat((self.grid_X,self.grid_Y),3))\n return warped_grid\n \n def compute_L_inverse(self,X,Y):\n N = X.size()[0] # num of points (along dim 0)\n # construct matrix K\n Xmat = X.expand(N,N)\n Ymat = Y.expand(N,N)\n P_dist_squared = torch.pow(Xmat-Xmat.transpose(0,1),2)+torch.pow(Ymat-Ymat.transpose(0,1),2)\n P_dist_squared[P_dist_squared==0]=1 # make diagonal 1 to avoid NaN in log computation\n K = torch.mul(P_dist_squared,torch.log(P_dist_squared))\n # construct matrix L\n O = torch.FloatTensor(N,1).fill_(1)\n Z = 
torch.FloatTensor(3,3).fill_(0) \n P = torch.cat((O,X,Y),1)\n L = torch.cat((torch.cat((K,P),1),torch.cat((P.transpose(0,1),Z),1)),0)\n self.Li = torch.inverse(L)\n if self.use_cuda:\n self.Li = self.Li.cuda()\n return self.Li\n \n def apply_transformation(self,theta,points):\n if theta.dim()==2:\n theta = theta.unsqueeze(2).unsqueeze(3)\n # points should be in the [B,H,W,2] format,\n # where points[:,:,:,0] are the X coords \n # and points[:,:,:,1] are the Y coords \n \n # input are the corresponding control points P_i\n batch_size = theta.size()[0]\n # split theta into point coordinates\n Q_X=theta[:,:self.N,:,:].squeeze(3)\n Q_Y=theta[:,self.N:,:,:].squeeze(3)\n Q_X = Q_X + self.P_X_base.expand_as(Q_X)\n Q_Y = Q_Y + self.P_Y_base.expand_as(Q_Y)\n \n # get spatial dimensions of points\n points_b = points.size()[0]\n points_h = points.size()[1]\n points_w = points.size()[2]\n \n # repeat pre-defined control points along spatial dimensions of points to be transformed\n P_X = self.P_X.expand((1,points_h,points_w,1,self.N))\n P_Y = self.P_Y.expand((1,points_h,points_w,1,self.N))\n \n # compute weights for non-linear part\n W_X = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_X)\n W_Y = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_Y)\n # reshape\n # W_X,W_Y: size [B,H,W,1,N]\n W_X = W_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)\n W_Y = W_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)\n # compute weights for affine part\n A_X = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_X)\n A_Y = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_Y)\n # reshape\n # A_X,A_Y: size [B,H,W,1,3]\n A_X = A_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)\n A_Y = A_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)\n \n # compute distance P_i - (grid_X,grid_Y)\n # grid is expanded in point dim 4, but not in batch dim 0, as points P_X,P_Y are fixed for all batch\n points_X_for_summation = points[:,:,:,0].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,0].size()+(1,self.N))\n points_Y_for_summation = points[:,:,:,1].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,1].size()+(1,self.N))\n \n if points_b==1:\n delta_X = points_X_for_summation-P_X\n delta_Y = points_Y_for_summation-P_Y\n else:\n # use expanded P_X,P_Y in batch dimension\n delta_X = points_X_for_summation-P_X.expand_as(points_X_for_summation)\n delta_Y = points_Y_for_summation-P_Y.expand_as(points_Y_for_summation)\n \n dist_squared = torch.pow(delta_X,2)+torch.pow(delta_Y,2)\n # U: size [1,H,W,1,N]\n dist_squared[dist_squared==0]=1 # avoid NaN in log computation\n U = torch.mul(dist_squared,torch.log(dist_squared)) \n \n # expand grid in batch dimension if necessary\n points_X_batch = points[:,:,:,0].unsqueeze(3)\n points_Y_batch = points[:,:,:,1].unsqueeze(3)\n if points_b==1:\n points_X_batch = points_X_batch.expand((batch_size,)+points_X_batch.size()[1:])\n points_Y_batch = points_Y_batch.expand((batch_size,)+points_Y_batch.size()[1:])\n \n points_X_prime = A_X[:,:,:,:,0]+ \\\n torch.mul(A_X[:,:,:,:,1],points_X_batch) + \\\n torch.mul(A_X[:,:,:,:,2],points_Y_batch) + \\\n torch.sum(torch.mul(W_X,U.expand_as(W_X)),4)\n \n points_Y_prime = A_Y[:,:,:,:,0]+ \\\n torch.mul(A_Y[:,:,:,:,1],points_X_batch) + \\\n torch.mul(A_Y[:,:,:,:,2],points_Y_batch) + \\\n torch.sum(torch.mul(W_Y,U.expand_as(W_Y)),4)\n \n return 
torch.cat((points_X_prime,points_Y_prime),3)\n \n# Defines the Unet generator.\n# |num_downs|: number of downsamplings in UNet. For example,\n# if |num_downs| == 7, image of size 128x128 will become of size 1x1\n# at the bottleneck\nclass UnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf=64,\n norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetGenerator, self).__init__()\n # construct unet structure\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)\n for i in range(num_downs - 5):\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)\n\n self.model = unet_block\n\n def forward(self, input):\n return self.model(input)\n\n\n# Defines the submodule with skip connection.\n# X -------------------identity---------------------- X\n# |-- downsampling -- |submodule| -- upsampling --|\nclass UnetSkipConnectionBlock(nn.Module):\n def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n use_bias = norm_layer == nn.InstanceNorm2d\n\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upsample = nn.Upsample(scale_factor=2, mode='bilinear')\n upconv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)\n down = [downconv]\n up = [uprelu, upsample, upconv, upnorm]\n model = down + [submodule] + up\n elif innermost:\n upsample = nn.Upsample(scale_factor=2, mode='bilinear')\n upconv = nn.Conv2d(inner_nc, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upsample, upconv, upnorm]\n model = down + up\n else:\n upsample = nn.Upsample(scale_factor=2, mode='bilinear')\n upconv = nn.Conv2d(inner_nc*2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upsample, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n if self.outermost:\n return self.model(x)\n else:\n return torch.cat([x, self.model(x)], 1)\n\nclass Vgg19(nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg19, self).__init__()\n vgg_pretrained_features = models.vgg19(pretrained=True).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n for x in range(2):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n 
for x in range(2, 7):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(7, 12):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(12, 21):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(21, 30):\n self.slice5.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n \n def forward(self, X):\n h_relu1 = self.slice1(X)\n h_relu2 = self.slice2(h_relu1)\n h_relu3 = self.slice3(h_relu2)\n h_relu4 = self.slice4(h_relu3)\n h_relu5 = self.slice5(h_relu4)\n out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]\n return out\n\nclass VGGLoss(nn.Module):\n def __init__(self, layids = None):\n super(VGGLoss, self).__init__()\n self.vgg = Vgg19()\n self.vgg.cuda()\n self.criterion = nn.L1Loss()\n self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]\n self.layids = layids\n\n def forward(self, x, y):\n x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n loss = 0\n if self.layids is None:\n self.layids = list(range(len(x_vgg)))\n for i in self.layids:\n loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())\n return loss\n\nclass GMM(nn.Module):\n \"\"\" Geometric Matching Module\n \"\"\"\n def __init__(self, opt):\n super(GMM, self).__init__()\n self.extractionA = FeatureExtraction(22, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d) \n self.extractionB = FeatureExtraction(3, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)\n self.l2norm = FeatureL2Norm()\n self.correlation = FeatureCorrelation()\n self.regression = FeatureRegression(input_nc=192, output_dim=2*opt.grid_size**2, use_cuda=True)\n self.gridGen = TpsGridGen(opt.fine_height, opt.fine_width, use_cuda=True, grid_size=opt.grid_size)\n \n def forward(self, inputA, inputB):\n featureA = self.extractionA(inputA)\n featureB = self.extractionB(inputB)\n featureA = self.l2norm(featureA)\n featureB = self.l2norm(featureB)\n correlation = self.correlation(featureA, featureB)\n\n theta = self.regression(correlation)\n grid = self.gridGen(theta)\n return grid, theta\n\ndef save_checkpoint(model, save_path):\n if not os.path.exists(os.path.dirname(save_path)):\n os.makedirs(os.path.dirname(save_path))\n\n torch.save(model.cpu().state_dict(), save_path)\n model.cuda()\n\ndef load_checkpoint(model, checkpoint_path):\n if not os.path.exists(checkpoint_path):\n return\n model.load_state_dict(torch.load(checkpoint_path))\n model.cuda()\n\nif __name__ == '__main__':\n import config\n # in1 = torch.rand(4,3,256,192).cuda()\n # in2 = torch.rand(4,3,256,192).cuda()\n # cfg = config.Config().parse()\n # gmm = GMM(cfg)\n # gmm.cuda()\n # out = gmm(in1, in2)\n\n tps = TpsGridGen(256,192,True)\n theta = torch.randn(1,6)\n grid = tps(theta)\n print(grid.shape)\n\n" ]
[ [ "numpy.linspace", "torch.cat", "torch.load", "torch.FloatTensor", "torch.nn.L1Loss", "torch.nn.functional.affine_grid", "torch.pow", "torch.Size", "torch.nn.Dropout", "numpy.reshape", "torch.randn", "torch.inverse", "torch.mul", "torch.bmm", "numpy.zeros", "torch.nn.Sequential", "torch.div", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.init.xavier_normal_", "torch.nn.Linear", "torch.nn.init.normal_", "torch.nn.LeakyReLU", "torch.log", "torch.nn.BatchNorm2d", "numpy.meshgrid", "torch.nn.Tanh", "torch.nn.Upsample", "torch.nn.init.normal", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JJUNGYUN/tensorflow-fast-style-transfer
[ "faf8608399b14de008edf533169b2cf25c811dbc" ]
[ "utils.py" ]
[ "import numpy as np\nimport PIL.Image\nimport os\nimport scipy\nfrom matplotlib.pyplot import imread, imsave\nfrom skimage.transform import resize\n\n\n\"\"\"Helper-functions to load MSCOCO DB\"\"\"\n# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py\ndef get_img(src, img_size=False):\n img = imread(src)\n if not (len(img.shape) == 3 and img.shape[2] == 3):\n img = np.dstack((img,img,img))\n if img_size != False:\n img = resize(img, img_size)\n return img\n\ndef get_files(img_dir):\n files = list_files(img_dir)\n return list(map(lambda x: os.path.join(img_dir,x), files))\n\ndef list_files(in_path):\n files = []\n for (dirpath, dirnames, filenames) in os.walk(in_path):\n files.extend(filenames)\n break\n return files\n\n\"\"\"Helper-functions for image manipulation\"\"\"\n# borrowed from https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/15_Style_Transfer.ipynb\n\n# This function loads an image and returns it as a numpy array of floating-points.\n# The image can be automatically resized so the largest of the height or width equals max_size.\n# or resized to the given shape\ndef load_image(filename, shape=None, max_size=None):\n image = PIL.Image.open(filename)\n\n if max_size is not None:\n # Calculate the appropriate rescale-factor for\n # ensuring a max height and width, while keeping\n # the proportion between them.\n factor = float(max_size) / np.max(image.size)\n\n # Scale the image's height and width.\n size = np.array(image.size) * factor\n\n # The size is now floating-point because it was scaled.\n # But PIL requires the size to be integers.\n size = size.astype(int)\n\n # Resize the image.\n image = resize(size, PIL.Image.LANCZOS) # PIL.Image.LANCZOS is one of resampling filter\n\n if shape is not None:\n image = resize(shape, PIL.Image.LANCZOS) # PIL.Image.LANCZOS is one of resampling filter\n\n # Convert to numpy floating-point array.\n return np.float32(image)\n\n# Save an image as a jpeg-file.\n# The image is given as a numpy array with pixel-values between 0 and 255.\ndef save_image(image, filename):\n # Ensure the pixel-values are between 0 and 255.\n image = np.clip(image, 0.0, 255.0)\n\n # Convert to bytes.\n image = image.astype(np.uint8)\n\n # Write the image-file in jpeg-format.\n with open(filename, 'wb') as file:\n PIL.Image.fromarray(image).save(file, 'jpeg')" ]
[ [ "numpy.clip", "matplotlib.pyplot.imread", "numpy.dstack", "numpy.max", "numpy.float32", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vfdev-5/POT
[ "e757b75976ece1e6e53e655852b9f8863e7b6f5a" ]
[ "test/test_da.py" ]
[ "\"\"\"Tests for module da on Domain Adaptation \"\"\"\n\n# Author: Remi Flamary <[email protected]>\n#\n# License: MIT License\n\nimport numpy as np\nfrom numpy.testing.utils import assert_allclose, assert_equal\n\nimport ot\nfrom ot.datasets import make_data_classif\nfrom ot.utils import unif\n\n\ndef test_sinkhorn_lpl1_transport_class():\n \"\"\"test_sinkhorn_transport\n \"\"\"\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otda = ot.da.SinkhornLpl1Transport()\n\n # test its computed\n otda.fit(Xs=Xs, ys=ys, Xt=Xt)\n assert hasattr(otda, \"cost_\")\n assert hasattr(otda, \"coupling_\")\n\n # test dimensions of coupling\n assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # test inverse transform\n transp_Xt = otda.inverse_transform(Xt=Xt)\n assert_equal(transp_Xt.shape, Xt.shape)\n\n Xt_new, _ = make_data_classif('3gauss2', nt + 1)\n transp_Xt_new = otda.inverse_transform(Xt=Xt_new)\n\n # check that the oos method is working\n assert_equal(transp_Xt_new.shape, Xt_new.shape)\n\n # test fit_transform\n transp_Xs = otda.fit_transform(Xs=Xs, ys=ys, Xt=Xt)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n # test unsupervised vs semi-supervised mode\n otda_unsup = ot.da.SinkhornLpl1Transport()\n otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)\n n_unsup = np.sum(otda_unsup.cost_)\n\n otda_semi = ot.da.SinkhornLpl1Transport()\n otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)\n assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n n_semisup = np.sum(otda_semi.cost_)\n\n # check that the cost matrix norms are indeed different\n assert n_unsup != n_semisup, \"semisupervised mode not working\"\n\n # check that the coupling forbids mass transport between labeled source\n # and labeled target samples\n mass_semi = np.sum(\n otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])\n assert mass_semi == 0, \"semisupervised mode not working\"\n\n\ndef test_sinkhorn_l1l2_transport_class():\n \"\"\"test_sinkhorn_transport\n \"\"\"\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otda = ot.da.SinkhornL1l2Transport()\n\n # test its computed\n otda.fit(Xs=Xs, ys=ys, Xt=Xt)\n assert hasattr(otda, \"cost_\")\n assert hasattr(otda, \"coupling_\")\n assert hasattr(otda, \"log_\")\n\n # test dimensions of coupling\n assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n 
assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # test inverse transform\n transp_Xt = otda.inverse_transform(Xt=Xt)\n assert_equal(transp_Xt.shape, Xt.shape)\n\n Xt_new, _ = make_data_classif('3gauss2', nt + 1)\n transp_Xt_new = otda.inverse_transform(Xt=Xt_new)\n\n # check that the oos method is working\n assert_equal(transp_Xt_new.shape, Xt_new.shape)\n\n # test fit_transform\n transp_Xs = otda.fit_transform(Xs=Xs, ys=ys, Xt=Xt)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n # test unsupervised vs semi-supervised mode\n otda_unsup = ot.da.SinkhornL1l2Transport()\n otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)\n n_unsup = np.sum(otda_unsup.cost_)\n\n otda_semi = ot.da.SinkhornL1l2Transport()\n otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)\n assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n n_semisup = np.sum(otda_semi.cost_)\n\n # check that the cost matrix norms are indeed different\n assert n_unsup != n_semisup, \"semisupervised mode not working\"\n\n # check that the coupling forbids mass transport between labeled source\n # and labeled target samples\n mass_semi = np.sum(\n otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])\n mass_semi = otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]\n assert_allclose(mass_semi, np.zeros_like(mass_semi),\n rtol=1e-9, atol=1e-9)\n\n # check everything runs well with log=True\n otda = ot.da.SinkhornL1l2Transport(log=True)\n otda.fit(Xs=Xs, ys=ys, Xt=Xt)\n assert len(otda.log_.keys()) != 0\n\n\ndef test_sinkhorn_transport_class():\n \"\"\"test_sinkhorn_transport\n \"\"\"\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otda = ot.da.SinkhornTransport()\n\n # test its computed\n otda.fit(Xs=Xs, Xt=Xt)\n assert hasattr(otda, \"cost_\")\n assert hasattr(otda, \"coupling_\")\n assert hasattr(otda, \"log_\")\n\n # test dimensions of coupling\n assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # test inverse transform\n transp_Xt = otda.inverse_transform(Xt=Xt)\n assert_equal(transp_Xt.shape, Xt.shape)\n\n Xt_new, _ = make_data_classif('3gauss2', nt + 1)\n transp_Xt_new = otda.inverse_transform(Xt=Xt_new)\n\n # check that the oos method is working\n assert_equal(transp_Xt_new.shape, Xt_new.shape)\n\n # test fit_transform\n transp_Xs = otda.fit_transform(Xs=Xs, Xt=Xt)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n # test unsupervised vs semi-supervised mode\n otda_unsup = ot.da.SinkhornTransport()\n otda_unsup.fit(Xs=Xs, Xt=Xt)\n n_unsup = np.sum(otda_unsup.cost_)\n\n otda_semi = ot.da.SinkhornTransport()\n otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)\n assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n n_semisup = np.sum(otda_semi.cost_)\n\n # check that the cost matrix norms are indeed different\n assert n_unsup != n_semisup, \"semisupervised mode not working\"\n\n # check that the coupling forbids mass transport between labeled source\n # and labeled target 
samples\n mass_semi = np.sum(\n otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])\n assert mass_semi == 0, \"semisupervised mode not working\"\n\n # check everything runs well with log=True\n otda = ot.da.SinkhornTransport(log=True)\n otda.fit(Xs=Xs, ys=ys, Xt=Xt)\n assert len(otda.log_.keys()) != 0\n\n\ndef test_emd_transport_class():\n \"\"\"test_sinkhorn_transport\n \"\"\"\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otda = ot.da.EMDTransport()\n\n # test its computed\n otda.fit(Xs=Xs, Xt=Xt)\n assert hasattr(otda, \"cost_\")\n assert hasattr(otda, \"coupling_\")\n\n # test dimensions of coupling\n assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # test inverse transform\n transp_Xt = otda.inverse_transform(Xt=Xt)\n assert_equal(transp_Xt.shape, Xt.shape)\n\n Xt_new, _ = make_data_classif('3gauss2', nt + 1)\n transp_Xt_new = otda.inverse_transform(Xt=Xt_new)\n\n # check that the oos method is working\n assert_equal(transp_Xt_new.shape, Xt_new.shape)\n\n # test fit_transform\n transp_Xs = otda.fit_transform(Xs=Xs, Xt=Xt)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n # test unsupervised vs semi-supervised mode\n otda_unsup = ot.da.EMDTransport()\n otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)\n n_unsup = np.sum(otda_unsup.cost_)\n\n otda_semi = ot.da.EMDTransport()\n otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)\n assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n n_semisup = np.sum(otda_semi.cost_)\n\n # check that the cost matrix norms are indeed different\n assert n_unsup != n_semisup, \"semisupervised mode not working\"\n\n # check that the coupling forbids mass transport between labeled source\n # and labeled target samples\n mass_semi = np.sum(\n otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])\n mass_semi = otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]\n\n # we need to use a small tolerance here, otherwise the test breaks\n assert_allclose(mass_semi, np.zeros_like(mass_semi),\n rtol=1e-2, atol=1e-2)\n\n\ndef test_mapping_transport_class():\n \"\"\"test_mapping_transport\n \"\"\"\n\n ns = 60\n nt = 120\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n\n ##########################################################################\n # kernel == linear mapping tests\n ##########################################################################\n\n # check computation and dimensions if bias == False\n otda = ot.da.MappingTransport(kernel=\"linear\", bias=False)\n otda.fit(Xs=Xs, Xt=Xt)\n assert hasattr(otda, \"coupling_\")\n assert hasattr(otda, \"mapping_\")\n assert hasattr(otda, \"log_\")\n\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.mapping_.shape, ((Xs.shape[1], Xt.shape[1])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n 
assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # check computation and dimensions if bias == True\n otda = ot.da.MappingTransport(kernel=\"linear\", bias=True)\n otda.fit(Xs=Xs, Xt=Xt)\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.mapping_.shape, ((Xs.shape[1] + 1, Xt.shape[1])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n ##########################################################################\n # kernel == gaussian mapping tests\n ##########################################################################\n\n # check computation and dimensions if bias == False\n otda = ot.da.MappingTransport(kernel=\"gaussian\", bias=False)\n otda.fit(Xs=Xs, Xt=Xt)\n\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.mapping_.shape, ((Xs.shape[0], Xt.shape[1])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # check computation and dimensions if bias == True\n otda = ot.da.MappingTransport(kernel=\"gaussian\", bias=True)\n otda.fit(Xs=Xs, Xt=Xt)\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.mapping_.shape, ((Xs.shape[0] + 1, Xt.shape[1])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # check everything runs well with log=True\n otda = ot.da.MappingTransport(kernel=\"gaussian\", log=True)\n otda.fit(Xs=Xs, Xt=Xt)\n assert len(otda.log_.keys()) != 0\n\n\ndef test_linear_mapping():\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n A, b = ot.da.OT_mapping_linear(Xs, Xt)\n\n Xst = Xs.dot(A) + b\n\n Ct = np.cov(Xt.T)\n Cst = np.cov(Xst.T)\n\n np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2)\n\n\ndef test_linear_mapping_class():\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otmap = ot.da.LinearTransport()\n\n otmap.fit(Xs=Xs, Xt=Xt)\n assert hasattr(otmap, 
\"A_\")\n assert hasattr(otmap, \"B_\")\n assert hasattr(otmap, \"A1_\")\n assert hasattr(otmap, \"B1_\")\n\n Xst = otmap.transform(Xs=Xs)\n\n Ct = np.cov(Xt.T)\n Cst = np.cov(Xst.T)\n\n np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2)\n" ]
[ [ "numpy.testing.utils.assert_equal", "numpy.cov", "numpy.zeros_like", "numpy.testing.assert_allclose", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TropComplique/bicycle-gan
[ "4bc8f4cdbe138e23c8a02c408cfb8e2ff7dfe6ab" ]
[ "networks/encoder.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass ResNetEncoder(nn.Module):\n\n def __init__(self, in_channels, out_dimension, depth=48, num_blocks=5):\n \"\"\"\n Arguments:\n in_channels: an integer.\n out_channels: an integer.\n depth: an integer.\n num_blocks: an integer, number of resnet blocks.\n \"\"\"\n super(ResNetEncoder, self).__init__()\n\n layers = [\n nn.Conv2d(in_channels, depth, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True),\n ]\n\n for n in range(1, num_blocks + 1):\n in_depth = depth * min(4, n)\n out_depth = depth * min(4, n + 1)\n layers.append(BasicBlock(in_depth, out_depth))\n\n # so, after all these layers the\n # input is downsampled by 2**(1 + num_blocks)\n\n layers.extend([\n nn.LeakyReLU(0.2, inplace=True),\n nn.AdaptiveAvgPool2d(1)\n ])\n\n self.layers = nn.Sequential(*layers)\n self.fc1 = nn.Linear(out_depth, out_dimension)\n self.fc2 = nn.Linear(out_depth, out_dimension)\n\n def forward(self, x):\n \"\"\"\n I assume that h and w are\n divisible by 2**(1 + num_blocks).\n\n The input tensor represents\n images with pixel values in [0, 1] range.\n\n Arguments:\n x: a float tensor with shape [b, in_channels, h, w].\n Returns:\n two float tensors with shape [b, out_dimension].\n \"\"\"\n x = 2.0 * x - 1.0\n x = self.layers(x) # shape [b, out_channels, 1, 1]\n x = x.view(x.size(0), -1)\n\n mean = self.fc1(x)\n logvar = self.fc2(x)\n return mean, logvar\n\n\nclass BasicBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels):\n super(BasicBlock, self).__init__()\n\n self.layers = nn.Sequential(\n nn.InstanceNorm2d(in_channels, affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, bias=False),\n nn.InstanceNorm2d(in_channels, affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),\n nn.AvgPool2d(kernel_size=2, stride=2)\n )\n\n self.shortcut = nn.Sequential(\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(in_channels, out_channels, kernel_size=1)\n )\n\n def forward(self, x):\n return self.layers(x) + self.shortcut(x)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.InstanceNorm2d", "torch.nn.LeakyReLU", "torch.nn.AdaptiveAvgPool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Rintarooo/MDVRP_MHA
[ "f196f1c99c3e4efa1ab6d75f4af77685afe4d191" ]
[ "Torch/Nets/decoder_utils.py" ]
[ "import torch\nimport torch.nn as nn\n\nclass Env():\n\tdef __init__(self, x, node_embeddings):\n\t\tsuper().__init__()\n\t\t\"\"\"depot_xy: (batch, n_depot, 2)\n\t\t\tcustomer_xy: (batch, n_customer, 2)\n\t\t\t--> xy: (batch, n_node, 2); Coordinates of depot + customer nodes\n\t\t\tn_node= n_depot + n_customer\n\t\t\tdemand: (batch, n_customer)\n\t\t\t??? --> demand: (batch, n_car, n_customer)\n\t\t\tD(remaining car capacity): (batch, n_car)\n\t\t\tnode_embeddings: (batch, n_node, embed_dim)\n\t\t\t--> node_embeddings: (batch, n_car, n_node, embed_dim)\n\n\t\t\tcar_start_node: (batch, n_car); start node index of each car\n\t\t\tcar_cur_node: (batch, n_car); current node index of each car\n\t\t\tcar_run: (batch, car); distance each car has run \n\t\t\tpi: (batch, n_car, decoder_step); which index node each car has moved \n\t\t\tdist_mat: (batch, n_node, n_node); distance matrix\n\t\t\ttraversed_nodes: (batch, n_node)\n\t\t\ttraversed_customer: (batch, n_customer)\n\t\t\"\"\"\n\t\tself.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\t\tself.demand = x['demand']\n\t\tself.xy = torch.cat([x['depot_xy'], x['customer_xy']], 1)\n\t\tself.car_start_node, self.D = x['car_start_node'], x['car_capacity']\n\t\tself.car_cur_node = self.car_start_node\n\t\tself.pi = self.car_start_node.unsqueeze(-1)\n\n\t\tself.n_depot = x['depot_xy'].size(1)\n\t\tself.n_customer = x['customer_xy'].size(1)\n\t\tself.n_car = self.car_start_node.size(1)\n\t\tself.batch, self.n_node, self.embed_dim = node_embeddings.size()\n\t\tself.node_embeddings = node_embeddings[:,None,:,:].repeat(1,self.n_car,1,1)\n\t\t\n\t\tself.demand_include_depot = torch.cat([torch.zeros((self.batch, self.n_depot), dtype = torch.float, device = self.device), self.demand], dim = 1)\n\t\tassert self.demand_include_depot.size(1) == self.n_node, 'demand_include_depot'\n\t\t\n\t\t# self.demand = demand[:,None,:].repeat(1,self.n_car,1)\t\t\n\t\tself.car_run = torch.zeros((self.batch, self.n_car), dtype = torch.float, device = self.device)\n\n\t\tself.dist_mat = self.build_dist_mat()\n\t\tself.mask_depot, self.mask_depot_unused = self.build_depot_mask()\n\t\tself.traversed_customer = torch.zeros((self.batch, self.n_customer), dtype = torch.bool, device = self.device)\n\t\t\n\tdef build_dist_mat(self):\n\t\txy = self.xy.unsqueeze(1).repeat(1, self.n_node, 1, 1)\n\t\tconst_xy = self.xy.unsqueeze(2).repeat(1, 1, self.n_node, 1)\n\t\tdist_mat = torch.sqrt(((xy - const_xy) ** 2).sum(dim = 3))\n\t\treturn dist_mat\n\n\tdef build_depot_mask(self):\n\t\ta = torch.arange(self.n_depot, device = self.device).reshape(1, 1, -1).repeat(self.batch, self.n_car, 1)\n\t\tb = self.car_start_node[:,:,None].repeat(1, 1, self.n_depot)\n\t\tdepot_one_hot = (a==b).bool()#.long()\n\t\treturn depot_one_hot, torch.logical_not(depot_one_hot)\n\n\tdef get_mask(self, next_node, next_car):\n\t\t\"\"\"self.demand **excludes depot**: (batch, n_nodes-1)\n\t\t\tselected_demand: (batch, 1)\n\t\t\tif next node is depot, do not select demand\n\t\t\tself.D: (batch, n_car, 1), D denotes \"remaining vehicle capacity\"\n\t\t\tself.capacity_over_customer **excludes depot**: (batch, n_car, n_customer)\n\t\t\tvisited_customer **excludes depot**: (batch, n_customer, 1)\n\t\t\tis_next_depot: (batch, 1), e.g. [[True], [True], ...]\n\n\t\t\"\"\"\n\t\tis_next_depot = (self.car_cur_node == self.car_start_node).bool()#.long().sum(-1)\n\t\t# e.g., is_next_depot = next_node == 0 or next_node == 1\n\t\t# is_next_depot: (batch, n_car), e.g. 
[[True], [True], ...]\n\n\t\t\n\t\tnew_traversed_node = torch.eye(self.n_node, device = self.device)[next_node.squeeze(1)]\n\t\t# new_traversed_node: (batch, node)\n\t\tnew_traversed_customer = new_traversed_node[:,self.n_depot:]\n\t\t# new_traversed_customer: (batch, n_customer)\n\t\tself.traversed_customer = self.traversed_customer | new_traversed_customer.bool()\n\t\t# traversed_customer: (batch, n_customer)\n\n\t\tselected_demand = torch.gather(input = self.demand_include_depot, dim = 1, index = next_node)\n\t\t# selected_demand: (batch, 1)\n\t\tselected_car = torch.eye(self.n_car, device = self.device)[next_car.squeeze(1)]\n\t\t# selected_car: (batch, n_car)\n\t\tcar_used_demand = selected_car * selected_demand\n\t\t# car_used_demand: (batch, n_car) \t\t\n\t\tself.D -= car_used_demand\n\t\t# D: (batch, n_car)\n\t\t# self.D = torch.clamp(self.D, min = 0.)\n\t\t\n\t\tD_over_customer = self.demand[:,None,:].repeat(1,self.n_car,1) > self.D[:,:,None].repeat(1,1,self.n_customer)\n\t\tmask_customer = D_over_customer | self.traversed_customer[:,None,:].repeat(1,self.n_car,1)\n\t\t# mask_customer: (batch, n_car, n_customer)\n\n\t\tmask_depot = is_next_depot & ((mask_customer == False).long().sum(dim = 2).sum(dim = 1)[:,None].repeat(1,self.n_car) > 0)\n\t\t# mask_depot: (batch, n_car)\n\t\t\"\"\"mask_depot = True --> We cannot choose depot in the next step \n\t\t\tif 1) the vehicle is at the depot in the next step\n\t\t\tor 2) there is a customer node which has not been visited yet\n\t\t\"\"\"\n\n\t\tmask_depot = self.mask_depot & mask_depot.bool()[:,:,None].repeat(1,1,self.n_depot)\n\t\t# mask_depot: (batch, n_car, n_depot)\n\n\t\tmask_depot = self.mask_depot_unused | mask_depot\n\t\t\"\"\"mask_depot: (batch, n_car, n_depot) \n\t\t\tmask_customer: (batch, n_car, n_customer) \n\t\t\t--> return mask: (batch, n_car, n_node ,1)\n\t\t\"\"\"\n\t\treturn torch.cat([mask_depot, mask_customer], dim = -1).unsqueeze(-1)\n\t\t\n\tdef generate_step_context(self):\n\t\t\"\"\"D: (batch, n_car)\n\t\t\t--> D: (batch, n_car, 1, 1)\n\t\t\t\n\t\t\teach_car_idx: (batch, n_car, 1, embed_dim)\n\t\t\tnode_embeddings: (batch, n_car, n_node, embed_dim)\n\t\t\t--> prev_embeddings(initially, depot_embeddings): (batch, n_car, 1, embed)\n\t\t\tnode embeddings where car is located\n\t\t\t\n\t\t\treturn step_context: (batch, n_car, 1, embed+1)\n\t\t\"\"\"\n\t\teach_car_idx = self.car_cur_node[:,:,None,None].repeat(1,1,1,self.embed_dim)\t\t\n\t\tprev_embeddings = torch.gather(input = self.node_embeddings, dim = 2, index = each_car_idx)\n\t\tstep_context = torch.cat([prev_embeddings, self.D[:,:,None,None]], dim = -1)\n\t\treturn step_context\n\n\tdef _get_step(self, next_node, next_car):\n\t\t\"\"\"next_node **includes depot** : (batch, 1) int(=long), range[0, n_node-1]\n\t\t\t\n\t\t\treturn\n\t\t\tmask: (batch, n_car, n_node ,1)\n\t\t\tstep_context: (batch, n_car, 1, embed+1)\n\t\t\"\"\"\n\t\tself.update_node_path(next_node, next_car)\n\t\tself.update_car_distance()\n\t\tmask = self.get_mask(next_node, next_car)\n\t\tstep_context = self.generate_step_context()\n\t\treturn mask, step_context\n\n\tdef _get_step_t1(self):\n\t\t\"\"\"return\n\t\t\tmask: (batch, n_car, n_node ,1)\n\t\t\tstep_context: (batch, n_car, 1, embed+1)\n\t\t\"\"\"\n\t\tmask_t1 = self.get_mask_t1()\n\t\tstep_context_t1 = self.generate_step_context()\t\t\n\t\treturn mask_t1, step_context_t1\n\n\tdef get_mask_t1(self):\n\t\t\"\"\"mask_depot: (batch, n_car, n_depot) \n\t\t\tmask_customer: (batch, n_car, n_customer) \n\t\t\t--> return mask: (batch, n_car, 
n_node ,1)\n\t\t\"\"\"\n\t\tmask_depot_t1 = self.mask_depot | self.mask_depot_unused\n\t\tmask_customer_t1 = self.traversed_customer[:,None,:].repeat(1,self.n_car,1)\n\t\treturn torch.cat([mask_depot_t1, mask_customer_t1], dim = -1).unsqueeze(-1)\n\t\t\n\tdef update_node_path(self, next_node, next_car):\n\t\t# car_node: (batch, n_car)\n\t\t# pi: (batch, n_car, decoder_step)\n\t\tself.car_prev_node = self.car_cur_node\n\t\ta = torch.arange(self.n_car, device = self.device).reshape(1, -1).repeat(self.batch, 1)\n\t\tb = next_car.reshape(self.batch, 1).repeat(1, self.n_car)\n\t\tmask_car = (a == b).long()\n\t\tnew_node = next_node.reshape(self.batch, 1).repeat(1, self.n_car)\n\t\tself.car_cur_node = mask_car * new_node + (1 - mask_car) * self.car_cur_node\n\t\t# (1-mask_car) keeps the same node for the unused car, mask_car updates new node for the used car\n\t\tself.pi = torch.cat([self.pi, self.car_cur_node.unsqueeze(-1)], dim = -1)\n\n\tdef update_car_distance(self):\n\t\tprev_node_dist_vec = torch.gather(input = self.dist_mat, dim = 1, index = self.car_prev_node[:,:,None].repeat(1,1,self.n_node))\n\t\t# dist = torch.gather(input = prev_node_dist_vec, dim = 2, index = self.car_cur_node[:,None,:].repeat(1,self.n_car,1))\n\t\tdist = torch.gather(input = prev_node_dist_vec, dim = 2, index = self.car_cur_node[:,:,None])\n\t\tself.car_run += dist.squeeze(-1)\n\t\t# print(self.car_run[0])\n\n\tdef return_depot_all_car(self):\n\t\tself.pi = torch.cat([self.pi, self.car_start_node.unsqueeze(-1)], dim = -1)\n\t\tself.car_prev_node = self.car_cur_node\n\t\tself.car_cur_node = self.car_start_node\n\t\tself.update_car_distance()\n\n\tdef get_log_likelihood(self, _log_p, _idx):\n\t\t\"\"\"_log_p: (batch, decode_step, n_car * n_node)\n\t\t\t_idx: (batch, decode_step, 1), selected index\n\t\t\"\"\"\n\t\tlog_p = torch.gather(input = _log_p, dim = 2, index = _idx)\n\t\treturn log_p.squeeze(-1).sum(dim = 1)\n\nclass Sampler(nn.Module):\n\t\"\"\"args; logits: (batch, n_car * n_nodes)\n\t\treturn; next_node: (batch, 1)\n\t\tTopKSampler --> greedy; sample one with biggest probability\n\t\tCategoricalSampler --> sampling; randomly sample one from possible distribution based on probability\n\t\"\"\"\n\tdef __init__(self, n_samples = 1, **kwargs):\n\t\tsuper().__init__(**kwargs)\n\t\tself.n_samples = n_samples\n\t\t\nclass TopKSampler(Sampler):\n\tdef forward(self, logits):\n\t\treturn torch.topk(logits, self.n_samples, dim = 1)[1]\n\t\t# torch.argmax(logits, dim = 1).unsqueeze(-1)\n\nclass CategoricalSampler(Sampler):\n\tdef forward(self, logits):\n\t\treturn torch.multinomial(logits.exp(), self.n_samples)" ]
[ [ "torch.zeros", "torch.cat", "torch.topk", "torch.eye", "torch.cuda.is_available", "torch.arange", "torch.gather", "torch.logical_not" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
piyueh/SEM-Exercises
[ "d25e6c1bc609022189952d97488828113cfb2206" ]
[ "utils/misc/misc.py" ]
[ "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 Pi-Yueh Chuang <[email protected]>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"Some misc functions\"\"\"\n\nimport numpy\nimport numbers\nimport functools\n\n# TODO: replace assertion with if ... raise\n\n\ndef factorial(n):\n \"\"\"Naive implementation of factorial\n\n For serious use, please consider scipy.special.factorial\n\n Args:\n n: an integer\n Returns:\n n!\n \"\"\"\n\n if not isinstance(n, (int, numpy.int_)):\n raise ValueError(\n \"n is not an integer: {0}, {1}\".format(n, type(n)))\n\n if n == 0:\n return 1\n else:\n return functools.reduce(lambda x, y: x * y, range(1, n+1))\n\n\ndef factorial_division(bg, end):\n \"\"\"Naive implementation of factorial division: end! / bg!\n\n This function is to avoid integer overflow. If end and bg are big, it is\n dangerous to use fractional(end) / fractional(bg) due to the potential of\n integer overflow.\n\n For serious use, please consider scipy.special.factorial\n\n Args:\n bg: the beginning integer\n end: the endding integer\n Returns:\n end! / bg!\n \"\"\"\n\n if not isinstance(bg, (int, numpy.int_)):\n raise ValueError(\n \"bg is not an integer: {0}, {1}\".format(bg, type(bg)))\n if not isinstance(end, (int, numpy.int_)):\n raise ValueError(\n \"end is not an integer: {0}, {1}\".format(end, type(end)))\n if bg < 0:\n raise ValueError(\"bg can not be smaller than zero!\")\n if end < bg:\n raise ValueError(\n \"end should larger than or equal to bg: \" +\n \"bg={0}, end={1}\".format(bg, end))\n\n if end == bg:\n return 1\n else:\n return functools.reduce(lambda x, y: x * y, range(bg+1, end+1))\n\n\ndef gamma(n):\n \"\"\"Naive implementation of gamma function (integer input)\n\n For serious use, please consider scipy.special.gamma\n\n Args:\n n: the integer\n Returns:\n (n-1)!\n \"\"\"\n return factorial(n-1)\n\n\ndef strip_trivial(z, tol=1e-8):\n \"\"\"if any element in array z is smaller than tol, we set it to zero\n\n Args:\n z: the array to be cleaned\n tol: the tolerance\n\n Returns:\n \"\"\"\n # TODO implement different way to lower the dependence of numpy\n z = z.astype(numpy.complex128)\n z = numpy.where(numpy.abs(z.real) < tol, z.imag*1j, z)\n z = numpy.where(numpy.abs(z.imag) < tol, z.real, z)\n z = numpy.real(z) if (z.imag == 0).all() else z\n\n return z\n\n\ndef check_array(arry, msg=\"Can't convert input to numpy.ndarray\"):\n \"\"\"check whether the input is a numpy array, and try to convert it\n\n Args:\n arry: the data to be checked\n msg: the message to be passed to error instance\n\n Returns:\n arry as a numpy.ndarray\n\n Raise:\n TypeError, if it fail to convert the input to a numpy array\n \"\"\"\n\n if isinstance(arry, (numbers.Number, numpy.number)):\n return numpy.array([arry])\n elif isinstance(arry, list):\n return numpy.array(arry)\n elif isinstance(arry, numpy.ndarray):\n return arry\n else:\n raise TypeError(msg)\n" ]
[ [ "numpy.real", "numpy.array", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ztultrebor/Kaggle-Santander_Challenge
[ "af5132f986089553a2192183f53ed3b0ec2bcf1b" ]
[ "XGB.py" ]
[ "#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\nfrom GridSearch import GridSearch\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom xgboost import XGBClassifier\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import roc_auc_score\n\n\n#===================================prep data==================================\n\nnp.random.seed(42)\n\ntarget_col = 'TARGET'\nid_col = 'ID'\n\nX_train = pd.read_csv('./Level1Data/Xtrain.csv')\nX_train['GBpred'] = pd.read_csv('./Level1Data/GBPredtrain.csv')\nX_train['ADApred'] = pd.read_csv('./Level1Data/ADAPredtrain.csv')\ny_train = pd.read_csv('./Level1Data/ytrain.csv')[target_col]\n\n\n#==========================Gradient Boost Classifier===========================\n\nparams = {\n 'n_estimators' : scipy.stats.geom(1/150.),\n 'max_depth' : scipy.stats.randint(2,7),\n 'learning_rate' : scipy.stats.expon(0, 0.01),\n 'min_samples_leaf' : scipy.stats.geom(1/10.),\n 'subsample' : scipy.stats.beta(2,1),\n 'colsample_bytree' : scipy.stats.beta(2,1)\n }\n\nclf = XGBClassifier()\n\nGridSearch(\n classifier = clf,\n paramdict = params,\n iters = 729,\n X = X_train,\n y = y_train,\n X_reserve = None,\n y_reserve = None\n)\n" ]
[ [ "pandas.read_csv", "numpy.random.seed", "scipy.stats.expon", "scipy.stats.beta", "scipy.stats.randint", "scipy.stats.geom" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
CatherineH/python-sewing
[ "01873f6341c7ce8e26d4e61aab9d52a586d667f6" ]
[ "merge_pieces.py" ]
[ "from svgpathtools import svg2paths, Path, Line\nfrom svgwrite import Drawing, rgb\nimport argparse\nfrom math import atan, asin, sin, cos, pi\nfrom numpy import argmin\nfrom utils import calc_overall_bbox\n\nparser = argparse.ArgumentParser(\n description='Generate a merged piece from two pieces by stretching the pattern piece along an edge')\nparser.add_argument('--filename', type=str,\n help='The filename of the svg with at least two pattern pieces.')\n\n\nclass Intersection(object):\n def __init__(self, point=1.0+1.0*1j, diff=0.0):\n self.point = point\n self.diff = diff\n\n\nclass PathClip(object):\n def __init__(self, index=0, t=0.0, target=1.0+1.0*1j):\n self.index = index\n self.t = t\n self.target = target\n\n\ndef flatten_shape(i, all_paths, merge_paths):\n dwg = Drawing(\"merge_output%s.svg\" % i, profile='tiny')\n\n def draw_line(start, end, offset=0.0):\n start += offset\n end += offset\n dwg.add(dwg.line(start=(start.real, start.imag), end=(end.real, end.imag),\n stroke_width=4, stroke=rgb(255, 0, 0)))\n\n dwg.add(dwg.path(**{'d': all_paths[i].d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(0, 0, 0)}))\n dwg.add(dwg.path(**{'d': merge_paths[i].d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(255, 0, 0)}))\n bbox = calc_overall_bbox(all_paths[i])\n width, height = abs(bbox[1] - bbox[0]), abs(bbox[3] - bbox[2])\n margin = 40\n lower = min(bbox[2], bbox[3]) + height+margin\n left = min(bbox[0], bbox[1]) + margin\n\n def draw_marker(loc, col=rgb(255, 0, 0), offset=(left, lower)):\n dwg.add(dwg.circle(center=(loc.real + offset[0], loc.imag + offset[1]), r=4,\n fill=col))\n\n max_axis = max(width, height)\n num_lines = 10\n points = [merge_paths[i].point(j / num_lines) for j in range(num_lines)] + [\n merge_paths[i].point(1.0)]\n angles = [\n asin((points[j + 1].imag - points[j].imag) / abs(points[j + 1] - points[j]))\n for j in range(num_lines)]\n\n ends = [max_axis * (sin(angle) + cos(angle) * 1j) for angle in\n angles]\n intersection_clips = []\n for j, end in enumerate(ends):\n end_point = end + points[j]\n intersections = other_paths[i].intersect(Line(start=points[j], end=end_point))\n\n for intersection in intersections[0]:\n intersection_point = intersection[1].point(intersection[2])\n target = merge_paths[i].length()*(1-j/num_lines) + abs(intersection_point - points[j])*1j\n intersection_clips.append(PathClip(index=other_paths[i].index(intersection[1]),\n t=intersection[2],\n target=target))\n if j % 10 == 0:\n draw_line(points[j], intersection_point)\n draw_marker(intersection_point, rgb(0, 255, 0), (0, 0))\n break\n\n # make the flexed points by chopping the chunks of the other paths out, then\n # translating and rotating them such that their end points line up with the diff lines\n def transform_side(sides, targets, angle_offset=0):\n def angle(point1, point2):\n diff = point1-point2\n if diff.real == 0:\n return 90.0\n return atan(diff.imag / diff.real)*180.0/pi\n # change this so that it has two targets\n transformed_side = Path(*sides)\n source_angle = angle(transformed_side.end, transformed_side.start) - \\\n angle(targets[0], targets[1])\n transformed_side = transformed_side.rotated(-source_angle+angle_offset)\n source = transformed_side.end if angle_offset == 0 else transformed_side.start\n diff = targets[1] - source\n transformed_side = transformed_side.translated(diff)\n draw_marker(targets[0], rgb(0, 200, 200))\n draw_marker(targets[1], rgb(0, 255, 255))\n transformed_diff = abs(transformed_side.start - transformed_side.end)\n targets_diff = 
abs(targets[0]-targets[1])\n if transformed_diff < targets_diff :\n transformed_side.insert(0, Line(start=targets[0],\n end=transformed_side.start))\n elif transformed_diff > targets_diff:\n # pop elements off until the transformed diff is smaller\n while transformed_diff > targets_diff:\n transformed_side.pop(0)\n transformed_diff = abs(transformed_side.start - transformed_side.end)\n print(\"path\", transformed_side)\n print(\"path is longer\", transformed_diff-targets_diff)\n return transformed_side\n\n start_index = 0\n curr_t = 0\n flexed_path = []\n t_resolution = 0.01\n if intersection_clips[0].index > intersection_clips[-1].index or \\\n (intersection_clips[0].index == intersection_clips[-1].index and\n intersection_clips[0].t > intersection_clips[-1].t):\n intersection_clips.reverse()\n # add the end of the shape to the intersection clips\n intersection_clips.append(PathClip(index=len(other_paths[i])-1, t=1.0,\n target=merge_paths[i].length()))\n last_target = 0\n for clip in intersection_clips:\n sides = []\n print(\"boundaries\", start_index, clip.index, curr_t, clip.t)\n upper_t = clip.t if start_index == clip.index else 1.0\n while start_index <= clip.index and curr_t < upper_t:\n curr_seg = other_paths[i][start_index]\n while curr_t < upper_t:\n max_t = curr_t + t_resolution if curr_t+t_resolution < clip.t else clip.t\n sides.append(Line(start=curr_seg.point(curr_t),\n end=curr_seg.point(max_t)))\n curr_t += t_resolution\n curr_t = upper_t\n if start_index != clip.index:\n curr_t = 0.0\n if upper_t == 1.0:\n start_index += 1\n upper_t = clip.t if start_index == clip.index else 1.0\n if len(sides) != 0:\n flexed_path.append(transform_side(sides, [last_target, clip.target]))\n last_target = clip.target\n\n straight_path = [Line(start=0, end=merge_paths[i].length())]\n for p in flexed_path:\n p = p.translated(left+lower*1j)\n dwg.add(dwg.path(d=p.d(), fill=\"none\", stroke_width=4,\n stroke=rgb(255, 0, 0)))\n\n transformed_path = flexed_path + straight_path\n transformed_path = Path(*transformed_path).translated(left + lower*1j)\n dwg.add(dwg.path(d=transformed_path.d(), fill=\"none\", stroke_width=4,\n stroke=rgb(0, 0, 0)))\n bbox = calc_overall_bbox(list(all_paths[i]) + list(transformed_path))\n\n width, height = abs(bbox[1] - bbox[0]), abs(bbox[3] - bbox[2])\n dwg.viewbox(min(bbox[0], bbox[1]), min(bbox[2], bbox[3]), width, height)\n dwg.save()\n return flexed_path\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n all_paths, attributes = svg2paths(args.filename)\n # how do we figure out what sections of the path are linked?\n diffs = [[abs(i.start - j.start) for j in all_paths[0]] for i in\n all_paths[1]]\n # get the location of the lowest value of the diffs - this will tell us the offset\n diff_min = [argmin(diff) for diff in diffs]\n offset_diffs = [diff_min[i + 1] - diff_min[i] for i in range(len(diff_min) - 1)]\n # pull out the longest contiguous section of 1s\n start_one = offset_diffs.index(1)\n end_one = offset_diffs[::-1].index(1)\n # for each of the shapes, construct a new shape where the section in the merge paths\n # is straight\n merge_paths = [Path(*list(all_paths[i])[start_one:end_one]) for i in range(0, 2)]\n other_paths = [Path(*list(all_paths[i])[end_one:]+list(all_paths[i])[0:start_one])\n for i in range(0, 2)]\n flexed_paths = [flatten_shape(i, all_paths, merge_paths) for i in range(0, 2)]\n dwg = Drawing(\"flexed_sides.svg\", profile=\"tiny\")\n upper_sizes = [0, 0]\n for i, path_list in enumerate(flexed_paths):\n bbox = 
calc_overall_bbox(path_list)\n if i == 0:\n upper_sizes = [max(bbox[0], bbox[1]), abs(bbox[3] - bbox[2])]\n transform = \"scale(1, {})\".format(-1 if i == 0 else 1)\n group = dwg.add(dwg.g(transform=transform))\n for path in path_list:\n path = path.translated(-min(bbox[2], bbox[3])*1j)\n group.add(dwg.path(**{'d': path.d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(0, 0, 0)}))\n bbox = calc_overall_bbox(flexed_paths[1])\n dwg.viewbox(min(bbox[0], bbox[1]), -upper_sizes[1],\n abs(min(bbox[0], bbox[1]) -max(bbox[0], bbox[1], upper_sizes[0])),\n abs(bbox[3] - bbox[2])+upper_sizes[1])\n dwg.save()\n # render the shapes selected\n dwg = Drawing(\"merge_output.svg\", profile='tiny')\n for path in all_paths:\n dwg.add(dwg.path(\n **{'d': path.d(), 'fill': \"none\", 'stroke-width': 4, 'stroke': rgb(0, 0, 0)}))\n dwg.add(dwg.path(**{'d': merge_paths[0].d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(255, 0, 0)}))\n dwg.add(dwg.path(**{'d': merge_paths[1].d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(0, 255, 0)}))\n bbox = calc_overall_bbox([x for x in all_paths[0]] + [x for x in all_paths[1]])\n dwg.viewbox(min(bbox[0], bbox[1]), min(bbox[2], bbox[3]), abs(bbox[1] - bbox[0]),\n abs(bbox[3] - bbox[2]))\n dwg.save()\n" ]
[ [ "numpy.argmin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
furminator/Furminator-MCPE-Tool
[ "4fe247351503781db2012815c1e40e881d9e1bba" ]
[ "viewports/camera.py" ]
[ "# -*- coding: utf_8 -*-\n# The above line is necessary, unless we want problems with encodings...\nimport sys\nfrom compass import CompassOverlay\nfrom raycaster import TooFarException\nimport raycaster\nimport keys\nimport pygame\n\nimport math\nimport copy\nimport numpy\nfrom config import config\nimport frustum\nimport logging\nimport glutils\nimport mceutils\nimport itertools\nimport pymclevel\n\nfrom math import isnan\nfrom datetime import datetime, timedelta\n\nfrom OpenGL import GL\nfrom OpenGL import GLU\n\nfrom albow import alert, AttrRef, Button, Column, input_text, Row, TableColumn, TableView, Widget, CheckBox, \\\n TextFieldWrapped, MenuButton, ChoiceButton, IntInputRow, TextInputRow, showProgress, IntField, ask\nfrom albow.controls import Label, ValueDisplay\nfrom albow.dialogs import Dialog, wrapped_label\nfrom albow.openglwidgets import GLViewport\nfrom albow.extended_widgets import BasicTextInputRow, CheckBoxLabel\nfrom albow.translate import _\nfrom albow.root import get_top_widget\nfrom pygame import mouse\nfrom depths import DepthOffset\nfrom editortools.operation import Operation\nfrom glutils import gl\nfrom editortools.nbtexplorer import SlotEditor\n\nclass SignEditOperation(Operation):\n def __init__(self, tool, level, tileEntity, backupTileEntity):\n self.tool = tool\n self.level = level\n self.tileEntity = tileEntity\n self.undoBackupEntityTag = backupTileEntity\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(self.tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(self.tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(self.tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(self.tileEntity), (1, 1, 1))\n\nclass CameraViewport(GLViewport):\n anchor = \"tlbr\"\n\n oldMousePosition = None\n dontShowMessageAgain = False\n\n def __init__(self, editor, def_enc=None):\n self.editor = editor\n global DEF_ENC\n DEF_ENC = def_enc or editor.mcedit.def_enc\n rect = editor.mcedit.rect\n GLViewport.__init__(self, rect)\n\n # Declare a pseudo showCommands function, since it is called by other objects before its creation in mouse_move.\n self.showCommands = lambda:None\n\n near = 0.5\n far = 4000.0\n\n self.near = near\n self.far = far\n\n self.brake = False\n self.lastTick = datetime.now()\n # self.nearheight = near * tang\n\n self.cameraPosition = (16., 45., 16.)\n self.velocity = [0., 0., 0.]\n\n self.yaw = -45. 
# degrees\n self._pitch = 0.1\n\n self.cameraVector = self._cameraVector()\n\n # A state machine to dodge an apparent bug in pygame that generates erroneous mouse move events\n # 0 = bad event already happened\n # 1 = app just started or regained focus since last bad event\n # 2 = mouse cursor was hidden after state 1, next event will be bad\n self.avoidMouseJumpBug = 1\n\n config.settings.drawSky.addObserver(self)\n config.settings.drawFog.addObserver(self)\n config.settings.superSecretSettings.addObserver(self)\n config.settings.showCeiling.addObserver(self)\n config.controls.cameraAccel.addObserver(self, \"accelFactor\")\n config.controls.cameraMaxSpeed.addObserver(self, \"maxSpeed\")\n config.controls.cameraBrakingSpeed.addObserver(self, \"brakeMaxSpeed\")\n config.controls.invertMousePitch.addObserver(self)\n config.controls.autobrake.addObserver(self)\n config.controls.swapAxes.addObserver(self)\n config.settings.compassToggle.addObserver(self)\n\n config.settings.fov.addObserver(self, \"fovSetting\", callback=self.updateFov)\n\n self.mouseVector = (0, 0, 0)\n\n self.root = self.get_root()\n self.hoveringCommandBlock = [False, \"\"]\n self.block_info_parsers = None\n # self.add(DebugDisplay(self, \"cameraPosition\", \"blockFaceUnderCursor\", \"mouseVector\", \"mouse3dPoint\"))\n\n @property\n def pitch(self):\n return self._pitch\n\n @pitch.setter\n def pitch(self, val):\n self._pitch = min(89.999, max(-89.999, val))\n\n def updateFov(self, val=None):\n hfov = self.fovSetting\n fov = numpy.degrees(2.0 * numpy.arctan(self.size[0] / self.size[1] * numpy.tan(numpy.radians(hfov) * 0.5)))\n\n self.fov = fov\n self.tang = numpy.tan(numpy.radians(fov))\n\n def stopMoving(self):\n self.velocity = [0, 0, 0]\n\n def brakeOn(self):\n self.brake = True\n\n def brakeOff(self):\n self.brake = False\n\n tickInterval = 1000 / config.settings.targetFPS.get()\n\n oldPosition = (0, 0, 0)\n\n flyMode = config.settings.flyMode.property()\n\n def tickCamera(self, frameStartTime, inputs, inSpace):\n timePassed = (frameStartTime - self.lastTick).microseconds\n if timePassed <= self.tickInterval * 1000 or not pygame.key.get_focused():\n return\n\n self.lastTick = frameStartTime\n timeDelta = float(timePassed) / 1000000.\n timeDelta = min(timeDelta, 0.125) # 8fps lower limit!\n drag = config.controls.cameraDrag.get()\n accel_factor = drag + config.controls.cameraAccel.get()\n\n # if we're in space, move faster\n\n drag_epsilon = 10.0 * timeDelta\n\n if self.brake:\n max_speed = self.brakeMaxSpeed\n else:\n max_speed = self.maxSpeed\n\n if inSpace or self.root.sprint:\n accel_factor *= 3.0\n max_speed *= 3.0\n self.root.sprint = False\n elif config.settings.viewMode.get() == \"Chunk\":\n accel_factor *= 2.0\n max_speed *= 2.0\n\n pi = self.editor.cameraPanKeys\n mouseSpeed = config.controls.mouseSpeed.get()\n self.yaw += pi[0] * mouseSpeed\n self.pitch += pi[1] * mouseSpeed\n\n if config.settings.viewMode.get() == \"Chunk\":\n (dx, dy, dz) = (0, -0.25, -1)\n self.yaw = -180\n self.pitch = 10\n elif self.flyMode:\n (dx, dy, dz) = self._anglesToVector(self.yaw, 0)\n elif self.swapAxes:\n p = self.pitch\n if p > 80:\n p = 0\n\n (dx, dy, dz) = self._anglesToVector(self.yaw, p)\n\n else:\n (dx, dy, dz) = self._cameraVector()\n\n velocity = self.velocity # xxx learn to use matrix/vector libs\n i = inputs\n yaw = numpy.radians(self.yaw)\n cosyaw = -numpy.cos(yaw)\n sinyaw = numpy.sin(yaw)\n\n directedInputs = mceutils.normalize((\n i[0] * cosyaw + i[2] * dx,\n i[1] + i[2] * dy,\n i[2] * dz - i[0] * sinyaw,\n ))\n\n # 
give the camera an impulse according to the state of the inputs and in the direction of the camera\n cameraAccel = map(lambda x: x * accel_factor * timeDelta, directedInputs)\n # cameraImpulse = map(lambda x: x*impulse_factor, directedInputs)\n\n newVelocity = map(lambda a, b: a + b, velocity, cameraAccel)\n velocityDir, speed = mceutils.normalize_size(newVelocity)\n\n # apply drag\n if speed:\n if self.autobrake and not any(inputs):\n speed *= 0.15\n else:\n\n sign = speed / abs(speed)\n speed = abs(speed)\n speed = speed - (drag * timeDelta)\n if speed < 0.0:\n speed = 0.0\n speed *= sign\n\n speed = max(-max_speed, min(max_speed, speed))\n\n if abs(speed) < drag_epsilon:\n speed = 0\n\n velocity = map(lambda a: a * speed, velocityDir)\n\n # velocity = map(lambda p,d: p + d, velocity, cameraImpulse)\n d = map(lambda a, b: abs(a - b), self.cameraPosition, self.oldPosition)\n if d[0] + d[2] > 32.0:\n self.oldPosition = self.cameraPosition\n self.updateFloorQuad()\n\n self.cameraPosition = map(lambda p, d: p + d * timeDelta, self.cameraPosition, velocity)\n if self.cameraPosition[1] > 3800.:\n self.cameraPosition[1] = 3800.\n elif self.cameraPosition[1] < -1000.:\n self.cameraPosition[1] = -1000.\n\n self.velocity = velocity\n self.cameraVector = self._cameraVector()\n\n self.editor.renderer.position = self.cameraPosition\n if self.editor.currentTool.previewRenderer:\n self.editor.currentTool.previewRenderer.position = self.cameraPosition\n\n def setModelview(self):\n pos = self.cameraPosition\n look = numpy.array(self.cameraPosition)\n look = look.astype(float) + self.cameraVector\n up = (0, 1, 0)\n GLU.gluLookAt(pos[0], pos[1], pos[2],\n look[0], look[1], look[2],\n up[0], up[1], up[2])\n\n def _cameraVector(self):\n return self._anglesToVector(self.yaw, self.pitch)\n\n @staticmethod\n def _anglesToVector(yaw, pitch):\n def nanzero(x):\n if isnan(x):\n return 0\n else:\n return x\n\n dx = -math.sin(math.radians(yaw)) * math.cos(math.radians(pitch))\n dy = -math.sin(math.radians(pitch))\n dz = math.cos(math.radians(yaw)) * math.cos(math.radians(pitch))\n return map(nanzero, [dx, dy, dz])\n\n def updateMouseVector(self):\n self.mouseVector = self._mouseVector()\n\n def _mouseVector(self):\n \"\"\"\n returns a vector reflecting a ray cast from the camera\n position to the mouse position on the near plane\n \"\"\"\n x, y = mouse.get_pos()\n # if (x, y) not in self.rect:\n # return (0, 0, 0); # xxx\n\n y = self.root.height - y\n point1 = unproject(x, y, 0.0)\n point2 = unproject(x, y, 1.0)\n v = numpy.array(point2) - point1\n v = mceutils.normalize(v)\n return v\n\n def _blockUnderCursor(self, center=False):\n \"\"\"\n returns a point in 3d space that was determined by\n reading the depth buffer value\n \"\"\"\n try:\n GL.glReadBuffer(GL.GL_BACK)\n except Exception:\n logging.exception('Exception during glReadBuffer')\n ws = self.root.size\n if center:\n x, y = ws\n x //= 2\n y //= 2\n else:\n x, y = mouse.get_pos()\n if (x < 0 or y < 0 or x >= ws[0] or\n y >= ws[1]):\n return 0, 0, 0\n\n y = ws[1] - y\n\n try:\n pixel = GL.glReadPixels(x, y, 1, 1, GL.GL_DEPTH_COMPONENT, GL.GL_FLOAT)\n newpoint = unproject(x, y, pixel[0])\n except Exception:\n return 0, 0, 0\n\n return newpoint\n\n def updateBlockFaceUnderCursor(self):\n focusPair = None\n if not self.enableMouseLag or self.editor.frames & 1:\n self.updateMouseVector()\n if self.editor.mouseEntered:\n if not self.mouseMovesCamera:\n try:\n focusPair = raycaster.firstBlock(self.cameraPosition, self._mouseVector(), self.editor.level,\n 100, 
config.settings.viewMode.get())\n except TooFarException:\n mouse3dPoint = self._blockUnderCursor()\n focusPair = self._findBlockFaceUnderCursor(mouse3dPoint)\n elif self.editor.longDistanceMode:\n mouse3dPoint = self._blockUnderCursor(True)\n focusPair = self._findBlockFaceUnderCursor(mouse3dPoint)\n\n # otherwise, find the block at a controllable distance in front of the camera\n if focusPair is None:\n if self.blockFaceUnderCursor is None or self.mouseMovesCamera:\n focusPair = (self.getCameraPoint(), (0, 0, 0))\n else:\n focusPair = self.blockFaceUnderCursor\n\n try:\n if focusPair[0] is not None and self.editor.level.tileEntityAt(*focusPair[0]):\n changed = False\n te = self.editor.level.tileEntityAt(*focusPair[0])\n backupTE = copy.deepcopy(te)\n if te[\"id\"].value == \"Sign\" or self.editor.level.defsIds.mcedit_ids.get(te[\"id\"].value) in (\"DEF_BLOCKS_STANDING_SIGN\", \"DEFS_BLOCKS_WALL_SIGN\"):\n if \"Text1\" in te and \"Text2\" in te and \"Text3\" in te and \"Text4\" in te:\n for i in xrange(1,5):\n if len(te[\"Text\"+str(i)].value) > 32767:\n te[\"Text\"+str(i)] = pymclevel.TAG_String(str(te[\"Text\"+str(i)].value)[:32767])\n changed = True\n if changed:\n response = None\n if not self.dontShowMessageAgain:\n response = ask(\"Found a sign that exceeded the maximum character limit. Automatically trimmed the sign to prevent crashes.\", responses=[\"Ok\", \"Don't show this again\"])\n if response is not None and response == \"Don't show this again\":\n self.dontShowMessageAgain = True\n op = SignEditOperation(self.editor, self.editor.level, te, backupTE)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n except:\n pass\n\n self.blockFaceUnderCursor = focusPair\n\n def _findBlockFaceUnderCursor(self, projectedPoint):\n \"\"\"Returns a (pos, Face) pair or None if one couldn't be found\"\"\"\n d = [0, 0, 0]\n\n try:\n intProjectedPoint = map(int, map(numpy.floor, projectedPoint))\n except ValueError:\n return None # catch NaNs\n intProjectedPoint[1] = max(-1, intProjectedPoint[1])\n\n # find out which face is under the cursor. 
xxx do it more precisely\n faceVector = ((projectedPoint[0] - (intProjectedPoint[0] + 0.5)),\n (projectedPoint[1] - (intProjectedPoint[1] + 0.5)),\n (projectedPoint[2] - (intProjectedPoint[2] + 0.5))\n )\n\n av = map(abs, faceVector)\n\n i = av.index(max(av))\n delta = faceVector[i]\n if delta < 0:\n d[i] = -1\n else:\n d[i] = 1\n\n potentialOffsets = []\n\n try:\n block = self.editor.level.blockAt(*intProjectedPoint)\n except (EnvironmentError, pymclevel.ChunkNotPresent):\n return intProjectedPoint, d\n\n if block == pymclevel.alphaMaterials.SnowLayer.ID:\n potentialOffsets.append((0, 1, 0))\n else:\n # discard any faces that aren't likely to be exposed\n for face, offsets in pymclevel.faceDirections:\n point = map(lambda a, b: a + b, intProjectedPoint, offsets)\n try:\n neighborBlock = self.editor.level.blockAt(*point)\n if block != neighborBlock:\n potentialOffsets.append(offsets)\n except (EnvironmentError, pymclevel.ChunkNotPresent):\n pass\n\n # check each component of the face vector to see if that face is exposed\n if tuple(d) not in potentialOffsets:\n av[i] = 0\n i = av.index(max(av))\n d = [0, 0, 0]\n delta = faceVector[i]\n if delta < 0:\n d[i] = -1\n else:\n d[i] = 1\n if tuple(d) not in potentialOffsets:\n av[i] = 0\n i = av.index(max(av))\n d = [0, 0, 0]\n delta = faceVector[i]\n if delta < 0:\n d[i] = -1\n else:\n d[i] = 1\n\n if tuple(d) not in potentialOffsets:\n if len(potentialOffsets):\n d = potentialOffsets[0]\n else:\n # use the top face as a fallback\n d = [0, 1, 0]\n\n return intProjectedPoint, d\n\n @property\n def ratio(self):\n return self.width / float(self.height)\n\n startingMousePosition = None\n\n def mouseLookOn(self):\n self.root.capture_mouse(self)\n self.focus_switch = None\n self.startingMousePosition = mouse.get_pos()\n\n if self.avoidMouseJumpBug == 1:\n self.avoidMouseJumpBug = 2\n\n def mouseLookOff(self):\n self.root.capture_mouse(None)\n if self.startingMousePosition:\n mouse.set_pos(*self.startingMousePosition)\n self.startingMousePosition = None\n\n @property\n def mouseMovesCamera(self):\n return self.root.captured_widget is not None\n\n def toggleMouseLook(self):\n if not self.mouseMovesCamera:\n self.mouseLookOn()\n else:\n self.mouseLookOff()\n\n # mobs is overrided in __init__\n mobs = pymclevel.Entity.monsters + [\"[Custom]\"]\n\n @mceutils.alertException\n def editMonsterSpawner(self, point):\n mobs = self.mobs\n _mobs = {}\n # Get the mobs from the versionned data\n defsIds = self.editor.level.defsIds\n mcedit_defs = defsIds.mcedit_defs\n mcedit_ids = defsIds.mcedit_ids\n if mcedit_defs.get('spawner_monsters'):\n mobs = []\n for a in mcedit_defs['spawner_monsters']:\n _id = mcedit_ids[a]\n name = _(mcedit_defs[_id]['name'])\n _mobs[name] = a\n _mobs[a] = name\n mobs.append(name)\n\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(mcedit_defs.get(\"MobSpawner\", \"MobSpawner\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"Delay\"] = pymclevel.TAG_Short(120)\n tileEntity[\"EntityId\"] = pymclevel.TAG_String(mcedit_defs.get(mobs[0], mobs[0]))\n self.editor.level.addTileEntity(tileEntity)\n\n panel = Dialog()\n\n def addMob(id):\n if id not in mobs:\n mobs.insert(0, id)\n mobTable.selectedIndex = 0\n\n def selectTableRow(i, evt):\n if mobs[i] == \"[Custom]\":\n id 
= input_text(\"Type in an EntityID for this spawner. Invalid IDs may crash Minecraft.\", 150)\n if id:\n addMob(id)\n else:\n return\n mobTable.selectedIndex = mobs.index(id)\n else:\n mobTable.selectedIndex = i\n\n if evt.num_clicks == 2:\n panel.dismiss()\n\n mobTable = TableView(columns=(\n TableColumn(\"\", 200),\n )\n )\n mobTable.num_rows = lambda: len(mobs)\n mobTable.row_data = lambda i: (mobs[i],)\n mobTable.row_is_selected = lambda x: x == mobTable.selectedIndex\n mobTable.click_row = selectTableRow\n mobTable.selectedIndex = 0\n\n def selectedMob():\n val = mobs[mobTable.selectedIndex]\n return _mobs.get(val, val)\n\n def cancel():\n mobs[mobTable.selectedIndex] = id\n panel.dismiss()\n\n if \"EntityId\" in tileEntity:\n _id = tileEntity[\"EntityId\"].value\n elif \"SpawnData\" in tileEntity:\n _id = tileEntity[\"SpawnData\"][\"id\"].value\n else:\n _id = \"[Custom]\"\n\n # Something weird here since the first implementation of the versionned definition.\n # It may happen 'mcedit_defs.get(mcedit_ids.get(_id, _id), {}).get(\"name\", _id)'\n # does not return the wanted data (dict).\n # Could not yet debug that, but I guess it is related to the versionned data loading...\n # -- D.C.-G.\n # print mcedit_ids.get(_id, _id)\n # print mcedit_defs.get(mcedit_ids.get(_id, _id), {})\n _id2 = mcedit_defs.get(mcedit_ids.get(_id, _id), {})\n if isinstance(_id2, (str, unicode)):\n _id = _id2\n id = mcedit_defs.get(mcedit_ids.get(_id, _id), {}).get(\"name\", _id)\n\n addMob(id)\n\n mobTable.selectedIndex = mobs.index(id)\n oldChoiceCol = Column((Label(_(\"Current: \") + _mobs.get(id, id), align='l', width=200), ))\n newChoiceCol = Column((ValueDisplay(width=200, get_value=lambda: _(\"Change to: \") + selectedMob()), mobTable))\n\n lastRow = Row((Button(\"OK\", action=panel.dismiss), Button(\"Cancel\", action=cancel)))\n panel.add(Column((oldChoiceCol, newChoiceCol, lastRow)))\n panel.shrink_wrap()\n panel.present()\n\n class MonsterSpawnerEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n if id != selectedMob():\n # If the level has a 'setSpawnerData, call it instead of using the code here\n if hasattr(self.editor.level, \"setSpawnerData\"):\n tileEntity = self.editor.level.setSpawnerData(tileEntity, selectedMob())\n else:\n if \"EntityId\" in tileEntity:\n tileEntity[\"EntityId\"] = pymclevel.TAG_String(selectedMob())\n if \"SpawnData\" in tileEntity:\n # Try to not clear the spawn data, but only update the mob id\n # tileEntity[\"SpawnData\"] = pymclevel.TAG_Compound()\n tag_id = pymclevel.TAG_String(selectedMob())\n if \"id\" in tileEntity[\"SpawnData\"]:\n tag_id.name = \"id\"\n tileEntity[\"SpawnData\"][\"id\"] = tag_id\n if \"EntityId\" in tileEntity[\"SpawnData\"]:\n tileEntity[\"SpawnData\"][\"EntityId\"] = tag_id\n if \"SpawnPotentials\" in tileEntity:\n for potential in tileEntity[\"SpawnPotentials\"]:\n if 
\"Entity\" in potential:\n # MC 1.9+\n if potential[\"Entity\"][\"id\"].value == id or (\"EntityId\" in potential[\"Entity\"] and potential[\"Entity\"][\"EntityId\"].value == id):\n potential[\"Entity\"] = pymclevel.TAG_Compound()\n potential[\"Entity\"][\"id\"] = pymclevel.TAG_String(selectedMob())\n elif \"Properties\" in potential:\n # MC before 1.9\n if \"Type\" in potential and potential[\"Type\"].value == id:\n potential[\"Type\"] = pymclevel.TAG_String(selectedMob())\n # We also can change some other values in the Properties tag, but it is useless in MC 1.8+.\n # The fact is this data will not be updated by the game after the mob type is changed, but the old mob will not spawn.\n # put_entityid = False\n # put_id = False\n # if \"EntityId\" in potential[\"Properties\"] and potential[\"Properties\"][\"EntityId\"].value == id:\n # put_entityid = True\n # if \"id\" in potential[\"Properties\"] and potential[\"Properties\"][\"id\"].value == id:\n # put_id = True\n # new_props = pymclevel.TAG_Compound()\n # if put_entityid:\n # new_props[\"EntityId\"] = pymclevel.TAG_String(selectedMob())\n # if put_id:\n # new_props[\"id\"] = pymclevel.TAG_String(selectedMob())\n # potential[\"Properties\"] = new_props\n op = MonsterSpawnerEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n @mceutils.alertException\n def editJukebox(self, point):\n discs = {\n \"[No Record]\": None,\n \"13\": 2256,\n \"cat\": 2257,\n \"blocks\": 2258,\n \"chirp\": 2259,\n \"far\": 2260,\n \"mall\": 2261,\n \"mellohi\": 2262,\n \"stal\": 2263,\n \"strad\": 2264,\n \"ward\": 2265,\n \"11\": 2266,\n \"wait\": 2267\n }\n\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(\"RecordPlayer\")\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n self.editor.level.addTileEntity(tileEntity)\n\n panel = Dialog()\n\n def selectTableRow(i, evt):\n discTable.selectedIndex = i\n\n if evt.num_clicks == 2:\n panel.dismiss()\n\n discTable = TableView(columns=(\n TableColumn(\"\", 200),\n )\n )\n discTable.num_rows = lambda: len(discs)\n discTable.row_data = lambda i: (selectedDisc(i),)\n discTable.row_is_selected = lambda x: x == discTable.selectedIndex\n discTable.click_row = selectTableRow\n discTable.selectedIndex = 0\n\n def selectedDisc(id):\n if id == 0:\n return \"[No Record]\"\n return discs.keys()[discs.values().index(id + 2255)]\n\n def cancel():\n if id == \"[No Record]\":\n discTable.selectedIndex = 0\n else:\n discTable.selectedIndex = discs[id] - 2255\n panel.dismiss()\n\n if \"RecordItem\" in tileEntity:\n if tileEntity[\"RecordItem\"][\"id\"].value == \"minecraft:air\":\n id = \"[No Record]\"\n else:\n id = tileEntity[\"RecordItem\"][\"id\"].value[17:]\n elif \"Record\" in tileEntity:\n if tileEntity[\"Record\"].value == 0:\n id = \"[No Record]\"\n else:\n id = selectedDisc(tileEntity[\"Record\"].value - 2255)\n else:\n id = \"[No Record]\"\n\n if id == \"[No Record]\":\n discTable.selectedIndex = 0\n else:\n discTable.selectedIndex = discs[id] - 2255\n\n oldChoiceCol = Column((Label(_(\"Current: \") + id, align='l', width=200), ))\n newChoiceCol = Column((ValueDisplay(width=200, get_value=lambda: _(\"Change to: \") + selectedDisc(discTable.selectedIndex)), discTable))\n\n lastRow = 
Row((Button(\"OK\", action=panel.dismiss), Button(\"Cancel\", action=cancel)))\n panel.add(Column((oldChoiceCol, newChoiceCol, lastRow)))\n panel.shrink_wrap()\n panel.present()\n\n class JukeboxEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n if id != selectedDisc(discTable.selectedIndex):\n if \"RecordItem\" in tileEntity:\n del tileEntity[\"RecordItem\"]\n if discTable.selectedIndex == 0:\n tileEntity[\"Record\"] = pymclevel.TAG_Int(0)\n self.editor.level.setBlockDataAt(tileEntity[\"x\"].value, tileEntity[\"y\"].value, tileEntity[\"z\"].value, 0)\n else:\n tileEntity[\"Record\"] = pymclevel.TAG_Int(discTable.selectedIndex + 2255)\n self.editor.level.setBlockDataAt(tileEntity[\"x\"].value, tileEntity[\"y\"].value, tileEntity[\"z\"].value, 1)\n op = JukeboxEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n @mceutils.alertException\n def editNoteBlock(self, point):\n notes = [\n \"F# (0.5)\", \"G (0.53)\", \"G# (0.56)\",\n \"A (0.6)\", \"A# (0.63)\", \"B (0.67)\",\n \"C (0.7)\", \"C# (0.75)\", \"D (0.8)\",\n \"D# (0.85)\", \"E (0.9)\", \"F (0.95)\",\n \"F# (1.0)\", \"G (1.05)\", \"G# (1.1)\",\n \"A (1.2)\", \"A# (1.25)\", \"B (1.32)\",\n \"C (1.4)\", \"C# (1.5)\", \"D (1.6)\",\n \"D# (1.7)\", \"E (1.8)\", \"F (1.9)\",\n \"F# (2.0)\"\n ]\n\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(self.editor.level.defsIds.mcedit_defs.get(\"MobSpawner\", \"MobSpawner\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"note\"] = pymclevel.TAG_Byte(0)\n self.editor.level.addTileEntity(tileEntity)\n\n panel = Dialog()\n\n def selectTableRow(i, evt):\n noteTable.selectedIndex = i\n\n if evt.num_clicks == 2:\n panel.dismiss()\n\n noteTable = TableView(columns=(\n TableColumn(\"\", 200),\n )\n )\n noteTable.num_rows = lambda: len(notes)\n noteTable.row_data = lambda i: (notes[i],)\n noteTable.row_is_selected = lambda x: x == noteTable.selectedIndex\n noteTable.click_row = selectTableRow\n noteTable.selectedIndex = 0\n\n def selectedNote():\n return notes[noteTable.selectedIndex]\n\n def cancel():\n noteTable.selectedIndex = id\n panel.dismiss()\n\n id = tileEntity[\"note\"].value\n\n noteTable.selectedIndex = id\n\n oldChoiceCol = Column((Label(_(\"Current: \") + notes[id], align='l', width=200), ))\n newChoiceCol = Column((ValueDisplay(width=200, get_value=lambda: _(\"Change to: \") + selectedNote()), noteTable))\n\n lastRow = Row((Button(\"OK\", action=panel.dismiss), Button(\"Cancel\", action=cancel)))\n panel.add(Column((oldChoiceCol, newChoiceCol, lastRow)))\n 
panel.shrink_wrap()\n panel.present()\n\n class NoteBlockEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n if id != noteTable.selectedIndex:\n tileEntity[\"note\"] = pymclevel.TAG_Byte(noteTable.selectedIndex)\n op = NoteBlockEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n @mceutils.alertException\n def editSign(self, point):\n\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n linekeys = [\"Text\" + str(i) for i in xrange(1, 5)]\n\n # From version 1.8, signs accept Json format.\n # 1.9 does no more support the old raw string fomat.\n splitVersion = self.editor.level.gameVersion.split('.')\n newFmtVersion = ['1','9']\n fmt = \"\"\n json_fmt = False\n\n f = lambda a,b: (a + (['0'] * max(len(b) - len(a), 0)), b + (['0'] * max(len(a) - len(b), 0)))\n if False not in map(lambda x,y: (int(x) if x.isdigit() else x) >= (int(y) if y.isdigit() else y),*f(splitVersion, newFmtVersion))[:2]:\n json_fmt = True\n fmt = '{\"text\":\"\"}'\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n # Don't know how to handle the difference between wall and standing signs for now...\n # Just let this like it is until we can find the way!\n tileEntity[\"id\"] = pymclevel.TAG_String(\"Sign\")\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n for l in linekeys:\n tileEntity[l] = pymclevel.TAG_String(fmt)\n self.editor.level.addTileEntity(tileEntity)\n\n panel = Dialog()\n\n lineFields = [TextFieldWrapped(width=400) for l in linekeys]\n for l, f in zip(linekeys, lineFields):\n\n f.value = tileEntity[l].value\n\n # Double quotes handling for olf sign text format.\n if f.value == 'null':\n f.value = fmt\n elif json_fmt and f.value == '':\n f.value = fmt\n else:\n if f.value.startswith('\"') and f.value.endswith('\"'):\n f.value = f.value[1:-1]\n if '\\\\\"' in f.value:\n f.value = f.value.replace('\\\\\"', '\"')\n\n colors = [\n u\"§0 Black\",\n u\"§1 Dark Blue\",\n u\"§2 Dark Green\",\n u\"§3 Dark Aqua\",\n u\"§4 Dark Red\",\n u\"§5 Dark Purple\",\n u\"§6 Gold\",\n u\"§7 Gray\",\n u\"§8 Dark Gray\",\n u\"§9 Blue\",\n u\"§a Green\",\n u\"§b Aqua\",\n u\"§c Red\",\n u\"§d Light Purple\",\n u\"§e Yellow\",\n u\"§f White\",\n ]\n\n def menu_picked(index):\n c = u\"§%d\"%index\n currentField = panel.focus_switch.focus_switch\n currentField.text += c # xxx view hierarchy\n currentField.insertion_point = len(currentField.text)\n\n def changeSign():\n unsavedChanges = False\n fmt = '\"{}\"'\n u_fmt = u'\"%s\"'\n if json_fmt:\n fmt = '{}'\n u_fmt = u'%s'\n for l, f in zip(linekeys, lineFields):\n oldText = fmt.format(tileEntity[l])\n tileEntity[l] = pymclevel.TAG_String(u_fmt%f.value[:255])\n if 
fmt.format(tileEntity[l]) != oldText and not unsavedChanges:\n unsavedChanges = True\n if unsavedChanges:\n op = SignEditOperation(self.editor, self.editor.level, tileEntity, undoBackupEntityTag)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n panel.dismiss()\n\n colorMenu = MenuButton(\"Add Color Code...\", colors, menu_picked=menu_picked)\n\n row = Row((Button(\"OK\", action=changeSign), Button(\"Cancel\", action=panel.dismiss)))\n\n column = [Label(\"Edit Sign\")] + lineFields + [colorMenu, row]\n\n panel.add(Column(column))\n panel.shrink_wrap()\n panel.present()\n\n @mceutils.alertException\n def editSkull(self, point):\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n skullTypes = {\n \"Skeleton\": 0,\n \"Wither Skeleton\": 1,\n \"Zombie\": 2,\n \"Player\": 3,\n \"Creeper\": 4,\n }\n\n inverseSkullType = {\n 0: \"Skeleton\",\n 1: \"Wither Skeleton\",\n 2: \"Zombie\",\n 3: \"Player\",\n 4: \"Creeper\",\n }\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n # Don't know how to handle the difference between skulls in this context signs for now...\n # Tests nedded!\n tileEntity[\"id\"] = pymclevel.TAG_String(\"Skull\")\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"SkullType\"] = pymclevel.TAG_Byte(3)\n self.editor.level.addTileEntity(tileEntity)\n\n titleLabel = Label(\"Edit Skull Data\")\n usernameField = TextFieldWrapped(width=150)\n panel = Dialog()\n skullMenu = ChoiceButton(map(str, skullTypes))\n\n if \"Owner\" in tileEntity:\n usernameField.value = str(tileEntity[\"Owner\"][\"Name\"].value)\n elif \"ExtraType\" in tileEntity:\n usernameField.value = str(tileEntity[\"ExtraType\"].value)\n else:\n usernameField.value = \"\"\n\n oldUserName = usernameField.value\n skullMenu.selectedChoice = inverseSkullType[tileEntity[\"SkullType\"].value]\n oldSelectedSkull = skullMenu.selectedChoice\n\n class SkullEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def updateSkull():\n if usernameField.value != oldUserName or oldSelectedSkull != skullMenu.selectedChoice:\n tileEntity[\"ExtraType\"] = pymclevel.TAG_String(usernameField.value)\n tileEntity[\"SkullType\"] = pymclevel.TAG_Byte(skullTypes[skullMenu.selectedChoice])\n if \"Owner\" in tileEntity:\n del tileEntity[\"Owner\"]\n op = SkullEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n chunk = self.editor.level.getChunk(int(int(point[0]) / 16), int(int(point[2]) / 16))\n chunk.dirty = True\n panel.dismiss()\n\n okBTN = Button(\"OK\", action=updateSkull)\n cancel = Button(\"Cancel\", action=panel.dismiss)\n\n column = [titleLabel, usernameField, skullMenu, okBTN, cancel]\n 
panel.add(Column(column))\n panel.shrink_wrap()\n panel.present()\n\n @mceutils.alertException\n def editCommandBlock(self, point):\n panel = Dialog()\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(self.editor.level.defsIds.mcedit_defs.get(\"Control\", \"Control\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"Command\"] = pymclevel.TAG_String()\n tileEntity[\"CustomName\"] = pymclevel.TAG_String(\"@\")\n tileEntity[\"TrackOutput\"] = pymclevel.TAG_Byte(0)\n tileEntity[\"SuccessCount\"] = pymclevel.TAG_Int(0)\n self.editor.level.addTileEntity(tileEntity)\n\n titleLabel = Label(\"Edit Command Block\")\n commandField = TextFieldWrapped(width=650)\n nameField = TextFieldWrapped(width=200)\n successField = IntInputRow(\"SuccessCount\", min=0, max=15)\n trackOutput = CheckBox()\n\n # Fix for the '§ is ħ' issue\n# try:\n# commandField.value = tileEntity[\"Command\"].value.decode(\"unicode-escape\")\n# except:\n# commandField.value = tileEntity[\"Command\"].value\n commandField.value = tileEntity[\"Command\"].value\n\n oldCommand = commandField.value\n trackOutput.value = tileEntity.get(\"TrackOutput\", pymclevel.TAG_Byte(0)).value\n oldTrackOutput = trackOutput.value\n nameField.value = tileEntity.get(\"CustomName\", pymclevel.TAG_String(\"@\")).value\n oldNameField = nameField.value\n successField.subwidgets[1].value = tileEntity.get(\"SuccessCount\", pymclevel.TAG_Int(0)).value\n oldSuccess = successField.subwidgets[1].value\n\n class CommandBlockEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def updateCommandBlock():\n if oldCommand != commandField.value or oldTrackOutput != trackOutput.value or oldNameField != nameField.value or oldSuccess != successField.subwidgets[1].value:\n tileEntity[\"Command\"] = pymclevel.TAG_String(commandField.value)\n tileEntity[\"TrackOutput\"] = pymclevel.TAG_Byte(trackOutput.value)\n tileEntity[\"CustomName\"] = pymclevel.TAG_String(nameField.value)\n tileEntity[\"SuccessCount\"] = pymclevel.TAG_Int(successField.subwidgets[1].value)\n\n op = CommandBlockEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n chunk = self.editor.level.getChunk(int(int(point[0]) / 16), int(int(point[2]) / 16))\n chunk.dirty = True\n panel.dismiss()\n\n okBTN = Button(\"OK\", action=updateCommandBlock)\n cancel = Button(\"Cancel\", action=panel.dismiss)\n column = [titleLabel, Label(\"Command:\"), commandField, Row((Label(\"Custom Name:\"), nameField)), successField,\n Row((Label(\"Track Output\"), trackOutput)), okBTN, cancel]\n 
panel.add(Column(column))\n panel.shrink_wrap()\n panel.present()\n\n return\n\n @mceutils.alertException\n def editContainer(self, point, containerID):\n tileEntityTag = self.editor.level.tileEntityAt(*point)\n if tileEntityTag is None:\n tileEntityTag = pymclevel.TileEntity.Create(containerID)\n pymclevel.TileEntity.setpos(tileEntityTag, point)\n self.editor.level.addTileEntity(tileEntityTag)\n\n if tileEntityTag[\"id\"].value != containerID:\n return\n\n undoBackupEntityTag = copy.deepcopy(tileEntityTag)\n\n def itemProp(key):\n # xxx do validation here\n def getter(self):\n if 0 == len(tileEntityTag[\"Items\"]):\n return 0\n return tileEntityTag[\"Items\"][self.selectedItemIndex][key].value\n\n def setter(self, val):\n if 0 == len(tileEntityTag[\"Items\"]):\n return\n self.dirty = True\n tileEntityTag[\"Items\"][self.selectedItemIndex][key].value = val\n\n return property(getter, setter)\n\n class ChestWidget(Widget):\n dirty = False\n Slot = itemProp(\"Slot\")\n id = itemProp(\"id\")\n Damage = itemProp(\"Damage\")\n Count = itemProp(\"Count\")\n itemLimit = pymclevel.TileEntity.maxItems.get(containerID, 26)\n\n def slotFormat(slot):\n slotNames = pymclevel.TileEntity.slotNames.get(containerID)\n if slotNames:\n return slotNames.get(slot, slot)\n return slot\n\n chestWidget = ChestWidget()\n chestItemTable = TableView(columns=[\n TableColumn(\"Slot\", 60, \"l\", fmt=slotFormat),\n TableColumn(\"ID / ID Name\", 345, \"l\"),\n TableColumn(\"DMG\", 50, \"l\"),\n TableColumn(\"Count\", 65, \"l\"),\n\n TableColumn(\"Name\", 260, \"l\"),\n ])\n\n def itemName(id, damage):\n try:\n return pymclevel.items.items.findItem(id, damage).name\n except pymclevel.items.ItemNotFound:\n return \"Unknown Item\"\n\n def getRowData(i):\n item = tileEntityTag[\"Items\"][i]\n slot, id, damage, count = item[\"Slot\"].value, item[\"id\"].value, item[\"Damage\"].value, item[\"Count\"].value\n return slot, id, damage, count, itemName(id, damage)\n\n chestWidget.selectedItemIndex = 0\n\n def selectTableRow(i, evt):\n chestWidget.selectedItemIndex = i\n # Disabling the item selector for now, since we need PE items resources.\n# if evt.num_clicks > 1:\n# selectButtonAction()\n\n def changeValue(data):\n s, i, c, d = data\n s = int(s)\n chestWidget.Slot = s\n chestWidget.id = i\n chestWidget.Count = int(c)\n chestWidget.Damage = int(d)\n\n\n chestItemTable.num_rows = lambda: len(tileEntityTag[\"Items\"])\n chestItemTable.row_data = getRowData\n chestItemTable.row_is_selected = lambda x: x == chestWidget.selectedItemIndex\n chestItemTable.click_row = selectTableRow\n chestItemTable.change_value = changeValue\n\n def selectButtonAction():\n SlotEditor(chestItemTable,\n (chestWidget.Slot, chestWidget.id or u\"\", chestWidget.Count, chestWidget.Damage)\n ).present()\n\n maxSlot = pymclevel.TileEntity.maxItems.get(tileEntityTag[\"id\"].value, 27) - 1\n fieldRow = (\n IntInputRow(\"Slot: \", ref=AttrRef(chestWidget, 'Slot'), min=0, max=maxSlot),\n BasicTextInputRow(\"ID / ID Name: \", ref=AttrRef(chestWidget, 'id'), width=300),\n # Text to allow the input of internal item names\n IntInputRow(\"DMG: \", ref=AttrRef(chestWidget, 'Damage'), min=0, max=32767),\n IntInputRow(\"Count: \", ref=AttrRef(chestWidget, 'Count'), min=-1, max=64),\n # This button is inactive for now, because we need to work with different IDs types:\n # * The 'human' IDs: Stone, Glass, Swords...\n # * The MC ones: minecraft:stone, minecraft:air...\n # * The PE ones: 0:0, 1:0...\n# Button(\"Select\", action=selectButtonAction)\n )\n\n def 
deleteFromWorld():\n i = chestWidget.selectedItemIndex\n item = tileEntityTag[\"Items\"][i]\n id = item[\"id\"].value\n Damage = item[\"Damage\"].value\n\n deleteSameDamage = CheckBoxLabel(\"Only delete items with the same damage value\")\n deleteBlocksToo = CheckBoxLabel(\"Also delete blocks placed in the world\")\n if id not in (8, 9, 10, 11): # fluid blocks\n deleteBlocksToo.value = True\n\n w = wrapped_label(\n \"WARNING: You are about to modify the entire world. This cannot be undone. Really delete all copies of this item from all land, chests, furnaces, dispensers, dropped items, item-containing tiles, and player inventories in this world?\",\n 60)\n col = (w, deleteSameDamage)\n if id < 256:\n col += (deleteBlocksToo,)\n\n d = Dialog(Column(col), [\"OK\", \"Cancel\"])\n\n if d.present() == \"OK\":\n def deleteItemsIter():\n i = 0\n if deleteSameDamage.value:\n def matches(t):\n return t[\"id\"].value == id and t[\"Damage\"].value == Damage\n else:\n def matches(t):\n return t[\"id\"].value == id\n\n def matches_itementity(e):\n if e[\"id\"].value != \"Item\":\n return False\n if \"Item\" not in e:\n return False\n t = e[\"Item\"]\n return matches(t)\n\n for player in self.editor.level.players:\n tag = self.editor.level.getPlayerTag(player)\n tag[\"Inventory\"].value = [t for t in tag[\"Inventory\"].value if not matches(t)]\n\n for chunk in self.editor.level.getChunks():\n if id < 256 and deleteBlocksToo.value:\n matchingBlocks = chunk.Blocks == id\n if deleteSameDamage.value:\n matchingBlocks &= chunk.Data == Damage\n if any(matchingBlocks):\n chunk.Blocks[matchingBlocks] = 0\n chunk.Data[matchingBlocks] = 0\n chunk.chunkChanged()\n self.editor.invalidateChunks([chunk.chunkPosition])\n\n for te in chunk.TileEntities:\n if \"Items\" in te:\n l = len(te[\"Items\"])\n\n te[\"Items\"].value = [t for t in te[\"Items\"].value if not matches(t)]\n if l != len(te[\"Items\"]):\n chunk.dirty = True\n entities = [e for e in chunk.Entities if matches_itementity(e)]\n if len(entities) != len(chunk.Entities):\n chunk.Entities.value = entities\n chunk.dirty = True\n\n yield (i, self.editor.level.chunkCount)\n i += 1\n\n progressInfo = _(\"Deleting the item {0} from the entire world ({1} chunks)\").format(\n itemName(chestWidget.id, 0), self.editor.level.chunkCount)\n\n showProgress(progressInfo, deleteItemsIter(), cancel=True)\n\n self.editor.addUnsavedEdit()\n chestWidget.selectedItemIndex = min(chestWidget.selectedItemIndex, len(tileEntityTag[\"Items\"]) - 1)\n\n def deleteItem():\n i = chestWidget.selectedItemIndex\n item = tileEntityTag[\"Items\"][i]\n tileEntityTag[\"Items\"].value = [t for t in tileEntityTag[\"Items\"].value if t is not item]\n chestWidget.selectedItemIndex = min(chestWidget.selectedItemIndex, len(tileEntityTag[\"Items\"]) - 1)\n\n def deleteEnable():\n return len(tileEntityTag[\"Items\"]) and chestWidget.selectedItemIndex != -1\n\n def addEnable():\n return len(tileEntityTag[\"Items\"]) < chestWidget.itemLimit\n\n def addItem():\n slot = 0\n for item in tileEntityTag[\"Items\"]:\n if slot == item[\"Slot\"].value:\n slot += 1\n if slot >= chestWidget.itemLimit:\n return\n item = pymclevel.TAG_Compound()\n item[\"id\"] = pymclevel.TAG_String(\"minecraft:\")\n item[\"Damage\"] = pymclevel.TAG_Short(0)\n item[\"Slot\"] = pymclevel.TAG_Byte(slot)\n item[\"Count\"] = pymclevel.TAG_Byte(1)\n tileEntityTag[\"Items\"].append(item)\n\n addItemButton = Button(\"New Item (1.7+)\", action=addItem, enable=addEnable)\n deleteItemButton = Button(\"Delete This Item\", action=deleteItem, 
enable=deleteEnable)\n deleteFromWorldButton = Button(\"Delete All Instances Of This Item From World\", action=deleteFromWorld,\n enable=deleteEnable)\n deleteCol = Column((addItemButton, deleteItemButton, deleteFromWorldButton))\n\n fieldRow = Row(fieldRow)\n col = Column((chestItemTable, fieldRow, deleteCol))\n\n chestWidget.add(col)\n chestWidget.shrink_wrap()\n\n Dialog(client=chestWidget, responses=[\"Done\"]).present()\n level = self.editor.level\n\n class ChestEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n level.addTileEntity(tileEntityTag)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntityTag)\n level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntityTag), (1, 1, 1))\n\n def redo(self):\n level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntityTag), (1, 1, 1))\n\n if chestWidget.dirty:\n op = ChestEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n @mceutils.alertException\n def editFlowerPot(self, point):\n panel = Dialog()\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(self.editor.level.mcedit_defs.get(\"FlowerPot\", \"FlowerPot\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"Item\"] = pymclevel.TAG_String(\"\")\n tileEntity[\"Data\"] = pymclevel.TAG_Int(0)\n self.editor.level.addTileEntity(tileEntity)\n\n titleLabel = Label(\"Edit Flower Pot\")\n Item = TextFieldWrapped(width=300, text=tileEntity[\"Item\"].value)\n oldItem = Item.value\n Data = IntField(width=300,text=str(tileEntity[\"Data\"].value))\n oldData = Data.value\n\n class FlowerPotEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def updateFlowerPot():\n if oldData != Data.value or oldItem != Item.value:\n tileEntity[\"Item\"] = pymclevel.TAG_String(Item.value)\n tileEntity[\"Data\"] = pymclevel.TAG_Int(Data.value)\n\n op = FlowerPotEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n chunk = self.editor.level.getChunk(int(int(point[0]) / 16), int(int(point[2]) / 16))\n chunk.dirty = True\n panel.dismiss()\n\n okBtn = Button(\"OK\", action=updateFlowerPot)\n cancel = Button(\"Cancel\", 
action=panel.dismiss)\n panel.add(Column((titleLabel, Row((Label(\"Item\"), Item)), Row((Label(\"Data\"), Data)), okBtn, cancel)))\n panel.shrink_wrap()\n panel.present()\n\n @mceutils.alertException\n def editEnchantmentTable(self, point):\n panel = Dialog()\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(self.editor.level.defsIds.mcedit_defs.get(\"EnchantTable\", \"EnchantTable\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"CustomName\"] = pymclevel.TAG_String(\"\")\n self.editor.level.addTileEntity(tileEntity)\n\n titleLabel = Label(\"Edit Enchantment Table\")\n try:\n name = tileEntity[\"CustomName\"].value\n except:\n name = \"\"\n name = TextFieldWrapped(width=300, text=name)\n oldName = name.value\n\n class EnchantmentTableEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def updateEnchantmentTable():\n if oldName != name.value:\n tileEntity[\"CustomName\"] = pymclevel.TAG_String(name.value)\n\n op = EnchantmentTableEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n chunk = self.editor.level.getChunk(int(int(point[0]) / 16), int(int(point[2]) / 16))\n chunk.dirty = True\n panel.dismiss()\n\n okBtn = Button(\"OK\", action=updateEnchantmentTable)\n cancel = Button(\"Cancel\", action=panel.dismiss)\n panel.add(Column((titleLabel, Row((Label(\"Custom Name\"), name)), okBtn, cancel)))\n panel.shrink_wrap()\n panel.present()\n\n should_lock = False\n\n def rightClickDown(self, evt):\n # self.rightMouseDragStart = datetime.now()\n self.should_lock = True\n self.toggleMouseLook()\n\n def rightClickUp(self, evt):\n if not get_top_widget().is_modal:\n return\n if not self.should_lock and self.editor.level:\n self.should_lock = False\n self.toggleMouseLook()\n # if self.rightMouseDragStart is None:\n # return\n\n # td = datetime.now() - self.rightMouseDragStart\n # # except AttributeError:\n # # return\n # # print \"RightClickUp: \", td\n # if td.microseconds > 180000:\n # self.mouseLookOff()\n\n def leftClickDown(self, evt):\n self.editor.toolMouseDown(evt, self.blockFaceUnderCursor)\n\n if evt.num_clicks == 2:\n def distance2(p1, p2):\n return numpy.sum(map(lambda a, b: (a - b) ** 2, p1, p2))\n\n point, face = self.blockFaceUnderCursor\n if point:\n point = map(lambda x: int(numpy.floor(x)), point)\n if self.editor.currentTool is self.editor.selectionTool:\n try:\n block = self.editor.level.blockAt(*point)\n materials = self.editor.level.materials\n if distance2(point, self.cameraPosition) > 4:\n blockEditors = {\n materials.MonsterSpawner.ID: 
self.editMonsterSpawner,\n materials.Sign.ID: self.editSign,\n materials.WallSign.ID: self.editSign,\n materials.MobHead.ID: self.editSkull,\n materials.CommandBlock.ID: self.editCommandBlock,\n materials.CommandBlockRepeating.ID: self.editCommandBlock,\n materials.CommandBlockChain.ID: self.editCommandBlock,\n pymclevel.alphaMaterials.Jukebox.ID: self.editJukebox,\n materials.NoteBlock.ID: self.editNoteBlock,\n materials.FlowerPot.ID: self.editFlowerPot,\n materials.EnchantmentTable.ID: self.editEnchantmentTable\n }\n edit = blockEditors.get(block)\n if edit:\n self.editor.endSelection()\n edit(point)\n else:\n # detect \"container\" tiles\n te = self.editor.level.tileEntityAt(*point)\n if te and \"Items\" in te and \"id\" in te:\n self.editor.endSelection()\n self.editContainer(point, te[\"id\"].value)\n except (EnvironmentError, pymclevel.ChunkNotPresent):\n pass\n\n def leftClickUp(self, evt):\n self.editor.toolMouseUp(evt, self.blockFaceUnderCursor)\n\n # --- Event handlers ---\n\n def mouse_down(self, evt):\n button = keys.remapMouseButton(evt.button)\n logging.debug(\"Mouse down %d @ %s\", button, evt.pos)\n\n if button == 1:\n if sys.platform == \"darwin\" and evt.ctrl:\n self.rightClickDown(evt)\n else:\n self.leftClickDown(evt)\n elif button == 2:\n self.rightClickDown(evt)\n elif button == 3 and sys.platform == \"darwin\" and evt.alt:\n self.leftClickDown(evt)\n else:\n evt.dict['keyname'] = \"mouse{}\".format(button)\n self.editor.key_down(evt)\n\n self.editor.focus_on(None)\n # self.focus_switch = None\n\n def mouse_up(self, evt):\n button = keys.remapMouseButton(evt.button)\n logging.debug(\"Mouse up %d @ %s\", button, evt.pos)\n if button == 1:\n if sys.platform == \"darwin\" and evt.ctrl:\n self.rightClickUp(evt)\n else:\n self.leftClickUp(evt)\n elif button == 2:\n self.rightClickUp(evt)\n elif button == 3 and sys.platform == \"darwin\" and evt.alt:\n self.leftClickUp(evt)\n else:\n evt.dict['keyname'] = \"mouse{}\".format(button)\n self.editor.key_up(evt)\n\n def mouse_drag(self, evt):\n self.mouse_move(evt)\n self.editor.mouse_drag(evt)\n\n lastRendererUpdate = datetime.now()\n\n def mouse_move(self, evt):\n if self.avoidMouseJumpBug == 2:\n self.avoidMouseJumpBug = 0\n return\n\n def sensitivityAdjust(d):\n return d * config.controls.mouseSpeed.get() / 10.0\n\n self.editor.mouseEntered = True\n if self.mouseMovesCamera:\n self.should_lock = False\n pitchAdjust = sensitivityAdjust(evt.rel[1])\n if self.invertMousePitch:\n pitchAdjust = -pitchAdjust\n self.yaw += sensitivityAdjust(evt.rel[0])\n self.pitch += pitchAdjust\n if datetime.now() - self.lastRendererUpdate > timedelta(0, 0, 500000):\n self.editor.renderer.loadNearbyChunks()\n self.lastRendererUpdate = datetime.now()\n\n # adjustLimit = 2\n\n # self.oldMousePosition = (x, y)\n # if (self.startingMousePosition[0] - x > adjustLimit or self.startingMousePosition[1] - y > adjustLimit or\n # self.startingMousePosition[0] - x < -adjustLimit or self.startingMousePosition[1] - y < -adjustLimit):\n # mouse.set_pos(*self.startingMousePosition)\n # event.get(MOUSEMOTION)\n # self.oldMousePosition = (self.startingMousePosition)\n\n #if config.settings.showCommands.get():\n\n def activeevent(self, evt):\n if evt.state & 0x2 and evt.gain != 0:\n self.avoidMouseJumpBug = 1\n\n @property\n def tooltipText(self):\n #if self.hoveringCommandBlock[0] and (self.editor.currentTool is self.editor.selectionTool and self.editor.selectionTool.infoKey == 0):\n # return self.hoveringCommandBlock[1] or \"[Empty]\"\n if self.editor.currentTool 
is self.editor.selectionTool and self.editor.selectionTool.infoKey == 0 and config.settings.showQuickBlockInfo.get():\n point, face = self.blockFaceUnderCursor\n if point:\n if not self.block_info_parsers or (BlockInfoParser.last_level != self.editor.level):\n self.block_info_parsers = BlockInfoParser.get_parsers(self.editor)\n block = self.editor.level.blockAt(*point)\n if block:\n if block in self.block_info_parsers:\n return self.block_info_parsers[block](point)\n return self.editor.currentTool.worldTooltipText\n\n floorQuad = numpy.array(((-4000.0, 0.0, -4000.0),\n (-4000.0, 0.0, 4000.0),\n (4000.0, 0.0, 4000.0),\n (4000.0, 0.0, -4000.0),\n ), dtype='float32')\n\n def updateFloorQuad(self):\n floorQuad = ((-4000.0, 0.0, -4000.0),\n (-4000.0, 0.0, 4000.0),\n (4000.0, 0.0, 4000.0),\n (4000.0, 0.0, -4000.0),\n )\n\n floorQuad = numpy.array(floorQuad, dtype='float32')\n if self.editor.renderer.inSpace():\n floorQuad *= 8.0\n floorQuad += (self.cameraPosition[0], 0.0, self.cameraPosition[2])\n self.floorQuad = floorQuad\n self.floorQuadList.invalidate()\n\n def drawFloorQuad(self):\n self.floorQuadList.call(self._drawFloorQuad)\n\n @staticmethod\n def _drawCeiling():\n lines = []\n minz = minx = -256\n maxz = maxx = 256\n append = lines.append\n for x in xrange(minx, maxx + 1, 16):\n append((x, 0, minz))\n append((x, 0, maxz))\n for z in xrange(minz, maxz + 1, 16):\n append((minx, 0, z))\n append((maxx, 0, z))\n\n GL.glColor(0.3, 0.7, 0.9)\n GL.glVertexPointer(3, GL.GL_FLOAT, 0, numpy.array(lines, dtype='float32'))\n\n GL.glEnable(GL.GL_DEPTH_TEST)\n GL.glDepthMask(False)\n GL.glDrawArrays(GL.GL_LINES, 0, len(lines))\n GL.glDisable(GL.GL_DEPTH_TEST)\n GL.glDepthMask(True)\n\n def drawCeiling(self):\n GL.glMatrixMode(GL.GL_MODELVIEW)\n # GL.glPushMatrix()\n x, y, z = self.cameraPosition\n x -= x % 16\n z -= z % 16\n y = self.editor.level.Height\n GL.glTranslate(x, y, z)\n self.ceilingList.call(self._drawCeiling)\n GL.glTranslate(-x, -y, -z)\n\n _floorQuadList = None\n\n @property\n def floorQuadList(self):\n if not self._floorQuadList:\n self._floorQuadList = glutils.DisplayList()\n return self._floorQuadList\n\n _ceilingList = None\n\n @property\n def ceilingList(self):\n if not self._ceilingList:\n self._ceilingList = glutils.DisplayList()\n return self._ceilingList\n\n @property\n def floorColor(self):\n if self.drawSky:\n return 0.0, 0.0, 1.0, 0.3\n else:\n return 0.0, 1.0, 0.0, 0.15\n\n # floorColor = (0.0, 0.0, 1.0, 0.1)\n\n def _drawFloorQuad(self):\n GL.glDepthMask(True)\n GL.glPolygonOffset(DepthOffset.ChunkMarkers + 2, DepthOffset.ChunkMarkers + 2)\n GL.glVertexPointer(3, GL.GL_FLOAT, 0, self.floorQuad)\n GL.glColor(*self.floorColor)\n with gl.glEnable(GL.GL_BLEND, GL.GL_DEPTH_TEST, GL.GL_POLYGON_OFFSET_FILL):\n GL.glDrawArrays(GL.GL_QUADS, 0, 4)\n\n @property\n def drawSky(self):\n return self._drawSky\n\n @drawSky.setter\n def drawSky(self, val):\n self._drawSky = val\n if self.skyList:\n self.skyList.invalidate()\n if self._floorQuadList:\n self._floorQuadList.invalidate()\n\n skyList = None\n\n def drawSkyBackground(self):\n if self.skyList is None:\n self.skyList = glutils.DisplayList()\n self.skyList.call(self._drawSkyBackground)\n\n def _drawSkyBackground(self):\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glPushMatrix()\n GL.glLoadIdentity()\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glPushMatrix()\n GL.glLoadIdentity()\n GL.glEnableClientState(GL.GL_COLOR_ARRAY)\n\n quad = numpy.array([-1, -1, -1, 1, 1, 1, 1, -1], dtype='float32')\n if self.editor.level.dimNo == -1:\n colors = 
numpy.array([0x90, 0x00, 0x00, 0xff,\n 0x90, 0x00, 0x00, 0xff,\n 0x90, 0x00, 0x00, 0xff,\n 0x90, 0x00, 0x00, 0xff, ], dtype='uint8')\n elif self.editor.level.dimNo == 1:\n colors = numpy.array([0x22, 0x27, 0x28, 0xff,\n 0x22, 0x27, 0x28, 0xff,\n 0x22, 0x27, 0x28, 0xff,\n 0x22, 0x27, 0x28, 0xff, ], dtype='uint8')\n else:\n colors = numpy.array([0x48, 0x49, 0xBA, 0xff,\n 0x8a, 0xaf, 0xff, 0xff,\n 0x8a, 0xaf, 0xff, 0xff,\n 0x48, 0x49, 0xBA, 0xff, ], dtype='uint8')\n\n alpha = 1.0\n\n if alpha > 0.0:\n if alpha < 1.0:\n GL.glEnable(GL.GL_BLEND)\n\n GL.glVertexPointer(2, GL.GL_FLOAT, 0, quad)\n GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, 0, colors)\n GL.glDrawArrays(GL.GL_QUADS, 0, 4)\n\n if alpha < 1.0:\n GL.glDisable(GL.GL_BLEND)\n\n GL.glDisableClientState(GL.GL_COLOR_ARRAY)\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glPopMatrix()\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glPopMatrix()\n\n enableMouseLag = config.settings.enableMouseLag.property()\n\n @property\n def drawFog(self):\n return self._drawFog and not self.editor.renderer.inSpace()\n\n @drawFog.setter\n def drawFog(self, val):\n self._drawFog = val\n\n fogColor = numpy.array([0.6, 0.8, 1.0, 1.0], dtype='float32')\n fogColorBlack = numpy.array([0.0, 0.0, 0.0, 1.0], dtype='float32')\n\n def enableFog(self):\n GL.glEnable(GL.GL_FOG)\n if self.drawSky:\n GL.glFogfv(GL.GL_FOG_COLOR, self.fogColor)\n else:\n GL.glFogfv(GL.GL_FOG_COLOR, self.fogColorBlack)\n\n GL.glFogf(GL.GL_FOG_DENSITY, 0.0001 * config.settings.fogIntensity.get())\n\n @staticmethod\n def disableFog():\n GL.glDisable(GL.GL_FOG)\n\n def getCameraPoint(self):\n distance = self.editor.currentTool.cameraDistance\n return [i for i in itertools.imap(lambda p, d: int(numpy.floor(p + d * distance)),\n self.cameraPosition,\n self.cameraVector)]\n\n blockFaceUnderCursor = (0, 0, 0), (0, 0, 0)\n\n viewingFrustum = None\n\n def setup_projection(self):\n distance = 1.0\n if self.editor.renderer.inSpace():\n distance = 8.0\n GLU.gluPerspective(max(self.fov, 25.0), self.ratio, self.near * distance, self.far * distance)\n\n def setup_modelview(self):\n self.setModelview()\n\n def gl_draw(self):\n self.tickCamera(self.editor.frameStartTime, self.editor.cameraInputs, self.editor.renderer.inSpace())\n self.render()\n\n def render(self):\n self.viewingFrustum = frustum.Frustum.fromViewingMatrix()\n\n if self.superSecretSettings:\n self.editor.drawStars()\n if self.drawSky:\n self.drawSkyBackground()\n if self.drawFog:\n self.enableFog()\n\n self.drawFloorQuad()\n\n self.editor.renderer.viewingFrustum = self.viewingFrustum\n self.editor.renderer.draw()\n\n if self.showCeiling and not self.editor.renderer.inSpace():\n self.drawCeiling()\n\n if self.editor.level:\n try:\n self.updateBlockFaceUnderCursor()\n except (EnvironmentError, pymclevel.ChunkNotPresent) as e:\n logging.debug(\"Updating cursor block: %s\", e)\n self.blockFaceUnderCursor = (None, None)\n\n self.root.update_tooltip()\n\n (blockPosition, faceDirection) = self.blockFaceUnderCursor\n if blockPosition:\n self.editor.updateInspectionString(blockPosition)\n\n if self.find_widget(mouse.get_pos()) == self:\n ct = self.editor.currentTool\n if ct:\n ct.drawTerrainReticle()\n ct.drawToolReticle()\n else:\n self.editor.drawWireCubeReticle()\n\n for t in self.editor.toolbar.tools:\n t.drawTerrainMarkers()\n t.drawToolMarkers()\n\n if self.drawFog:\n self.disableFog()\n\n if self.compassToggle:\n if self._compass is None:\n self._compass = CompassOverlay()\n\n x = getattr(getattr(self.editor, 'copyPanel', None), 'width', 0)\n if x:\n x = x 
/float( self.editor.mainViewport.width)\n self._compass.x = x\n self._compass.yawPitch = self.yaw, 0\n\n with gl.glPushMatrix(GL.GL_PROJECTION):\n GL.glLoadIdentity()\n GL.glOrtho(0., 1., float(self.height) / self.width, 0, -200, 200)\n\n self._compass.draw()\n else:\n self._compass = None\n\n _compass = None\n \nclass BlockInfoParser(object):\n last_level = None\n nbt_ending = \"\\n\\nPress ALT for NBT\"\n edit_ending = \", Double-Click to Edit\"\n \n @classmethod\n def get_parsers(cls, editor):\n cls.last_level = editor.level\n parser_map = {}\n for subcls in cls.__subclasses__():\n instance = subcls(editor.level)\n try:\n blocks = instance.getBlocks()\n except KeyError:\n continue\n if isinstance(blocks, (str, int)):\n parser_map[blocks] = instance.parse_info\n elif isinstance(blocks, (list, tuple)):\n for block in blocks:\n parser_map[block] = instance.parse_info\n return parser_map\n \n def getBlocks(self):\n raise NotImplementedError()\n \n def parse_info(self, pos):\n raise NotImplementedError()\n\n\nclass SpawnerInfoParser(BlockInfoParser):\n \n def __init__(self, level):\n self.level = level\n \n def getBlocks(self):\n return self.level.materials[\"minecraft:mob_spawner\"].ID\n \n def parse_info(self, pos):\n tile_entity = self.level.tileEntityAt(*pos)\n if tile_entity:\n spawn_data = tile_entity.get(\"SpawnData\", {})\n if spawn_data:\n id = spawn_data.get('EntityId', None)\n if not id:\n id = spawn_data.get('id', None)\n if not id:\n value = repr(NameError(\"Malformed spawn data: could not find 'EntityId' or 'id' tag.\"))\n else:\n value = id.value\n return \"{} Spawner{}{}\".format(value, self.nbt_ending, self.edit_ending)\n return \"[Empty]{}{}\".format(self.nbt_ending, self.edit_ending)\n \nclass JukeboxInfoParser(BlockInfoParser):\n id_records = {\n 2256: \"13\",\n 2257: \"Cat\",\n 2258: \"Blocks\",\n 2259: \"Chirp\",\n 2260: \"Far\",\n 2261: \"Mall\",\n 2262: \"Mellohi\",\n 2263: \"Stal\",\n 2264: \"Strad\",\n 2265: \"Ward\",\n 2266: \"11\",\n 2267: \"Wait\"\n }\n \n name_records = {\n \"minecraft:record_13\": \"13\",\n \"minecraft:record_cat\": \"Cat\",\n \"minecraft:record_blocks\": \"Blocks\",\n \"minecraft:record_chirp\": \"Chirp\",\n \"minecraft:record_far\": \"Far\",\n \"minecraft:record_mall\": \"Mall\",\n \"minecraft:record_mellohi\": \"Mellohi\",\n \"minecraft:record_stal\": \"Stal\",\n \"minecraft:record_strad\": \"Strad\",\n \"minecraft:record_ward\": \"Ward\",\n \"minecraft:record_11\": \"11\",\n \"minecraft:record_wait\": \"Wait\"\n }\n \n def __init__(self, level):\n self.level = level\n \n def getBlocks(self):\n return self.level.materials[\"minecraft:jukebox\"].ID\n \n def parse_info(self, pos):\n tile_entity = self.level.tileEntityAt(*pos)\n if tile_entity:\n if \"Record\" in tile_entity:\n value = tile_entity[\"Record\"].value\n if value in self.id_records:\n return self.id_records[value] + \" Record\" + self.nbt_ending + self.edit_ending\n elif \"RecordItem\" in tile_entity:\n value = tile_entity[\"RecordItem\"][\"id\"].value\n if value in self.name_records:\n return \"{} Record{}{}\".format(self.name_records[value], self.nbt_ending, self.edit_ending)\n return \"[No Record]{}{}\".format(self.nbt_ending, self.edit_ending)\n \nclass CommandBlockInfoParser(BlockInfoParser):\n \n def __init__(self, level):\n self.level = level\n \n def getBlocks(self):\n return [\n self.level.materials[\"minecraft:command_block\"].ID,\n self.level.materials[\"minecraft:repeating_command_block\"].ID,\n self.level.materials[\"minecraft:chain_command_block\"].ID\n ]\n \n def 
parse_info(self, pos):\n tile_entity = self.level.tileEntityAt(*pos)\n if tile_entity:\n value = tile_entity.get(\"Command\", pymclevel.TAG_String(\"\")).value\n if value:\n if len(value) > 1500:\n return \"{}\\n**COMMAND IS TOO LONG TO SHOW MORE**{}{}\".format(value[:1500], self.nbt_ending, self.edit_ending)\n return \"{}{}{}\".format(value, self.nbt_ending, self.edit_ending)\n return \"[Empty Command Block]{}{}\".format(self.nbt_ending, self.edit_ending)\n \nclass ContainerInfoParser(BlockInfoParser):\n \n def __init__(self, level):\n self.level = level\n \n def getBlocks(self):\n return [\n self.level.materials[\"minecraft:dispenser\"].ID,\n self.level.materials[\"minecraft:chest\"].ID,\n self.level.materials[\"minecraft:furnace\"].ID,\n self.level.materials[\"minecraft:lit_furnace\"].ID,\n self.level.materials[\"minecraft:trapped_chest\"].ID,\n self.level.materials[\"minecraft:hopper\"].ID,\n self.level.materials[\"minecraft:dropper\"].ID,\n self.level.materials[\"minecraft:brewing_stand\"].ID\n ]\n \n def parse_info(self, pos):\n tile_entity = self.level.tileEntityAt(*pos)\n if tile_entity:\n return \"Contains {} Items {}{}\".format(len(tile_entity.get(\"Items\", [])), self.nbt_ending, self.edit_ending)\n return \"[Empty Container]{}{}\".format(self.nbt_ending, self.edit_ending)\n\ndef unproject(x, y, z):\n try:\n return GLU.gluUnProject(x, y, z)\n except ValueError: # projection failed\n return 0, 0, 0\n" ]
[ [ "numpy.radians", "numpy.cos", "numpy.sin", "numpy.floor", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ziyoujiyi/PaddleRec
[ "9a107c56af2d1ee282975bcc8edb1ad5fb7e7973", "9a107c56af2d1ee282975bcc8edb1ad5fb7e7973" ]
[ "models/recall/ncf/evaluate.py", "models/recall/mind/infer.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport sklearn\nimport math\n\"\"\"\nExtracting information from infer data\n\"\"\"\nfilename = './result.txt'\nf = open(filename, \"r\")\nlines = f.readlines()\nf.close()\nresult = []\nfor line in lines:\n if \"prediction\" in str(line):\n result.append(line)\nresult = result[:-1]\n\npair = []\nfor line in result:\n line = line.strip().split(\",\")\n for seg in line:\n if \"user\" in seg:\n user_id = seg.strip().split(\":\")[1].strip(\" \").strip(\"[]\")\n if \"prediction\" in seg:\n prediction = seg.strip().split(\":\")[1].strip(\" \").strip(\"[]\")\n if \"label\" in seg:\n label = seg.strip().split(\":\")[1].strip(\" \").strip(\"[]\")\n pair.append([int(user_id), float(prediction), int(label)])\n\n\ndef takeSecond(x):\n return x[1]\n\n\n\"\"\"\nEvaluate the performance (Hit_Ratio, NDCG) of top-K recommendation\n\"\"\"\nhits = []\nndcg = []\npair = [pair[i:i + 100] for i in range(0, len(pair), 100)]\nfor user in pair:\n user.sort(key=takeSecond, reverse=True)\n each_user_top10_line = user[:10]\n each_user_top10_line_label = [i[2] for i in each_user_top10_line]\n if 1 in each_user_top10_line_label:\n i = each_user_top10_line_label.index(1)\n ndcg.append(math.log(2) / math.log(i + 2))\n hits.append(1)\n else:\n hits.append(0)\n ndcg.append(0)\n\nprint(\"user_num:\", len(hits))\nprint(\"hit ratio:\", np.array(hits).mean())\nprint(\"ndcg:\", np.array(ndcg).mean())\n", "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport argparse\nimport time\n\nimport os\nimport warnings\nimport logging\nimport paddle\nimport sys\nimport numpy as np\nimport math\n__dir__ = os.path.dirname(os.path.abspath(__file__))\n# sys.path.append(__dir__)\nsys.path.append(\n os.path.abspath(os.path.join(__dir__, '..', '..', '..', 'tools')))\n\nfrom utils.save_load import save_model, load_model\nfrom utils.utils_single import load_yaml, get_abs_model, create_data_loader, reset_auc, load_dy_model_class\n\nlogging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"PaddleRec train static script\")\n parser.add_argument(\"-m\", \"--config_yaml\", type=str)\n parser.add_argument(\"-top_n\", \"--top_n\", type=int, default=20)\n args = parser.parse_args()\n args.abs_dir = os.path.dirname(os.path.abspath(args.config_yaml))\n args.config_yaml = get_abs_model(args.config_yaml)\n return args\n\n\ndef main(args):\n paddle.seed(12345)\n\n # load config\n config = load_yaml(args.config_yaml)\n config[\"config_abs_dir\"] = args.abs_dir\n # load static model class\n dy_model_class = load_dy_model_class(config)\n\n use_gpu = config.get(\"runner.use_gpu\", True)\n test_data_dir = config.get(\"runner.test_data_dir\", None)\n print_interval = config.get(\"runner.print_interval\", None)\n model_load_path = config.get(\"runner.infer_load_path\", \"model_output\")\n start_epoch = config.get(\"runner.infer_start_epoch\", 0)\n end_epoch = config.get(\"runner.infer_end_epoch\", 10)\n batch_size = config.get(\"runner.infer_batch_size\", None)\n os.environ[\"CPU_NUM\"] = str(config.get(\"runner.thread_num\", 1))\n\n logger.info(\"**************common.configs**********\")\n logger.info(\n \"use_gpu: {}, test_data_dir: {}, start_epoch: {}, end_epoch: {}, print_interval: {}, model_load_path: {}\".\n format(use_gpu, test_data_dir, start_epoch, end_epoch, print_interval,\n model_load_path))\n logger.info(\"**************common.configs**********\")\n\n place = paddle.set_device('gpu' if use_gpu else 'cpu')\n\n dy_model = dy_model_class.create_model(config)\n test_dataloader = create_data_loader(\n config=config, place=place, mode=\"test\")\n\n logger.info(\"read data\")\n\n epoch_begin = time.time()\n interval_begin = time.time()\n\n for epoch_id in range(start_epoch, end_epoch):\n logger.info(\"load model epoch {}\".format(epoch_id))\n model_path = os.path.join(model_load_path, str(epoch_id))\n load_model(model_path, dy_model)\n b = dy_model.item_emb.weight.numpy()\n\n import faiss\n if use_gpu:\n res = faiss.StandardGpuResources()\n flat_config = faiss.GpuIndexFlatConfig()\n flat_config.device = 0\n faiss_index = faiss.GpuIndexFlatIP(res, b.shape[-1], flat_config)\n faiss_index.add(b)\n else:\n faiss_index = faiss.IndexFlatIP(b.shape[-1])\n faiss_index.add(b)\n\n total = 1\n total_recall = 0.0\n total_ndcg = 0.0\n 
total_hitrate = 0\n\n for batch_id, batch_data in enumerate(test_dataloader()):\n\n user_embs, _ = dy_model_class.infer_forward(dy_model, None,\n batch_data, config)\n\n user_embs = user_embs.numpy()\n target_items = np.squeeze(batch_data[-1].numpy(), axis=1)\n\n if len(user_embs.shape) == 2:\n D, I = faiss_index.search(user_embs, args.top_n)\n for i, iid_list in enumerate(target_items):\n recall = 0\n dcg = 0.0\n item_list = set(I[i])\n iid_list = list(filter(lambda x: x != 0, list(iid_list)))\n for no, iid in enumerate(iid_list):\n if iid in item_list:\n recall += 1\n dcg += 1.0 / math.log(no + 2, 2)\n idcg = 0.0\n for no in range(recall):\n idcg += 1.0 / math.log(no + 2, 2)\n total_recall += recall * 1.0 / len(iid_list)\n if recall > 0:\n total_ndcg += dcg / idcg\n total_hitrate += 1\n else:\n ni = user_embs.shape[1]\n user_embs = np.reshape(user_embs, [-1, user_embs.shape[-1]])\n D, I = faiss_index.search(user_embs, args.top_n)\n for i, iid_list in enumerate(target_items):\n recall = 0\n dcg = 0.0\n item_list_set = set()\n item_list = list(\n zip(\n np.reshape(I[i * ni:(i + 1) * ni], -1),\n np.reshape(D[i * ni:(i + 1) * ni], -1)))\n item_list.sort(key=lambda x: x[1], reverse=True)\n for j in range(len(item_list)):\n if item_list[j][0] not in item_list_set and item_list[\n j][0] != 0:\n item_list_set.add(item_list[j][0])\n if len(item_list_set) >= args.top_n:\n break\n iid_list = list(filter(lambda x: x != 0, list(iid_list)))\n for no, iid in enumerate(iid_list):\n if iid == 0:\n break\n if iid in item_list_set:\n recall += 1\n dcg += 1.0 / math.log(no + 2, 2)\n idcg = 0.0\n for no in range(recall):\n idcg += 1.0 / math.log(no + 2, 2)\n\n total_recall += recall * 1.0 / len(iid_list)\n if recall > 0:\n total_ndcg += dcg / idcg\n total_hitrate += 1\n total += target_items.shape[0]\n\n if batch_id % print_interval == 0:\n recall = total_recall / total\n ndcg = total_ndcg / total\n hitrate = total_hitrate * 1.0 / total\n metric_str = \"\"\n metric_str += \"recall@%d: %.5f, \" % (args.top_n, recall)\n metric_str += \"ndcg@%d: %.5f, \" % (args.top_n, ndcg)\n metric_str += \"hitrate@%d: %.5f, \" % (args.top_n, hitrate)\n logger.info(\"epoch: {}, batch_id: {}, \".format(\n epoch_id, batch_id) + metric_str + \"speed: {:.2f} ins/s\".\n format(print_interval * batch_size / (time.time(\n ) - interval_begin)))\n\n recall = total_recall / total\n ndcg = total_ndcg / total\n hitrate = total_hitrate * 1.0 / total\n metric_str = \"\"\n metric_str += \"recall@%d: %.5f, \" % (args.top_n, recall)\n metric_str += \"ndcg@%d: %.5f, \" % (args.top_n, ndcg)\n metric_str += \"hitrate@%d: %.5f, \" % (args.top_n, hitrate)\n\n logger.info(\"epoch: {} done, \".format(epoch_id) + metric_str +\n \"epoch time: {:.2f} s\".format(time.time() - epoch_begin))\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n" ]
[ [ "numpy.array" ], [ "numpy.reshape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kayce001/mmdeploy
[ "59470fef0b28e0b760c72269e0696bbdf57db7f1", "59470fef0b28e0b760c72269e0696bbdf57db7f1" ]
[ "mmdeploy/codebase/mmcls/deploy/classification_model.py", "mmdeploy/codebase/mmdet/deploy/object_detection_model.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Sequence, Union\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcls.datasets import DATASETS\nfrom mmcls.models.classifiers.base import BaseClassifier\nfrom mmcv.utils import Registry\n\nfrom mmdeploy.codebase.base import BaseBackendModel\nfrom mmdeploy.utils import (Backend, get_backend, get_codebase_config,\n load_config)\n\n\ndef __build_backend_model(cls_name: str, registry: Registry, *args, **kwargs):\n return registry.module_dict[cls_name](*args, **kwargs)\n\n\n__BACKEND_MODEL = mmcv.utils.Registry(\n 'backend_classifiers', build_func=__build_backend_model)\n\n\n@__BACKEND_MODEL.register_module('end2end')\nclass End2EndModel(BaseBackendModel):\n \"\"\"End to end model for inference of classification.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files(e.g.\n '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string represents device type.\n class_names (Sequence[str]): A list of string specifying class names.\n deploy_cfg (str | mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(\n self,\n backend: Backend,\n backend_files: Sequence[str],\n device: str,\n class_names: Sequence[str],\n deploy_cfg: Union[str, mmcv.Config] = None,\n ):\n super(End2EndModel, self).__init__(deploy_cfg=deploy_cfg)\n self.CLASSES = class_names\n self.deploy_cfg = deploy_cfg\n self._init_wrapper(\n backend=backend, backend_files=backend_files, device=device)\n\n def _init_wrapper(self, backend: Backend, backend_files: Sequence[str],\n device: str):\n output_names = self.output_names\n self.wrapper = BaseBackendModel._build_wrapper(\n backend=backend,\n backend_files=backend_files,\n device=device,\n output_names=output_names,\n deploy_cfg=self.deploy_cfg)\n\n def forward(self, img: List[torch.Tensor], *args, **kwargs) -> list:\n \"\"\"Run forward inference.\n\n Args:\n img (List[torch.Tensor]): A list contains input image(s)\n in [N x C x H x W] format.\n *args: Other arguments.\n **kwargs: Other key-pair arguments.\n\n Returns:\n list: A list contains predictions.\n \"\"\"\n\n if isinstance(img, list):\n input_img = img[0].contiguous()\n else:\n input_img = img.contiguous()\n outputs = self.forward_test(input_img, *args, **kwargs)\n\n return list(outputs)\n\n def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \\\n List[np.ndarray]:\n \"\"\"The interface for forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n\n Returns:\n List[np.ndarray]: A list of classification prediction.\n \"\"\"\n outputs = self.wrapper({self.input_name: imgs})\n outputs = self.wrapper.output_to_list(outputs)\n outputs = [out.detach().cpu().numpy() for out in outputs]\n return outputs\n\n def show_result(self,\n img: np.ndarray,\n result: list,\n win_name: str = '',\n show: bool = True,\n out_file: str = None):\n \"\"\"Show predictions of classification.\n Args:\n img: (np.ndarray): Input image to draw predictions.\n result (list): A list of predictions.\n win_name (str): The name of visualization window.\n show (bool): Whether to show plotted image in windows. 
Defaults to\n `True`.\n out_file (str): Output image file to save drawn predictions.\n\n Returns:\n np.ndarray: Drawn image, only if not `show` or `out_file`.\n \"\"\"\n return BaseClassifier.show_result(\n self, img, result, show=show, win_name=win_name, out_file=out_file)\n\n\n@__BACKEND_MODEL.register_module('sdk')\nclass SDKEnd2EndModel(End2EndModel):\n \"\"\"SDK inference class, converts SDK output to mmcls format.\"\"\"\n\n def forward(self, img: List[torch.Tensor], *args, **kwargs) -> list:\n \"\"\"Run forward inference.\n\n Args:\n img (List[torch.Tensor]): A list contains input image(s)\n in [N x C x H x W] format.\n *args: Other arguments.\n **kwargs: Other key-pair arguments.\n\n Returns:\n list: A list contains predictions.\n \"\"\"\n\n pred = self.wrapper.invoke(\n [img[0].contiguous().detach().cpu().numpy()])[0]\n pred = np.array(pred, dtype=np.float32)\n return pred[np.argsort(pred[:, 0])][np.newaxis, :, 1]\n\n\ndef get_classes_from_config(model_cfg: Union[str, mmcv.Config]):\n \"\"\"Get class name from config.\n\n Args:\n model_cfg (str | mmcv.Config): Input model config file or\n Config object.\n\n Returns:\n list[str]: A list of string specifying names of different class.\n \"\"\"\n model_cfg = load_config(model_cfg)[0]\n module_dict = DATASETS.module_dict\n data_cfg = model_cfg.data\n\n if 'train' in data_cfg:\n module = module_dict[data_cfg.train.type]\n elif 'val' in data_cfg:\n module = module_dict[data_cfg.val.type]\n elif 'test' in data_cfg:\n module = module_dict[data_cfg.test.type]\n else:\n raise RuntimeError(f'No dataset config found in: {model_cfg}')\n\n return module.CLASSES\n\n\ndef build_classification_model(model_files: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config],\n device: str, **kwargs):\n \"\"\"Build classification model for different backend.\n\n Args:\n model_files (Sequence[str]): Input model file(s).\n model_cfg (str | mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str | mmcv.Config): Input deployment config file or\n Config object.\n device (str): Device to input model.\n\n Returns:\n BaseBackendModel: Classifier for a configured backend.\n \"\"\"\n # load cfg if necessary\n deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)\n\n backend = get_backend(deploy_cfg)\n model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')\n class_names = get_classes_from_config(model_cfg)\n\n backend_classifier = __BACKEND_MODEL.build(\n model_type,\n backend=backend,\n backend_files=model_files,\n device=device,\n class_names=class_names,\n deploy_cfg=deploy_cfg,\n **kwargs)\n\n return backend_classifier\n", "# Copyright (c) OpenMMLab. 
All rights reserved.\nfrom functools import partial\nfrom typing import List, Sequence, Tuple, Union\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.utils import Registry\nfrom mmdet.core import bbox2result\nfrom mmdet.datasets import DATASETS\nfrom mmdet.models import BaseDetector\n\nfrom mmdeploy.backend.base import get_backend_file_count\nfrom mmdeploy.codebase.base import BaseBackendModel\nfrom mmdeploy.codebase.mmdet import get_post_processing_params, multiclass_nms\nfrom mmdeploy.utils import (Backend, get_backend, get_codebase_config,\n get_partition_config, load_config)\n\n\ndef __build_backend_model(partition_name: str, backend: Backend,\n backend_files: Sequence[str], device: str,\n class_names: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config],\n registry: Registry, **kwargs):\n return registry.module_dict[partition_name](\n backend=backend,\n backend_files=backend_files,\n class_names=class_names,\n device=device,\n model_cfg=model_cfg,\n deploy_cfg=deploy_cfg,\n **kwargs)\n\n\n# Use registry to store models with different partition methods\n# If a model doesn't need to partition, we don't need this registry\n__BACKEND_MODEL = mmcv.utils.Registry(\n 'backend_detectors', build_func=__build_backend_model)\n\n\n@__BACKEND_MODEL.register_module('end2end')\nclass End2EndModel(BaseBackendModel):\n \"\"\"End to end model for inference of detection.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n class_names (Sequence[str]): A list of string specifying class names.\n deploy_cfg (str|mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(self, backend: Backend, backend_files: Sequence[str],\n device: str, class_names: Sequence[str],\n deploy_cfg: Union[str, mmcv.Config], **kwargs):\n super().__init__(deploy_cfg=deploy_cfg)\n self.CLASSES = class_names\n self.deploy_cfg = deploy_cfg\n self._init_wrapper(\n backend=backend, backend_files=backend_files, device=device)\n\n def _init_wrapper(self, backend: Backend, backend_files: Sequence[str],\n device: str):\n \"\"\"Initialize backend wrapper.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. 
'.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n \"\"\"\n output_names = self.output_names\n self.wrapper = BaseBackendModel._build_wrapper(\n backend=backend,\n backend_files=backend_files,\n device=device,\n output_names=output_names,\n deploy_cfg=self.deploy_cfg)\n\n @staticmethod\n def __clear_outputs(\n test_outputs: List[Union[torch.Tensor, np.ndarray]]\n ) -> List[Union[List[torch.Tensor], List[np.ndarray]]]:\n \"\"\"Removes additional outputs and detections with zero and negative\n score.\n\n Args:\n test_outputs (List[Union[torch.Tensor, np.ndarray]]):\n outputs of forward_test.\n\n Returns:\n List[Union[List[torch.Tensor], List[np.ndarray]]]:\n outputs with without zero score object.\n \"\"\"\n batch_size = len(test_outputs[0])\n\n num_outputs = len(test_outputs)\n outputs = [[None for _ in range(batch_size)]\n for _ in range(num_outputs)]\n\n for i in range(batch_size):\n inds = test_outputs[0][i, :, 4] > 0.0\n for output_id in range(num_outputs):\n outputs[output_id][i] = test_outputs[output_id][i, inds, ...]\n return outputs\n\n @staticmethod\n def postprocessing_masks(det_bboxes: np.ndarray,\n det_masks: np.ndarray,\n img_w: int,\n img_h: int,\n mask_thr_binary: float = 0.5) -> np.ndarray:\n \"\"\"Additional processing of masks. Resizes masks from [num_det, 28, 28]\n to [num_det, img_w, img_h]. Analog of the 'mmdeploy.codebase.mmdet.\n models.roi_heads.fcn_mask_head._do_paste_mask' function.\n\n Args:\n det_bboxes (np.ndarray): Bbox of shape [num_det, 4]\n det_masks (np.ndarray): Masks of shape [num_det, 28, 28].\n img_w (int): Width of the original image.\n img_h (int): Height of the original image.\n mask_thr_binary (float): The threshold for the mask.\n\n Returns:\n np.ndarray: masks of shape [N, num_det, img_h, img_w].\n \"\"\"\n masks = det_masks\n bboxes = det_bboxes\n\n num_det = bboxes.shape[0]\n # Skip postprocessing if no detections are found.\n if num_det == 0:\n return np.zeros((0, img_h, img_w))\n\n if isinstance(masks, np.ndarray):\n masks = torch.tensor(masks)\n bboxes = torch.tensor(bboxes)\n\n result_masks = []\n for bbox, mask in zip(bboxes, masks):\n\n x0_int, y0_int = 0, 0\n x1_int, y1_int = img_w, img_h\n\n img_y = torch.arange(y0_int, y1_int, dtype=torch.float32) + 0.5\n img_x = torch.arange(x0_int, x1_int, dtype=torch.float32) + 0.5\n x0, y0, x1, y1 = bbox\n\n img_y = (img_y - y0) / (y1 - y0) * 2 - 1\n img_x = (img_x - x0) / (x1 - x0) * 2 - 1\n if torch.isinf(img_x).any():\n inds = torch.where(torch.isinf(img_x))\n img_x[inds] = 0\n if torch.isinf(img_y).any():\n inds = torch.where(torch.isinf(img_y))\n img_y[inds] = 0\n\n gx = img_x[None, :].expand(img_y.size(0), img_x.size(0))\n gy = img_y[:, None].expand(img_y.size(0), img_x.size(0))\n grid = torch.stack([gx, gy], dim=2)\n\n img_masks = F.grid_sample(\n mask.to(dtype=torch.float32)[None, None, :, :],\n grid[None, :, :, :],\n align_corners=False)\n\n mask = img_masks\n mask = (mask >= mask_thr_binary).to(dtype=torch.bool)\n result_masks.append(mask.numpy())\n result_masks = np.concatenate(result_masks, axis=1)\n return result_masks.squeeze(0)\n\n def forward(self, img: Sequence[torch.Tensor], img_metas: Sequence[dict],\n *args, **kwargs):\n \"\"\"Run forward inference.\n\n Args:\n img (Sequence[torch.Tensor]): A list contains input image(s)\n in [N x C x H x W] format.\n img_metas (Sequence[dict]): A list of meta info for image(s).\n *args: Other arguments.\n **kwargs: Other key-pair arguments.\n\n Returns:\n list: A list contains 
predictions.\n \"\"\"\n input_img = img[0].contiguous()\n outputs = self.forward_test(input_img, img_metas, *args, **kwargs)\n outputs = End2EndModel.__clear_outputs(outputs)\n batch_dets, batch_labels = outputs[:2]\n batch_masks = outputs[2] if len(outputs) == 3 else None\n batch_size = input_img.shape[0]\n img_metas = img_metas[0]\n results = []\n rescale = kwargs.get('rescale', True)\n for i in range(batch_size):\n dets, labels = batch_dets[i], batch_labels[i]\n if rescale:\n scale_factor = img_metas[i]['scale_factor']\n\n if isinstance(scale_factor, (list, tuple, np.ndarray)):\n assert len(scale_factor) == 4\n scale_factor = np.array(scale_factor)[None, :] # [1,4]\n dets[:, :4] /= scale_factor\n\n if 'border' in img_metas[i]:\n # offset pixel of the top-left corners between original image\n # and padded/enlarged image, 'border' is used when exporting\n # CornerNet and CentripetalNet to onnx\n x_off = img_metas[i]['border'][2]\n y_off = img_metas[i]['border'][0]\n dets[:, [0, 2]] -= x_off\n dets[:, [1, 3]] -= y_off\n dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype)\n\n dets_results = bbox2result(dets, labels, len(self.CLASSES))\n\n if batch_masks is not None:\n masks = batch_masks[i]\n img_h, img_w = img_metas[i]['img_shape'][:2]\n ori_h, ori_w = img_metas[i]['ori_shape'][:2]\n export_postprocess_mask = True\n if self.deploy_cfg is not None:\n\n mmdet_deploy_cfg = get_post_processing_params(\n self.deploy_cfg)\n # this flag enable postprocess when export.\n export_postprocess_mask = mmdet_deploy_cfg.get(\n 'export_postprocess_mask', True)\n if not export_postprocess_mask:\n masks = End2EndModel.postprocessing_masks(\n dets[:, :4], masks, ori_w, ori_h)\n else:\n masks = masks[:, :img_h, :img_w]\n # avoid to resize masks with zero dim\n if rescale and masks.shape[0] != 0:\n masks = masks.astype(np.float32)\n masks = torch.from_numpy(masks)\n masks = torch.nn.functional.interpolate(\n masks.unsqueeze(0), size=(ori_h, ori_w))\n masks = masks.squeeze(0).detach().numpy()\n if masks.dtype != bool:\n masks = masks >= 0.5\n segms_results = [[] for _ in range(len(self.CLASSES))]\n for j in range(len(dets)):\n segms_results[labels[j]].append(masks[j])\n results.append((dets_results, segms_results))\n else:\n results.append(dets_results)\n return results\n\n def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \\\n Tuple[np.ndarray, np.ndarray]:\n \"\"\"The interface for forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n\n Returns:\n tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5]\n and class labels of shape [N, num_det].\n \"\"\"\n outputs = self.wrapper({self.input_name: imgs})\n outputs = self.wrapper.output_to_list(outputs)\n outputs = [out.detach().cpu().numpy() for out in outputs]\n return outputs\n\n def show_result(self,\n img: np.ndarray,\n result: list,\n win_name: str = '',\n show: bool = True,\n score_thr: float = 0.3,\n out_file=None):\n return BaseDetector.show_result(\n self,\n img=img,\n result=result,\n score_thr=score_thr,\n show=show,\n win_name=win_name,\n out_file=out_file)\n\n\n@__BACKEND_MODEL.register_module('single_stage')\nclass PartitionSingleStageModel(End2EndModel):\n \"\"\"Partitioned single stage detection model.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. 
'.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n class_names (Sequence[str]): A list of string specifying class names.\n model_cfg (str|mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str|mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(self, backend: Backend, backend_files: Sequence[str],\n device: str, class_names: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config], **kwargs):\n super().__init__(backend, backend_files, device, class_names,\n deploy_cfg, **kwargs)\n # load cfg if necessary\n model_cfg = load_config(model_cfg)[0]\n self.model_cfg = model_cfg\n\n def _init_wrapper(self, backend, backend_files, device):\n self.wrapper = BaseBackendModel._build_wrapper(\n backend=backend,\n backend_files=backend_files,\n device=device,\n output_names=['scores', 'boxes'],\n deploy_cfg=self.deploy_cfg)\n\n def partition0_postprocess(self, scores: torch.Tensor,\n bboxes: torch.Tensor):\n \"\"\"Perform post-processing for partition 0.\n\n Args:\n scores (Tensor): The detection scores of shape\n [N, num_boxes, num_classes].\n bboxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].\n\n Returns:\n tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and\n class labels of shape [N, num_det].\n \"\"\"\n cfg = self.model_cfg.model.test_cfg\n deploy_cfg = self.deploy_cfg\n\n post_params = get_post_processing_params(deploy_cfg)\n max_output_boxes_per_class = post_params.max_output_boxes_per_class\n iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)\n score_threshold = cfg.get('score_thr', post_params.score_threshold)\n pre_top_k = -1 if post_params.pre_top_k >= bboxes.shape[1] \\\n else post_params.pre_top_k\n keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)\n ret = multiclass_nms(\n bboxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold=iou_threshold,\n score_threshold=score_threshold,\n pre_top_k=pre_top_k,\n keep_top_k=keep_top_k)\n ret = [r.cpu() for r in ret]\n return ret\n\n def forward_test(self, imgs: torch.Tensor, *args, **kwargs):\n \"\"\"Implement forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n\n Returns:\n list[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and\n class labels of shape [N, num_det].\n \"\"\"\n outputs = self.wrapper({self.input_name: imgs})\n outputs = self.wrapper.output_to_list(outputs)\n scores, bboxes = outputs[:2]\n return self.partition0_postprocess(scores, bboxes)\n\n\n@__BACKEND_MODEL.register_module('two_stage')\nclass PartitionTwoStageModel(End2EndModel):\n \"\"\"Partitioned two stage detection model.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. 
'.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n class_names (Sequence[str]): A list of string specifying class names.\n model_cfg (str|mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str|mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(self, backend: Backend, backend_files: Sequence[str],\n device: str, class_names: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config], **kwargs):\n\n # load cfg if necessary\n model_cfg = load_config(model_cfg)[0]\n\n self.model_cfg = model_cfg\n\n super().__init__(backend, backend_files, device, class_names,\n deploy_cfg, **kwargs)\n from mmdet.models.builder import build_head, build_roi_extractor\n\n from ..models.roi_heads.bbox_head import bbox_head__get_bboxes\n\n self.bbox_roi_extractor = build_roi_extractor(\n model_cfg.model.roi_head.bbox_roi_extractor)\n self.bbox_head = build_head(model_cfg.model.roi_head.bbox_head)\n\n class Context:\n pass\n\n ctx = Context()\n ctx.cfg = self.deploy_cfg\n self.bbox_head__get_bboxes = partial(bbox_head__get_bboxes, ctx)\n\n def _init_wrapper(self, backend, backend_files, device):\n n = get_backend_file_count(backend)\n num_feat = self.model_cfg['model']['neck']['num_outs']\n partition0_output_names = [\n 'feat/{}'.format(i) for i in range(num_feat)\n ] + ['scores', 'boxes']\n\n self.first_wrapper = BaseBackendModel._build_wrapper(\n backend,\n backend_files[0:n],\n device,\n partition0_output_names,\n deploy_cfg=self.deploy_cfg)\n\n self.second_wrapper = BaseBackendModel._build_wrapper(\n backend,\n backend_files[n:2 * n],\n device, ['cls_score', 'bbox_pred'],\n deploy_cfg=self.deploy_cfg)\n\n def partition0_postprocess(self, x: Sequence[torch.Tensor],\n scores: torch.Tensor, bboxes: torch.Tensor):\n \"\"\"Perform post-processing for partition 0.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale levels.\n scores (Tensor): The detection scores of shape\n [N, num_boxes, num_classes].\n bboxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].\n\n Returns:\n tuple(Tensor, Tensor): rois and bbox_feats.\n \"\"\"\n # rpn-nms + roi-extractor\n cfg = self.model_cfg.model.test_cfg.rpn\n deploy_cfg = self.deploy_cfg\n\n post_params = get_post_processing_params(deploy_cfg)\n iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)\n score_threshold = cfg.get('score_thr', post_params.score_threshold)\n pre_top_k = -1 if post_params.pre_top_k >= bboxes.shape[1] \\\n else post_params.pre_top_k\n keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)\n # only one class in rpn\n max_output_boxes_per_class = keep_top_k\n proposals, _ = multiclass_nms(\n bboxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold=iou_threshold,\n score_threshold=score_threshold,\n pre_top_k=pre_top_k,\n keep_top_k=keep_top_k)\n\n rois = proposals\n batch_index = torch.arange(\n rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n return rois, bbox_feats\n\n def partition1_postprocess(self, rois: torch.Tensor,\n cls_score: torch.Tensor,\n bbox_pred: 
torch.Tensor,\n img_metas: Sequence[dict]):\n \"\"\"Perform post-processing for partition 1.\n Args:\n rois (torch.Tensor): Input tensor of roi.\n cls_score (torch.Tensor): Scores of all classes.\n bbox_pred (torch.Tensor): Bounding box proposals.\n img_metas (Sequence[dict]): A list of image(s) meta information.\n\n Returns:\n tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class\n labels of shape [N, num_det].\n \"\"\"\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,\n bbox_pred.size(-1))\n\n rcnn_test_cfg = self.model_cfg.model.test_cfg.rcnn\n return self.bbox_head__get_bboxes(\n self.bbox_head,\n rois,\n cls_score,\n bbox_pred,\n img_metas[0][0]['img_shape'],\n img_metas[0][0]['scale_factor'],\n cfg=rcnn_test_cfg)\n\n def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict],\n *args, **kwargs):\n \"\"\"Implement forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n img_metas (Sequence[dict]): A list of image(s) meta information.\n\n Returns:\n tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and\n class labels of shape [N, num_det].\n \"\"\"\n outputs = self.first_wrapper({'input': imgs})\n outputs = self.first_wrapper.output_to_list(outputs)\n feats = outputs[:-2]\n scores, bboxes = outputs[-2:]\n\n # partition0_postprocess\n rois, bbox_feats = self.partition0_postprocess(feats, scores, bboxes)\n\n # partition1 forward\n bbox_feats = bbox_feats.contiguous()\n outputs = self.second_wrapper({'bbox_feats': bbox_feats})\n outputs = self.second_wrapper.output_to_list(outputs)\n cls_score, bbox_pred = outputs[:2]\n\n # partition1_postprocess\n outputs = self.partition1_postprocess(rois, cls_score, bbox_pred,\n img_metas)\n outputs = [out.detach().cpu() for out in outputs]\n return outputs\n\n\n@__BACKEND_MODEL.register_module('ncnn_end2end')\nclass NCNNEnd2EndModel(End2EndModel):\n \"\"\"NCNNEnd2EndModel.\n\n End2end NCNN model inference class. Because it has DetectionOutput layer\n and its output is different from original mmdet style of `dets`, `labels`.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. 
'.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n class_names (Sequence[str]): A list of string specifying class names.\n model_cfg (str|mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str|mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(self, backend: Backend, backend_files: Sequence[str],\n device: str, class_names: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config], **kwargs):\n assert backend == Backend.NCNN, f'only supported ncnn, but give \\\n {backend.value}'\n\n super(NCNNEnd2EndModel,\n self).__init__(backend, backend_files, device, class_names,\n deploy_cfg, **kwargs)\n # load cfg if necessary\n model_cfg = load_config(model_cfg)[0]\n self.model_cfg = model_cfg\n\n def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> List:\n \"\"\"Implement forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n\n Returns:\n list[np.ndarray]: dets of shape [N, num_det, 5] and\n class labels of shape [N, num_det].\n \"\"\"\n _, _, H, W = imgs.shape\n outputs = self.wrapper({self.input_name: imgs})\n for key, item in outputs.items():\n if item is None:\n return [np.zeros((1, 0, 5)), np.zeros((1, 0))]\n out = self.wrapper.output_to_list(outputs)[0]\n labels = out[:, :, 0] - 1\n scales = torch.tensor([W, H, W, H]).reshape(1, 1, 4)\n scores = out[:, :, 1:2]\n boxes = out[:, :, 2:6] * scales\n dets = torch.cat([boxes, scores], dim=2)\n dets = dets.detach().cpu().numpy()\n labels = labels.detach().cpu().numpy()\n return [dets, labels]\n\n\n@__BACKEND_MODEL.register_module('sdk')\nclass SDKEnd2EndModel(End2EndModel):\n \"\"\"SDK inference class, converts SDK output to mmdet format.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.has_mask = self.deploy_cfg.codebase_config.get('has_mask', False)\n\n def forward(self, img: Sequence[torch.Tensor], img_metas: Sequence[dict],\n *args, **kwargs):\n \"\"\"Run forward inference.\n\n Args:\n img (Sequence[torch.Tensor]): A list contains input image(s)\n in [N x C x H x W] format.\n img_metas (Sequence[dict]): A list of meta info for image(s).\n *args: Other arguments.\n **kwargs: Other key-pair arguments.\n\n Returns:\n list: A list contains predictions.\n \"\"\"\n dets, labels, masks = self.wrapper.invoke(\n [img[0].contiguous().detach().cpu().numpy()])[0]\n det_results = bbox2result(dets[np.newaxis, ...], labels[np.newaxis,\n ...],\n len(self.CLASSES))\n if self.has_mask:\n segm_results = [[] for _ in range(len(self.CLASSES))]\n ori_h, ori_w = img_metas[0]['ori_shape'][:2]\n for bbox, label, mask in zip(dets, labels, masks):\n img_mask = np.zeros((ori_h, ori_w), dtype=np.uint8)\n left = int(max(np.floor(bbox[0]) - 1, 0))\n top = int(max(np.floor(bbox[1]) - 1, 0))\n img_mask[top:top + mask.shape[0],\n left:left + mask.shape[1]] = mask\n segm_results[label].append(img_mask)\n return [(det_results, segm_results)]\n return [det_results]\n\n\ndef get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs) -> \\\n List[str]:\n \"\"\"Get class name from config. 
The class name is the `classes` field if it\n is set in the config, or the classes in `module_dict` of MMDet whose type\n is set in the config.\n\n Args:\n model_cfg (str | mmcv.Config): Input model config file or\n Config object.\n\n Returns:\n List[str]: A list of string specifying names of different class.\n \"\"\"\n # load cfg if necessary\n model_cfg = load_config(model_cfg)[0]\n\n # For custom dataset\n if 'classes' in model_cfg:\n return list(model_cfg['classes'])\n\n module_dict = DATASETS.module_dict\n data_cfg = model_cfg.data\n classes = None\n module = None\n\n keys = ['test', 'val', 'train']\n\n for key in keys:\n if key in data_cfg:\n if 'classes' in data_cfg[key]:\n classes = list(data_cfg[key]['classes'])\n break\n elif 'type' in data_cfg[key]:\n module = module_dict[data_cfg[key]['type']]\n break\n\n if classes is None and module is None:\n raise RuntimeError(f'No dataset config found in: {model_cfg}')\n\n if classes is not None:\n return classes\n else:\n return module.CLASSES\n\n\ndef build_object_detection_model(model_files: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config],\n device: str, **kwargs):\n \"\"\"Build object detection model for different backends.\n\n Args:\n model_files (Sequence[str]): Input model file(s).\n model_cfg (str | mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str | mmcv.Config): Input deployment config file or\n Config object.\n device (str): Device to input model\n\n Returns:\n End2EndModel: Detector for a configured backend.\n \"\"\"\n # load cfg if necessary\n deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)\n\n backend = get_backend(deploy_cfg)\n class_names = get_classes_from_config(model_cfg)\n\n partition_config = get_partition_config(deploy_cfg)\n if partition_config is not None:\n partition_type = partition_config.get('type', None)\n else:\n codebase_config = get_codebase_config(deploy_cfg)\n # Default Config is 'end2end'\n partition_type = codebase_config.get('model_type', 'end2end')\n\n backend_detector = __BACKEND_MODEL.build(\n partition_type,\n backend=backend,\n backend_files=model_files,\n class_names=class_names,\n device=device,\n model_cfg=model_cfg,\n deploy_cfg=deploy_cfg,\n **kwargs)\n\n return backend_detector\n" ]
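The mask post-processing in End2EndModel.postprocessing_masks above resamples each fixed-size (e.g. 28x28) mask to full image resolution by building a normalized sampling grid over the detection box and reading the mask with torch.nn.functional.grid_sample. Below is a minimal, self-contained sketch of that idea; the mask, box and image size are synthetic placeholders, not mmdeploy API calls.

import torch
import torch.nn.functional as F

mask = torch.rand(28, 28)                  # hypothetical soft mask for one detection
x0, y0, x1, y1 = 40.0, 30.0, 120.0, 90.0   # hypothetical box in image coordinates
img_h, img_w = 200, 320

# Map the target image's pixel centers into the box's [-1, 1] sampling range,
# mirroring the img_x/img_y computation in postprocessing_masks.
img_y = (torch.arange(img_h, dtype=torch.float32) + 0.5 - y0) / (y1 - y0) * 2 - 1
img_x = (torch.arange(img_w, dtype=torch.float32) + 0.5 - x0) / (x1 - x0) * 2 - 1
gx = img_x[None, :].expand(img_h, img_w)
gy = img_y[:, None].expand(img_h, img_w)
grid = torch.stack([gx, gy], dim=2)        # [H, W, 2], (x, y) ordering

# grid_sample reads the 28x28 mask at those positions; points outside the box
# fall outside [-1, 1] and come back as zeros (default padding_mode='zeros').
pasted = F.grid_sample(mask[None, None], grid[None], align_corners=False)
binary = (pasted >= 0.5)[0, 0]             # boolean [img_h, img_w] mask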
[ [ "numpy.argsort", "numpy.array" ], [ "torch.isinf", "torch.cat", "torch.from_numpy", "torch.tensor", "numpy.concatenate", "numpy.floor", "torch.arange", "torch.stack", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tcapelle/tsai
[ "36a2f704abf174515c55115832f08ea2d9753e14" ]
[ "tsai/models/MINIROCKET.py" ]
[ "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/111b_models.MINIROCKET.ipynb (unless otherwise specified).\n\n__all__ = ['MiniRocketClassifier', 'load_minirocket', 'MiniRocketRegressor', 'load_minirocket',\n 'MiniRocketVotingClassifier', 'get_minirocket_preds', 'MiniRocketVotingRegressor']\n\n# Cell\nimport sklearn\nfrom sklearn.metrics import make_scorer\nfrom sklearn.linear_model import RidgeCV, RidgeClassifierCV\nfrom sklearn.ensemble import VotingClassifier, VotingRegressor\nfrom ..imports import *\nfrom ..utils import *\nfrom ..data.external import *\nfrom .layers import *\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n# Cell\nclass MiniRocketClassifier(sklearn.pipeline.Pipeline):\n \"\"\"Time series classification using MINIROCKET features and a linear classifier\"\"\"\n def __init__(self, num_features=10_000, max_dilations_per_kernel=32, random_state=None,\n alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):\n \"\"\" MiniRocketClassifier is recommended for up to 10k time series.\n\n For a larger dataset, you can use MINIROCKET (in Pytorch).\n scoring = None --> defaults to accuracy.\n \"\"\"\n\n # Issue caused by sktime when upgraded 0.9.0 (changed num_features to num_kernels was resolved by\n # Siva Sai (SivaAndMe in GiHub)https://github.com/timeseriesAI/tsai/pull/306)\n\n try:\n import sktime\n from sktime.transformations.panel.rocket import MiniRocketMultivariate\n except ImportError:\n print(\"You need to install sktime to be able to use MiniRocketClassifier\")\n\n self.steps = [('minirocketmultivariate', MiniRocketMultivariate(num_kernels=num_features,\n max_dilations_per_kernel=max_dilations_per_kernel,\n random_state=random_state)),\n ('ridgeclassifiercv', RidgeClassifierCV(alphas=alphas,\n normalize=normalize_features,\n scoring=scoring,\n class_weight=class_weight,\n **kwargs))]\n store_attr()\n self._validate_steps()\n\n def __repr__(self):\n return f'Pipeline(steps={self.steps.copy()})'\n\n def save(self, fname=None, path='./models'):\n fname = ifnone(fname, 'MiniRocketClassifier')\n path = Path(path)\n filename = path/fname\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(f'{filename}.pkl', 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n# Cell\ndef load_minirocket(fname, path='./models'):\n path = Path(path)\n filename = path/fname\n with open(f'{filename}.pkl', 'rb') as input:\n output = pickle.load(input)\n return output\n\n# Cell\nclass MiniRocketRegressor(sklearn.pipeline.Pipeline):\n \"\"\"Time series regression using MINIROCKET features and a linear regressor\"\"\"\n def __init__(self, num_features=10000, max_dilations_per_kernel=32, random_state=None,\n alphas=np.logspace(-3, 3, 7), *, normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):\n \"\"\" MiniRocketRegressor is recommended for up to 10k time series.\n\n For a larger dataset, you can use MINIROCKET (in Pytorch).\n scoring = None --> defaults to r2.\n \"\"\"\n\n # Issue caused by sktime when upgraded 0.9.0 (changed num_features to num_kernels was resolved by\n # Siva Sai (SivaAndMe in GiHub)https://github.com/timeseriesAI/tsai/pull/306)\n\n try:\n import sktime\n from sktime.transformations.panel.rocket import MiniRocketMultivariate\n except ImportError:\n print(\"You need to install sktime to be able to use MiniRocketRegressor\")\n\n self.steps = [('minirocketmultivariate', MiniRocketMultivariate(num_kernels=num_features,\n 
max_dilations_per_kernel=max_dilations_per_kernel,\n random_state=random_state)),\n ('ridgecv', RidgeCV(alphas=alphas, normalize=normalize_features, scoring=scoring, **kwargs))]\n store_attr()\n self._validate_steps()\n\n def __repr__(self):\n return f'Pipeline(steps={self.steps.copy()})'\n\n def save(self, fname=None, path='./models'):\n fname = ifnone(fname, 'MiniRocketRegressor')\n path = Path(path)\n filename = path/fname\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(f'{filename}.pkl', 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n# Cell\ndef load_minirocket(fname, path='./models'):\n path = Path(path)\n filename = path/fname\n with open(f'{filename}.pkl', 'rb') as input:\n output = pickle.load(input)\n return output\n\n# Cell\nclass MiniRocketVotingClassifier(VotingClassifier):\n \"\"\"Time series classification ensemble using MINIROCKET features, a linear classifier and majority voting\"\"\"\n def __init__(self, n_estimators=5, weights=None, n_jobs=-1, num_features=10_000, max_dilations_per_kernel=32, random_state=None,\n alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):\n store_attr()\n\n try:\n import sktime\n from sktime.transformations.panel.rocket import MiniRocketMultivariate\n except ImportError:\n print(\"You need to install sktime to be able to use MiniRocketVotingClassifier\")\n\n estimators = [(f'est_{i}', MiniRocketClassifier(num_features=num_features, max_dilations_per_kernel=max_dilations_per_kernel,\n random_state=random_state, alphas=alphas, normalize_features=normalize_features, memory=memory,\n verbose=verbose, scoring=scoring, class_weight=class_weight, **kwargs))\n for i in range(n_estimators)]\n super().__init__(estimators, voting='hard', weights=weights, n_jobs=n_jobs, verbose=verbose)\n\n def __repr__(self):\n return f'MiniRocketVotingClassifier(n_estimators={self.n_estimators}, \\nsteps={self.estimators[0][1].steps})'\n\n def save(self, fname=None, path='./models'):\n fname = ifnone(fname, 'MiniRocketVotingClassifier')\n path = Path(path)\n filename = path/fname\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(f'{filename}.pkl', 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n# Cell\ndef get_minirocket_preds(X, fname, path='./models', model=None):\n if X.ndim == 1: X = X[np.newaxis][np.newaxis]\n elif X.ndim == 2: X = X[np.newaxis]\n if model is None:\n model = load_minirocket(fname=fname, path=path)\n return model.predict(X)\n\n# Cell\nclass MiniRocketVotingRegressor(VotingRegressor):\n \"\"\"Time series regression ensemble using MINIROCKET features, a linear regressor and a voting regressor\"\"\"\n def __init__(self, n_estimators=5, weights=None, n_jobs=-1, num_features=10_000, max_dilations_per_kernel=32, random_state=None,\n alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):\n store_attr()\n\n try:\n import sktime\n from sktime.transformations.panel.rocket import MiniRocketMultivariate\n except ImportError:\n print(\"You need to install sktime to be able to use MiniRocketVotingRegressor\")\n\n estimators = [(f'est_{i}', MiniRocketRegressor(num_features=num_features, max_dilations_per_kernel=max_dilations_per_kernel,\n random_state=random_state, alphas=alphas, normalize_features=normalize_features, memory=memory,\n verbose=verbose, scoring=scoring, **kwargs))\n for i in range(n_estimators)]\n super().__init__(estimators, weights=weights, 
n_jobs=n_jobs, verbose=verbose)\n\n def __repr__(self):\n return f'MiniRocketVotingRegressor(n_estimators={self.n_estimators}, \\nsteps={self.estimators[0][1].steps})'\n\n def save(self, fname=None, path='./models'):\n fname = ifnone(fname, 'MiniRocketVotingRegressor')\n path = Path(path)\n filename = path/fname\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(f'{filename}.pkl', 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)" ]
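A hedged usage sketch for the MiniRocket pipelines above. The data is synthetic, shaped (samples, channels, timesteps), and it assumes a recent sktime is installed; panel input conventions vary across sktime versions, so the 3D array may need converting on older releases.

import numpy as np
from tsai.models.MINIROCKET import MiniRocketClassifier, load_minirocket

X = np.random.randn(100, 3, 140)   # synthetic multivariate time series
y = np.random.randint(0, 2, 100)

clf = MiniRocketClassifier(num_features=10_000)
clf.fit(X, y)                      # plain sklearn Pipeline fit
print(clf.score(X, y))

clf.save('demo_minirocket')        # pickles to ./models/demo_minirocket.pkl
clf2 = load_minirocket('demo_minirocket')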
[ [ "sklearn.linear_model.RidgeCV", "sklearn.linear_model.RidgeClassifierCV" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
learsi1911/GAMA_pygmo_v4
[ "459807db352dd1c9f9c1e0e322f8c1e9b5abbca0", "459807db352dd1c9f9c1e0e322f8c1e9b5abbca0", "459807db352dd1c9f9c1e0e322f8c1e9b5abbca0" ]
[ "tests/unit/test_scikitlearn.py", "gama/search_methods/pygmo_search27.py", "gama/search_methods/pygmo_search25.py" ]
[ "import pandas as pd\nfrom sklearn.datasets import load_iris\nfrom gama.genetic_programming.compilers.scikitlearn import (\n evaluate_individual,\n compile_individual,\n evaluate_pipeline,\n)\nfrom gama.utilities.metrics import Metric, scoring_to_metric\n\n\ndef test_evaluate_individual(SS_BNB):\n import datetime\n\n reported_start_time = datetime.datetime.now()\n\n def fake_evaluate_pipeline(pipeline, *args, **kwargs):\n # predictions, scores, estimators, errors\n return None, (1.0,), [], None\n\n evaluation = evaluate_individual(\n SS_BNB, evaluate_pipeline=fake_evaluate_pipeline, add_length_to_score=True,\n )\n individual = evaluation.individual\n assert individual == SS_BNB\n assert hasattr(individual, \"fitness\")\n assert individual.fitness.values == (1.0, -2)\n assert (individual.fitness.start_time - reported_start_time).total_seconds() < 1.0\n\n\ndef test_compile_individual(SS_BNB):\n from sklearn.naive_bayes import BernoulliNB\n from sklearn.preprocessing import StandardScaler, MinMaxScaler\n\n pipeline = compile_individual(SS_BNB)\n assert 2 == len(pipeline.steps)\n assert isinstance(pipeline.steps[0][1], StandardScaler)\n assert isinstance(pipeline.steps[1][1], BernoulliNB)\n\n mm_scale = [(\"scaler\", MinMaxScaler())]\n extended_pipeline = compile_individual(SS_BNB, preprocessing_steps=mm_scale)\n assert 3 == len(extended_pipeline.steps)\n assert isinstance(extended_pipeline.steps[0][1], MinMaxScaler)\n assert isinstance(extended_pipeline.steps[1][1], StandardScaler)\n assert isinstance(extended_pipeline.steps[2][1], BernoulliNB)\n\n\ndef test_evaluate_pipeline(SS_BNB):\n x, y = load_iris(return_X_y=True)\n x, y = pd.DataFrame(x), pd.Series(y)\n\n prediction, scores, estimators, errors = evaluate_pipeline(\n SS_BNB.pipeline, x, y, timeout=60, metrics=scoring_to_metric(\"accuracy\"),\n )\n assert 1 == len(scores)\n assert errors is None\n assert 5 == len(estimators)\n assert prediction.shape == (150,)\n\n\ndef test_evaluate_invalid_pipeline(InvalidLinearSVC):\n x, y = load_iris(return_X_y=True)\n x, y = pd.DataFrame(x), pd.Series(y)\n\n prediction, scores, estimators, error = evaluate_pipeline(\n InvalidLinearSVC.pipeline,\n x,\n y,\n timeout=60,\n metrics=scoring_to_metric(\"accuracy\"),\n )\n assert (float(\"-inf\"),) == scores\n assert str(error).startswith(\"Unsupported set of arguments:\")\n assert str(error).endswith(\"penalty='l1', loss='squared_hinge', dual=True\")\n assert estimators is None\n assert prediction is None\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 26 16:56:01 2021\n\n@author: 20210595\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 26 15:51:34 2021\n\n@author: 20210595\n\"\"\"\n\nfrom gama.genetic_programming.components.individual import Individual\nfrom gama.genetic_programming.compilers.scikitlearn import compile_individual\nfrom gama.genetic_programming.components.primitive_node import PrimitiveNode\nfrom gama.genetic_programming.components.primitive import Primitive\nfrom gama.genetic_programming.components.terminal import Terminal\n\n# Packages for pygmo\nimport os\nimport pickle\nfrom numpy import genfromtxt\nimport numpy as np\nimport pygmo as pg\nfrom pygmo import *\nfrom gama.configuration.bounds_pygmo import (\n upperBound, \n lowerBound, \n vector_support,\n count_aux\n ) \nfrom gama.configuration.create_individuals import ValuesSearchSpace\n\nimport logging\nfrom functools import partial\nfrom typing import Optional, Any, Tuple, Dict, List, Callable\n\nimport pandas as pd\n\nfrom 
gama.genetic_programming.components import Individual\nfrom gama.genetic_programming.operator_set import OperatorSet\nfrom gama.logging.evaluation_logger import EvaluationLogger\nfrom gama.search_methods.base_search import BaseSearch\nfrom gama.utilities.generic.async_evaluator import AsyncEvaluator\n#from gama.utilities.generic.async_evaluator_pygmo import AsyncEvaluator\n\nlog = logging.getLogger(__name__)\n\n\nclass SearchPygmo(BaseSearch):\n \"\"\" Perform asynchronous evolutionary optimization.\n\n Parameters\n ----------\n population_size: int, optional (default=50)\n Maximum number of individuals in the population at any time.\n\n max_n_evaluations: int, optional (default=None)\n If specified, only a maximum of `max_n_evaluations` individuals are evaluated.\n If None, the algorithm will be run until interrupted by the user or a timeout.\n\n restart_callback: Callable[[], bool], optional (default=None)\n Function which takes no arguments and returns True if search restart.\n \"\"\"\n\n def __init__(\n self,\n population_size: Optional[int] = None,\n max_n_evaluations: Optional[int] = None,\n restart_callback: Optional[Callable[[], bool]] = None,\n ):\n super().__init__()\n # maps hyperparameter -> (set value, default)\n self._hyperparameters: Dict[str, Tuple[Any, Any]] = dict(\n population_size=(population_size, 50),\n restart_callback=(restart_callback, None),\n max_n_evaluations=(max_n_evaluations, None),\n )\n self.output = []\n\n def get_parent(evaluation, n) -> str:\n \"\"\" retrieves the nth parent if it exists, '' otherwise. \"\"\"\n if len(evaluation.individual.meta.get(\"parents\", [])) > n:\n return evaluation.individual.meta[\"parents\"][n]\n return \"\"\n\n self.logger = partial(\n EvaluationLogger,\n extra_fields=dict(\n parent0=partial(get_parent, n=0),\n parent1=partial(get_parent, n=1),\n origin=lambda e: e.individual.meta.get(\"origin\", \"unknown\"),\n ),\n )\n\n def dynamic_defaults(self, x: pd.DataFrame, y: pd.DataFrame, time_limit: float):\n pass\n\n def search(self, operations: OperatorSet, start_candidates: List[Individual]):\n self.output = pygmo_serach(\n operations, self.output, start_candidates, **self.hyperparameters\n ) \n\n# nueva = 2\n# variable = 'f%d' % nueva\n# with open('output.txt', 'w') as variable:\n# print(variable)\n# variable.write('Hi there!')\n \ndef loss_function(ops: OperatorSet, ind1: Individual) -> Individual:\n with AsyncEvaluator() as async_:\n async_.submit(ops.evaluate, ind1)\n future = ops.wait_next(async_)\n if future.exception is None:\n individual_prototype = future.result.individual\n return individual_prototype\n\nclass AutoMLProblem:\n #save_whiout_evaluation = []\n contador = 0\n def __init__(self, ops):\n self.operator = ops\n self.output = []\n self.count = 0\n self.name = \"parrot.pkl\"\n \n # Define objectives\n def fitness(self, x):\n path = 'C:/Users/20210595/Documents/PhD/Experiments/gamaPyGMO/gama-master/pickle_gama/'\n path = path + self.name\n AutoMLProblem.contador += 1\n instance_individual = ValuesSearchSpace(x)\n individual_from_x = instance_individual.get_individuals()\n individual_to_use = self._loss_function(self.operator, individual_from_x)\n self.output.append(individual_to_use)\n with open(path, 'wb') as f:\n pickle.dump(self.output, f)\n if AutoMLProblem.contador > 2:\n try:\n with open(path, 'rb') as f:\n self.output = pickle.load(f)\n self.output.append(individual_to_use)\n print('len self.output', len(self.output))\n with open(path, 'wb') as f:\n pickle.dump(self.output, f)\n except:\n print(\"entré a 
la excepción de pickle\")\n self.count += 1\n self.name = 'parrot%d' % self.count + \".pkl\"\n with open(path, 'wb') as f:\n pickle.dump(self.output, f)\n print(\"Individual evaluated with PyGMO Search Multi-Archipelago\", individual_to_use)\n f1 = individual_to_use.fitness.values[0]\n if f1 == -np.inf:\n f1 = -1000\n return [-f1]\n \n # Define bounds\n def get_bounds(self):\n lower = lowerBound\n upper = upperBound\n return (lower, upper)\n \n def _loss_function(self, ops: OperatorSet, ind1: Individual) -> Individual:\n #individual = ops.evaluate(ind1).individual\n print(\"Hi Pieter\", **AsyncEvaluator.defaults)\n #help(ops.evaluate)\n result = ops.evaluate(ind1)\n #result = ops.evaluate(ind1, **AsyncEvaluator.defaults)\n return result.individual\n \n # Return function name\n def get_name(self):\n return \"AutoMLProblem\"\n \n \ndef pygmo_serach(\n ops: OperatorSet,\n output: List[Individual],\n start_candidates: List[Individual],\n restart_callback: Optional[Callable[[], bool]] = None,\n max_n_evaluations: Optional[int] = None,\n population_size: int = 5,\n islands: int = 5,\n iters: int = 5,\n) -> List[Individual]:\n \n current_population = output\n \n # with AsyncEvaluator() as async_:\n # for individual in start_candidates:\n # async_.submit(ops.evaluate, individual)\n # future = ops.wait_next(async_)\n # if future.exception is None:\n # individual_prototype = future.result.individual\n # print(\"Individual start_candidates evaluated\", individual_prototype)\n # current_population.append(individual_prototype)\n # print(individual_prototype.fitness.values[0], type)\n # if individual_prototype.fitness.values[0] == -np.inf:\n # print(\"infinito\")\n print(AsyncEvaluator.defaults) \n print(\"START with pygmo\")\n algo = pg.algorithm(pg.de(gen = iters))\n prob = pg.problem(AutoMLProblem(ops)) \n # The initial population\n pop = pg.population(prob)\n class_support = AutoMLProblem(ops)\n x_vectors = genfromtxt('x_to_save.csv', delimiter=',')\n f_vectors = genfromtxt('f_to_save.csv', delimiter=',')\n for i in range(len(x_vectors)):\n if f_vectors[i] == -np.inf:\n f_vectors[i] = -10000\n pop.push_back(x = x_vectors[i].tolist(), f = [-f_vectors[i]])\n archi = pg.archipelago(n=4, algo=algo, pop=pop)\n print(\"CREATION OF THE ARCHIPELAGO, IT WILL START THE EVOLUTION IN PARALLEL\")\n print(archi) \n archi.get_champions_f() \n print(archi.get_champions_f()) \n archi.evolve()\n archi.wait()\n archi.wait_check()\n archi.get_champions_f() \n print(archi.get_champions_f()) \n print(\"IT JUST FINISH\")\n print(archi)\n archi.get_champions_f() \n print(archi.get_champions_f()) \n final_lista = []\n path = \"C:/Users/20210595/Documents/PhD/Experiments/gamaPyGMO/gama-master/pickle_gama/\"\n for root, dirs, files, in os.walk(path):\n for file in files:\n if file.endswith(\".pkl\"):\n with open(root, 'rb') as f:\n new_lista = pickle.load(f)\n final_lista = final_lista + new_lista\n print(\"All the individuals\", final_lista)\n print(\"Longitud final\", len(final_lista))\n current_population=final_lista\n return current_population\n\n ", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 26 14:50:10 2021\n\n@author: 20210595\n\"\"\"\n\nfrom gama.genetic_programming.components.individual import Individual\nfrom gama.genetic_programming.compilers.scikitlearn import compile_individual\nfrom gama.genetic_programming.components.primitive_node import PrimitiveNode\nfrom gama.genetic_programming.components.primitive import Primitive\nfrom gama.genetic_programming.components.terminal import Terminal\n\n# Packages 
for pygmo\nimport pickle\nfrom numpy import genfromtxt\nimport numpy as np\nimport pygmo as pg\nfrom pygmo import *\nfrom gama.configuration.bounds_pygmo import (\n upperBound, \n lowerBound, \n vector_support\n ) \nfrom gama.configuration.create_individuals import ValuesSearchSpace\n\nimport logging\nfrom functools import partial\nfrom typing import Optional, Any, Tuple, Dict, List, Callable\n\nimport pandas as pd\n\nfrom gama.genetic_programming.components import Individual\nfrom gama.genetic_programming.operator_set import OperatorSet\nfrom gama.logging.evaluation_logger import EvaluationLogger\nfrom gama.search_methods.base_search import BaseSearch\nfrom gama.utilities.generic.async_evaluator import AsyncEvaluator\n#from gama.utilities.generic.async_evaluator_pygmo import AsyncEvaluator\n\nlog = logging.getLogger(__name__)\n\n\nclass SearchPygmo(BaseSearch):\n \"\"\" Perform asynchronous evolutionary optimization.\n\n Parameters\n ----------\n population_size: int, optional (default=50)\n Maximum number of individuals in the population at any time.\n\n max_n_evaluations: int, optional (default=None)\n If specified, only a maximum of `max_n_evaluations` individuals are evaluated.\n If None, the algorithm will be run until interrupted by the user or a timeout.\n\n restart_callback: Callable[[], bool], optional (default=None)\n Function which takes no arguments and returns True if search restart.\n \"\"\"\n\n def __init__(\n self,\n population_size: Optional[int] = None,\n max_n_evaluations: Optional[int] = None,\n restart_callback: Optional[Callable[[], bool]] = None,\n ):\n super().__init__()\n # maps hyperparameter -> (set value, default)\n self._hyperparameters: Dict[str, Tuple[Any, Any]] = dict(\n population_size=(population_size, 50),\n restart_callback=(restart_callback, None),\n max_n_evaluations=(max_n_evaluations, None),\n )\n self.output = []\n\n def get_parent(evaluation, n) -> str:\n \"\"\" retrieves the nth parent if it exists, '' otherwise. 
\"\"\"\n if len(evaluation.individual.meta.get(\"parents\", [])) > n:\n return evaluation.individual.meta[\"parents\"][n]\n return \"\"\n\n self.logger = partial(\n EvaluationLogger,\n extra_fields=dict(\n parent0=partial(get_parent, n=0),\n parent1=partial(get_parent, n=1),\n origin=lambda e: e.individual.meta.get(\"origin\", \"unknown\"),\n ),\n )\n\n def dynamic_defaults(self, x: pd.DataFrame, y: pd.DataFrame, time_limit: float):\n pass\n\n def search(self, operations: OperatorSet, start_candidates: List[Individual]):\n self.output = pygmo_serach(\n operations, self.output, start_candidates, **self.hyperparameters\n ) \n\n# nueva = 2\n# variable = 'f%d' % nueva\n# with open('output.txt', 'w') as variable:\n# print(variable)\n# variable.write('Hi there!')\n \ndef loss_function(ops: OperatorSet, ind1: Individual) -> Individual:\n with AsyncEvaluator() as async_:\n async_.submit(ops.evaluate, ind1)\n future = ops.wait_next(async_)\n if future.exception is None:\n individual_prototype = future.result.individual\n return individual_prototype\n \n\nclass AutoMLProblem:\n #save_whiout_evaluation = []\n contador = 0\n def __init__(self, ops):\n self.operator = ops\n self.output = []\n self.count = 0\n \n # Define objectives\n def fitness(self, x):\n AutoMLProblem.contador += 1\n instance_individual = ValuesSearchSpace(x)\n individual_from_x = instance_individual.get_individuals()\n individual_to_use = self._loss_function(self.operator, individual_from_x)\n self.output.append(individual_to_use)\n with open('parrot.pkl', 'wb') as f:\n pickle.dump(self.output, f)\n if AutoMLProblem.contador > 2:\n with open('parrot.pkl', 'rb') as f:\n self.output = pickle.load(f)\n self.output.append(individual_to_use)\n print('len self.output', len(self.output))\n with open('parrot.pkl', 'wb') as f:\n pickle.dump(self.output, f)\n #AutoMLProblem.save_whiout_evaluation.append(individual_from_x)\n # self.output.append(individual_to_use)\n print(\"Individual evaluated with PyGMO Search Multi-Archipelago\", individual_to_use)\n #AutoMLProblem.save_ind.append(individual_to_use)\n #self.output = AutoMLProblem.save_ind\n f1 = individual_to_use.fitness.values[0]\n if f1 == -np.inf:\n f1 = -1000\n #print(AutoMLProblem.save_ind)\n return [-f1]\n \n # Define bounds\n def get_bounds(self):\n lower = lowerBound\n upper = upperBound\n return (lower, upper)\n \n def _loss_function(self, ops: OperatorSet, ind1: Individual) -> Individual:\n #individual = ops.evaluate(ind1).individual\n print(\"Hi Pieter\", **AsyncEvaluator.defaults)\n #help(ops.evaluate)\n result = ops.evaluate(ind1)\n #result = ops.evaluate(ind1, **AsyncEvaluator.defaults)\n return result.individual\n \n # Return function name\n def get_name(self):\n return \"AutoMLProblem\"\n \n \ndef pygmo_serach(\n ops: OperatorSet,\n output: List[Individual],\n start_candidates: List[Individual],\n restart_callback: Optional[Callable[[], bool]] = None,\n max_n_evaluations: Optional[int] = None,\n population_size: int = 5,\n islands: int = 5,\n iters: int = 5,\n) -> List[Individual]:\n \n current_population = output\n \n # with AsyncEvaluator() as async_:\n # for individual in start_candidates:\n # async_.submit(ops.evaluate, individual)\n # future = ops.wait_next(async_)\n # if future.exception is None:\n # individual_prototype = future.result.individual\n # print(\"Individual start_candidates evaluated\", individual_prototype)\n # current_population.append(individual_prototype)\n # print(individual_prototype.fitness.values[0], type)\n # if 
individual_prototype.fitness.values[0] == -np.inf:\n # print(\"infinito\")\n print(AsyncEvaluator.defaults) \n print(\"START with pygmo\")\n algo = pg.algorithm(pg.de(gen = iters))\n prob = pg.problem(AutoMLProblem(ops)) \n # The initial population\n pop = pg.population(prob)\n class_support = AutoMLProblem(ops)\n x_vectors = genfromtxt('x_to_save.csv', delimiter=',')\n f_vectors = genfromtxt('f_to_save.csv', delimiter=',')\n for i in range(len(x_vectors)):\n if f_vectors[i] == -np.inf:\n f_vectors[i] = -10000\n pop.push_back(x = x_vectors[i].tolist(), f = [-f_vectors[i]])\n archi = pg.archipelago(n=4, algo=algo, pop=pop)\n print(\"CREATION OF THE ARCHIPELAGO, IT WILL START THE EVOLUTION IN PARALLEL\")\n print(archi) \n archi.get_champions_f() \n print(archi.get_champions_f()) \n archi.evolve()\n archi.wait()\n archi.wait_check()\n archi.get_champions_f() \n print(archi.get_champions_f()) \n print(\"IT JUST FINISH\")\n print(archi)\n archi.get_champions_f() \n print(archi.get_champions_f()) \n with open('parrot.pkl', 'rb') as f:\n mynewlist = pickle.load(f)\n # x_of_island_champion = archi.get_champions_x()\n # final_output = []\n # for i in x_of_island_champion:\n # final_instance = ValuesSearchSpace(i)\n # individual_from_x = final_instance.get_individuals()\n # individual_to_use = loss_function(ops, individual_from_x)\n # final_output.append(individual_to_use)\n # current_population = current_population + final_output\n print(\"All the individuals\", mynewlist)\n print(\"Longitud final\", len(mynewlist))\n current_population=mynewlist\n return current_population\n\n " ]
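The core pygmo pattern in AutoMLProblem above is a user-defined problem exposing fitness (a one-element list that pygmo minimizes, hence the -f1 sign flip) and get_bounds, evolved on a multi-island archipelago. A self-contained sketch of the same pattern, with a sphere function standing in for the AutoML loss:

import pygmo as pg

class SphereProblem:
    def fitness(self, x):
        return [float((x * x).sum())]   # one objective; pygmo minimizes it
    def get_bounds(self):
        return ([-5.0] * 3, [5.0] * 3)  # (lower, upper) per dimension

prob = pg.problem(SphereProblem())
algo = pg.algorithm(pg.de(gen=10))
archi = pg.archipelago(n=4, algo=algo, prob=prob, pop_size=20)
archi.evolve()
archi.wait_check()                      # re-raises island exceptions, as above
print(archi.get_champions_f())          # one champion fitness per island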
[ [ "sklearn.datasets.load_iris", "pandas.Series", "pandas.DataFrame", "sklearn.preprocessing.MinMaxScaler" ], [ "numpy.genfromtxt" ], [ "numpy.genfromtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Pandinosaurus/doctr
[ "3d645ce7d3d4fe36aa53537d4e4f92507f6cd422", "7bbdf4b1e5f7e9a28a7047dcd13eb2a5501643ef" ]
[ "demo/app.py", "api/tests/routes/test_detection.py" ]
[ "# Copyright (C) 2021-2022, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport os\n\nimport matplotlib.pyplot as plt\nimport streamlit as st\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nimport cv2\nimport tensorflow as tf\n\ngpu_devices = tf.config.experimental.list_physical_devices('GPU')\nif any(gpu_devices):\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n\nfrom doctr.io import DocumentFile\nfrom doctr.models import ocr_predictor\nfrom doctr.utils.visualization import visualize_page\n\nDET_ARCHS = [\"db_resnet50\", \"db_mobilenet_v3_large\", \"linknet_resnet18_rotation\"]\nRECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_mobilenet_v3_small\", \"master\", \"sar_resnet31\"]\n\n\ndef main():\n\n # Wide mode\n st.set_page_config(layout=\"wide\")\n\n # Designing the interface\n st.title(\"docTR: Document Text Recognition\")\n # For newline\n st.write('\\n')\n # Instructions\n st.markdown(\"*Hint: click on the top-right corner of an image to enlarge it!*\")\n # Set the columns\n cols = st.columns((1, 1, 1, 1))\n cols[0].subheader(\"Input page\")\n cols[1].subheader(\"Segmentation heatmap\")\n cols[2].subheader(\"OCR output\")\n cols[3].subheader(\"Page reconstitution\")\n\n # Sidebar\n # File selection\n st.sidebar.title(\"Document selection\")\n # Disabling warning\n st.set_option('deprecation.showfileUploaderEncoding', False)\n # Choose your own image\n uploaded_file = st.sidebar.file_uploader(\"Upload files\", type=['pdf', 'png', 'jpeg', 'jpg'])\n if uploaded_file is not None:\n if uploaded_file.name.endswith('.pdf'):\n doc = DocumentFile.from_pdf(uploaded_file.read())\n else:\n doc = DocumentFile.from_images(uploaded_file.read())\n page_idx = st.sidebar.selectbox(\"Page selection\", [idx + 1 for idx in range(len(doc))]) - 1\n cols[0].image(doc[page_idx])\n\n # Model selection\n st.sidebar.title(\"Model selection\")\n det_arch = st.sidebar.selectbox(\"Text detection model\", DET_ARCHS)\n reco_arch = st.sidebar.selectbox(\"Text recognition model\", RECO_ARCHS)\n\n # For newline\n st.sidebar.write('\\n')\n\n if st.sidebar.button(\"Analyze page\"):\n\n if uploaded_file is None:\n st.sidebar.write(\"Please upload a document\")\n\n else:\n with st.spinner('Loading model...'):\n predictor = ocr_predictor(\n det_arch, reco_arch, pretrained=True,\n assume_straight_pages=(det_arch != \"linknet_resnet18_rotation\")\n )\n\n with st.spinner('Analyzing...'):\n\n # Forward the image to the model\n processed_batches = predictor.det_predictor.pre_processor([doc[page_idx]])\n out = predictor.det_predictor.model(processed_batches[0], return_model_output=True)\n seg_map = out[\"out_map\"]\n seg_map = tf.squeeze(seg_map[0, ...], axis=[2])\n seg_map = cv2.resize(seg_map.numpy(), (doc[page_idx].shape[1], doc[page_idx].shape[0]),\n interpolation=cv2.INTER_LINEAR)\n # Plot the raw heatmap\n fig, ax = plt.subplots()\n ax.imshow(seg_map)\n ax.axis('off')\n cols[1].pyplot(fig)\n\n # Plot OCR output\n out = predictor([doc[page_idx]])\n fig = visualize_page(out.pages[0].export(), doc[page_idx], interactive=False)\n cols[2].pyplot(fig)\n\n # Page reconsitution under input page\n page_export = out.pages[0].export()\n if det_arch != \"linknet_resnet18_rotation\":\n img = out.pages[0].synthesize()\n cols[3].image(img, clamp=True)\n\n # Display JSON\n st.markdown(\"\\nHere are your analysis results in JSON format:\")\n st.json(page_export)\n\n\nif __name__ == '__main__':\n 
main()\n", "import numpy as np\nimport pytest\nfrom scipy.optimize import linear_sum_assignment\n\nfrom doctr.utils.metrics import box_iou\n\n\[email protected]\nasync def test_text_detection(test_app_asyncio, mock_detection_image):\n\n response = await test_app_asyncio.post(\"/detection\", files={'file': mock_detection_image})\n assert response.status_code == 200\n json_response = response.json()\n\n gt_boxes = np.array([[1240, 430, 1355, 470], [1360, 430, 1495, 470]], dtype=np.float32)\n gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] / 1654\n gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] / 2339\n\n # Check that IoU with GT if reasonable\n assert isinstance(json_response, list) and len(json_response) == gt_boxes.shape[0]\n pred_boxes = np.array([elt['box'] for elt in json_response])\n iou_mat = box_iou(gt_boxes, pred_boxes)\n gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)\n is_kept = iou_mat[gt_idxs, pred_idxs] >= 0.8\n assert gt_idxs[is_kept].shape[0] == gt_boxes.shape[0]\n" ]
[ [ "matplotlib.pyplot.subplots", "tensorflow.config.experimental.list_physical_devices", "tensorflow.squeeze", "tensorflow.config.experimental.set_memory_growth" ], [ "scipy.optimize.linear_sum_assignment", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.4", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
apuranik1/rl-examples
[ "af807bd9311e056e8690ee4bc5abbc63a91381e9", "af807bd9311e056e8690ee4bc5abbc63a91381e9" ]
[ "rl_examples/approximate_td.py", "rl_examples/mountain_car.py" ]
[ "from typing import Deque, Tuple\nfrom collections import deque\n\nimport numpy as np\n\nfrom .psfa import PSFAAgent, PSFAEnvironment, TState, TAction\nfrom .approximation import TrainableEstimator, Featurizer\n\n\nclass ApproximationTDNAgent(PSFAAgent[TState, TAction]):\n \"\"\"A bootstrapping agent using n-step SARSA\"\"\"\n\n def __init__(\n self,\n env: PSFAEnvironment[TState, TAction],\n featurizer: Featurizer[Tuple[TState, TAction]],\n estimator: TrainableEstimator,\n exploration_rate: float,\n n: int,\n lr: float,\n use_average_reward: bool = False,\n ):\n self.env = env\n self.featurizer = featurizer\n self.estimator = estimator\n self.differential = use_average_reward\n self.avg_reward = 0.0\n self.epsilon = exploration_rate\n self.n = n\n self.lr = lr\n self.data_queue: Deque[Tuple[TState, TAction, float]] = deque()\n\n def action(self, state: TState) -> TAction:\n available_actions = self.env.get_actions(state)\n if np.random.rand() < self.epsilon:\n return np.random.choice(available_actions) # type: ignore\n else:\n batch_featurized = np.stack(\n [self._featurize(state, action) for action in available_actions]\n )\n value_estimates = self.estimator.batch_estimate(batch_featurized)\n max_idx: int = np.argmax(value_estimates)\n return available_actions[max_idx]\n\n def _evaluate_queue(self, trailing_rewards: float) -> float:\n est = trailing_rewards + sum(reward for s, a, reward in self.data_queue)\n if self.differential:\n return est - len(self.data_queue) * self.avg_reward\n else:\n return est\n\n def _featurize(self, state: TState, action: TAction) -> np.ndarray:\n return self.featurizer.featurize((state, action))\n\n def act_and_train(self, t: int) -> Tuple[TState, TAction, float]:\n state = self.env.state\n action = self.action(state)\n reward = self.env.take_action(action)\n if len(self.data_queue) == self.n:\n trailing_estimate = self.estimator.estimate(self._featurize(state, action))\n reward_estimate = self._evaluate_queue(trailing_estimate)\n old_state, old_action, old_reward = self.data_queue.popleft()\n current_estimate = self.estimator.estimate_and_update(\n self._featurize(old_state, old_action), reward_estimate\n )\n self.avg_reward += self.lr * (reward_estimate - current_estimate)\n self.data_queue.append((state, action, reward))\n return state, action, reward\n\n def episode_end(self) -> None:\n while self.data_queue:\n reward_estimate = self._evaluate_queue(0.0)\n old_state, old_action, old_reward = self.data_queue.popleft()\n current_estimate = self.estimator.estimate_and_update(\n self._featurize(old_state, old_action), reward_estimate\n )\n self.avg_reward += self.lr * (reward_estimate - current_estimate)\n", "import math\nfrom enum import Enum\nfrom typing import Sequence, Tuple\n\nimport numpy as np\n\nfrom rl_examples.approximation import Featurizer\nfrom rl_examples.psfa import PSFAEnvironment, State\n\n\nclass MountainCarState(State):\n def __init__(self, x: float, vx: float, terminal: bool):\n super().__init__(terminal)\n self.x = x\n self.vx = vx\n self.y = math.sin(3 * self.x)\n\n def g_x(self) -> float:\n \"\"\"Compute the acceleration due to gravity\"\"\"\n # corresponds to no physical reality\n return -2.5e-3 * math.cos(3 * self.x)\n\n def __str__(self) -> str:\n return f\"MountainCarState(x={self.x}, vx={self.vx})\"\n\n\nclass MountainCarAction(Enum):\n Forward = 1\n Neutral = 0\n Reverse = -1\n\n\nclass MountainCarEnvironment(PSFAEnvironment[MountainCarState, MountainCarAction]):\n\n MIN_X = -1.2\n MAX_X = 0.5\n MIN_VX = -0.07\n MAX_VX = 
0.07\n\n def __init__(self) -> None:\n self.reset()\n\n def reset(self) -> None:\n # randomly select starting position in [-0.6, 0.4)\n x = np.random.rand() - 0.6\n self._state = self.make_state(x, 0.0)\n\n @property\n def state(self) -> MountainCarState:\n return self._state\n\n def get_actions(\n self, state: MountainCarState = None\n ) -> Sequence[MountainCarAction]:\n return list(MountainCarAction)\n\n def take_action(self, action: MountainCarAction) -> float:\n old = self._state\n new_vx = np.clip(\n old.vx + 0.001 * action.value + old.g_x(),\n MountainCarEnvironment.MIN_VX,\n MountainCarEnvironment.MAX_VX,\n )\n new_x = old.x + new_vx\n self._state = self.make_state(new_x, new_vx)\n return -1.0\n\n @staticmethod\n def make_state(x: float, vx: float) -> MountainCarState:\n if x >= MountainCarEnvironment.MAX_X:\n return MountainCarState(MountainCarEnvironment.MAX_X, 0.0, True)\n elif x <= MountainCarEnvironment.MIN_X:\n return MountainCarState(MountainCarEnvironment.MIN_X, 0.0, False)\n else:\n return MountainCarState(x, vx, False)\n\n\nclass MountainCarFeaturizer(Featurizer[Tuple[MountainCarState, MountainCarAction]]):\n def __init__(self) -> None:\n self.indices = {action: i for i, action in enumerate(MountainCarAction)}\n self.action_len = len(self.indices)\n\n def featurize(self, data: Tuple[MountainCarState, MountainCarAction]) -> np.ndarray:\n \"\"\"Build the feature array [x, v_x, acceleration]\"\"\"\n state, action = data\n action_data = np.zeros(self.action_len)\n action_data[self.indices[action]] = 1\n return np.concatenate([action_data, [state.x, 100 * state.vx]])\n\n def output_shape(self) -> Sequence[int]:\n return [self.action_len + 2]\n" ]
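A hedged sketch wiring the mountain-car pieces above into a training loop. LinearEstimator is a hypothetical stand-in for the repo's TrainableEstimator interface (its estimate, batch_estimate and estimate_and_update methods are exactly the ones the agent calls), and the imports assume the rl_examples package is on the path.

import numpy as np
from rl_examples.approximate_td import ApproximationTDNAgent
from rl_examples.mountain_car import (MountainCarEnvironment,
                                      MountainCarFeaturizer)

class LinearEstimator:
    """Hypothetical linear value estimator: v(x) = w . x."""
    def __init__(self, n_features, lr=0.1):
        self.w = np.zeros(n_features)
        self.lr = lr
    def estimate(self, x):
        return float(self.w @ x)
    def batch_estimate(self, xs):
        return xs @ self.w
    def estimate_and_update(self, x, target):
        est = self.estimate(x)
        self.w += self.lr * (target - est) * x  # semi-gradient update
        return est

env = MountainCarEnvironment()
feat = MountainCarFeaturizer()
agent = ApproximationTDNAgent(env, feat,
                              LinearEstimator(feat.output_shape()[0]),
                              exploration_rate=0.1, n=4, lr=0.01)
for t in range(500):   # one truncated episode of n-step SARSA updates
    agent.act_and_train(t)
agent.episode_end()    # flush the remaining n-step reward queue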
[ [ "numpy.argmax", "numpy.random.rand", "numpy.random.choice" ], [ "numpy.concatenate", "numpy.zeros", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dukebw/imgaug
[ "eba6eef5808704926edce97de39af23cab18cb7f", "eba6eef5808704926edce97de39af23cab18cb7f" ]
[ "checks/check_perspective_transform.py", "imgaug/parameters.py" ]
[ "from __future__ import print_function, division\n\nimport numpy as np\n\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\n\n\ndef main():\n image = ia.data.quokka(size=0.5)\n kps = [\n ia.KeypointsOnImage(\n [\n ia.Keypoint(x=245, y=203),\n ia.Keypoint(x=365, y=195),\n ia.Keypoint(x=313, y=269),\n ],\n shape=(image.shape[0] * 2, image.shape[1] * 2),\n )\n ]\n kps[0] = kps[0].on(image.shape)\n print(\"image shape:\", image.shape)\n\n augs = [\n iaa.PerspectiveTransform(scale=0.01, name=\"pt001\", keep_size=True),\n iaa.PerspectiveTransform(scale=0.1, name=\"pt01\", keep_size=True),\n iaa.PerspectiveTransform(scale=0.2, name=\"pt02\", keep_size=True),\n iaa.PerspectiveTransform(scale=0.3, name=\"pt03\", keep_size=True),\n iaa.PerspectiveTransform(scale=(0, 0.3), name=\"pt00to03\", keep_size=True),\n ]\n\n print(\"original\", image.shape)\n ia.imshow(kps[0].draw_on_image(image))\n\n print(\"-----------------\")\n print(\"Random aug per image\")\n print(\"-----------------\")\n for aug in augs:\n images_aug = []\n for _ in range(16):\n aug_det = aug.to_deterministic()\n img_aug = aug_det.augment_image(image)\n kps_aug = aug_det.augment_keypoints(kps)[0]\n img_aug_kps = kps_aug.draw_on_image(img_aug)\n img_aug_kps = np.pad(\n img_aug_kps,\n ((1, 1), (1, 1), (0, 0)),\n mode=\"constant\",\n constant_values=255,\n )\n images_aug.append(img_aug_kps)\n print(aug.name)\n ia.imshow(ia.draw_grid(images_aug))\n\n print(\"----------------\")\n print(\"6 channels\")\n print(\"----------------\")\n image6 = np.dstack([image, image])\n image6_aug = augs[1].augment_image(image6)\n ia.imshow(np.hstack([image6_aug[..., 0:3], image6_aug[..., 3:6]]))\n\n\nif __name__ == \"__main__\":\n main()\n", "\"\"\"Classes and methods to use for parameters of augmenters.\n\nThis module contains e.g. classes representing probability\ndistributions (guassian, poisson etc.), classes representing noise sources\nand methods to normalize parameter-related user inputs.\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\nimport copy as copy_module\nfrom collections import defaultdict\nfrom abc import ABCMeta, abstractmethod\nimport tempfile\nfrom functools import reduce, wraps\nfrom operator import mul as mul_op\n\nimport numpy as np\nimport six\nimport six.moves as sm\nimport scipy\nimport scipy.stats\nimport imageio\nimport cv2\n\nfrom . import imgaug as ia\nfrom . import dtypes as iadt\nfrom . 
import random as iarandom\nfrom .external.opensimplex import OpenSimplex\n\n\n# Added in 0.5.0.\n_PREFETCHING_ENABLED = True\n# Added in 0.5.0.\n_NB_PREFETCH = 10000\n# Added in 0.5.0.\n_NB_PREFETCH_STRINGS = 1000\n\n\n# Added in 0.5.0.\ndef _prefetchable(func):\n @wraps(func)\n def _inner(*args, **kwargs):\n param = func(*args, **kwargs)\n return _wrap_leafs_of_param_in_prefetchers(param, _NB_PREFETCH)\n\n return _inner\n\n\n# Added in 0.5.0.\ndef _prefetchable_str(func):\n @wraps(func)\n def _inner(*args, **kwargs):\n param = func(*args, **kwargs)\n return _wrap_leafs_of_param_in_prefetchers(param, _NB_PREFETCH_STRINGS)\n\n return _inner\n\n\n# Added in 0.5.0.\ndef _wrap_param_in_prefetchers(param, nb_prefetch):\n for key, value in param.__dict__.items():\n if isinstance(value, StochasticParameter):\n param.__dict__[key] = _wrap_param_in_prefetchers(value, nb_prefetch)\n\n if param.prefetchable:\n return AutoPrefetcher(param, nb_prefetch)\n return param\n\n\n# Added in 0.5.0.\ndef _wrap_leafs_of_param_in_prefetchers(param, nb_prefetch):\n param_wrapped, _did_wrap_any_child = _wrap_leafs_of_param_in_prefetchers_recursive(\n param, nb_prefetch\n )\n return param_wrapped\n\n\n# Added in 0.5.0.\ndef _wrap_leafs_of_param_in_prefetchers_recursive(param, nb_prefetch):\n # Do not descent into AutoPrefetcher, otherwise we risk turning an\n # AutoPrefetcher(X) into AutoPrefetcher(AutoPrefetcher(X)) if X is\n # prefetchable\n if isinstance(param, AutoPrefetcher):\n # report did_wrap_any_child=True here, so that parent parameters\n # are not wrapped in prefetchers, which could lead to ugly scenarios\n # like AutoPrefetcher(Normal(AutoPrefetcher(Uniform(-1.0, 1.0))),\n return param, True\n\n if isinstance(param, (list, tuple)):\n result = []\n did_wrap_any_child = False\n for param_i in param:\n (\n param_i_wrapped,\n did_wrap_any_child_i,\n ) = _wrap_leafs_of_param_in_prefetchers_recursive(param_i, nb_prefetch)\n result.append(param_i_wrapped)\n did_wrap_any_child = did_wrap_any_child or did_wrap_any_child_i\n\n if not did_wrap_any_child:\n return param, False\n if isinstance(param, tuple):\n return tuple(result), did_wrap_any_child\n return result, did_wrap_any_child\n\n if not isinstance(param, StochasticParameter):\n return param, False\n\n did_wrap_any_child = False\n for key, value in param.__dict__.items():\n param_wrapped, did_wrap_i = _wrap_leafs_of_param_in_prefetchers_recursive(\n value, nb_prefetch\n )\n\n param.__dict__[key] = param_wrapped\n did_wrap_any_child = did_wrap_any_child or did_wrap_i\n\n if param.prefetchable and not did_wrap_any_child and _PREFETCHING_ENABLED:\n return AutoPrefetcher(param, nb_prefetch), True\n return param, did_wrap_any_child\n\n\ndef toggle_prefetching(enabled):\n \"\"\"Toggle prefetching on or off.\n\n Added in 0.5.0.\n\n Parameters\n ----------\n enabled : bool\n Whether enabled is activated (``True``) or off (``False``).\n\n \"\"\"\n # pylint: disable=global-statement\n global _PREFETCHING_ENABLED\n _PREFETCHING_ENABLED = enabled\n\n\nclass toggled_prefetching(object): # pylint: disable=invalid-name\n \"\"\"Context that toggles prefetching on or off depending on a flag.\n\n Added in 0.5.0.\n\n Parameters\n ----------\n enabled : bool\n Whether enabled is activated (``True``) or off (``False``).\n\n \"\"\"\n\n # Added in 0.5.0.\n def __init__(self, enabled):\n self.enabled = enabled\n self._old_state = None\n\n # Added in 0.5.0.\n def __enter__(self):\n # pylint: disable=global-statement\n global _PREFETCHING_ENABLED\n self._old_state = 
_PREFETCHING_ENABLED\n _PREFETCHING_ENABLED = self.enabled\n\n # Added in 0.5.0.\n def __exit__(self, exception_type, exception_value, exception_traceback):\n # pylint: disable=global-statement\n global _PREFETCHING_ENABLED\n _PREFETCHING_ENABLED = self._old_state\n\n\nclass no_prefetching(toggled_prefetching): # pylint: disable=invalid-name\n \"\"\"Context that deactivates prefetching.\n\n Added in 0.5.0.\n\n \"\"\"\n\n # Added in 0.5.0.\n def __init__(self):\n super(no_prefetching, self).__init__(False)\n\n\ndef _check_value_range(value, name, value_range):\n if value_range is None:\n return True\n\n if isinstance(value_range, tuple):\n assert len(value_range) == 2, (\n \"If 'value_range' is a tuple, it must contain exactly 2 entries, \"\n \"got %d.\" % (len(value_range),)\n )\n\n if value_range[0] is None and value_range[1] is None:\n return True\n\n if value_range[0] is None:\n assert value <= value_range[1], (\n \"Parameter '%s' is outside of the expected value \"\n \"range (x <= %.4f)\" % (name, value_range[1])\n )\n return True\n\n if value_range[1] is None:\n assert value_range[0] <= value, (\n \"Parameter '%s' is outside of the expected value \"\n \"range (%.4f <= x)\" % (name, value_range[0])\n )\n return True\n\n assert value_range[0] <= value <= value_range[1], (\n \"Parameter '%s' is outside of the expected value \"\n \"range (%.4f <= x <= %.4f)\" % (name, value_range[0], value_range[1])\n )\n\n return True\n\n if ia.is_callable(value_range):\n value_range(value)\n return True\n\n raise Exception(\"Unexpected input for value_range, got %s.\" % (str(value_range),))\n\n\n# FIXME this uses _check_value_range, which checks for a<=x<=b, but a produced\n# Uniform parameter has value range a<=x<b.\ndef handle_continuous_param(\n param,\n name,\n value_range=None,\n tuple_to_uniform=True,\n list_to_choice=True,\n prefetch=True,\n):\n result = None\n\n if ia.is_single_number(param):\n _check_value_range(param, name, value_range)\n result = Deterministic(param)\n elif tuple_to_uniform and isinstance(param, tuple):\n assert len(param) == 2, (\n \"Expected parameter '%s' with type tuple to have exactly two \"\n \"entries, but got %d.\" % (name, len(param))\n )\n assert all(\n [ia.is_single_number(v) for v in param]\n ), \"Expected parameter '%s' with type tuple to only contain \" \"numbers, got %s.\" % (\n name,\n [type(v) for v in param],\n )\n _check_value_range(param[0], name, value_range)\n _check_value_range(param[1], name, value_range)\n result = Uniform(param[0], param[1])\n elif list_to_choice and ia.is_iterable(param) and not isinstance(param, tuple):\n assert all(\n [ia.is_single_number(v) for v in param]\n ), \"Expected iterable parameter '%s' to only contain numbers, \" \"got %s.\" % (\n name,\n [type(v) for v in param],\n )\n for param_i in param:\n _check_value_range(param_i, name, value_range)\n result = Choice(param)\n elif isinstance(param, StochasticParameter):\n result = param\n\n if result is not None:\n if prefetch:\n return _wrap_leafs_of_param_in_prefetchers(result, _NB_PREFETCH)\n return result\n\n allowed_type = \"number\"\n list_str = \", list of %s\" % (allowed_type,) if list_to_choice else \"\"\n raise Exception(\n \"Expected %s, tuple of two %s%s or StochasticParameter for %s, \"\n \"got %s.\"\n % (\n allowed_type,\n allowed_type,\n list_str,\n name,\n type(param),\n )\n )\n\n\ndef handle_discrete_param(\n param,\n name,\n value_range=None,\n tuple_to_uniform=True,\n list_to_choice=True,\n allow_floats=True,\n prefetch=True,\n):\n result = None\n\n if 
ia.is_single_integer(param) or (allow_floats and ia.is_single_float(param)):\n _check_value_range(param, name, value_range)\n result = Deterministic(int(param))\n elif tuple_to_uniform and isinstance(param, tuple):\n assert len(param) == 2, (\n \"Expected parameter '%s' with type tuple to have exactly two \"\n \"entries, but got %d.\" % (name, len(param))\n )\n is_valid_types = all(\n [\n ia.is_single_number(v) if allow_floats else ia.is_single_integer(v)\n for v in param\n ]\n )\n assert (\n is_valid_types\n ), \"Expected parameter '%s' of type tuple to only contain %s, \" \"got %s.\" % (\n name,\n \"number\" if allow_floats else \"integer\",\n [type(v) for v in param],\n )\n\n _check_value_range(param[0], name, value_range)\n _check_value_range(param[1], name, value_range)\n result = DiscreteUniform(int(param[0]), int(param[1]))\n elif list_to_choice and ia.is_iterable(param) and not isinstance(param, tuple):\n is_valid_types = all(\n [\n ia.is_single_number(v) if allow_floats else ia.is_single_integer(v)\n for v in param\n ]\n )\n assert (\n is_valid_types\n ), \"Expected iterable parameter '%s' to only contain %s, \" \"got %s.\" % (\n name,\n \"number\" if allow_floats else \"integer\",\n [type(v) for v in param],\n )\n\n for param_i in param:\n _check_value_range(param_i, name, value_range)\n result = Choice([int(param_i) for param_i in param])\n elif isinstance(param, StochasticParameter):\n result = param\n\n if result is not None:\n if prefetch:\n return _wrap_leafs_of_param_in_prefetchers(result, _NB_PREFETCH)\n return result\n\n allowed_type = \"number\" if allow_floats else \"int\"\n list_str = \", list of %s\" % (allowed_type,) if list_to_choice else \"\"\n raise Exception(\n \"Expected %s, tuple of two %s%s or StochasticParameter for %s, \"\n \"got %s.\"\n % (\n allowed_type,\n allowed_type,\n list_str,\n name,\n type(param),\n )\n )\n\n\n# Added in 0.4.0.\ndef handle_categorical_string_param(param, name, valid_values=None, prefetch=True):\n result = None\n\n if param == ia.ALL and valid_values is not None:\n result = Choice(list(valid_values))\n elif ia.is_string(param):\n if valid_values is not None:\n assert (\n param in valid_values\n ), \"Expected parameter '%s' to be one of: %s. Got: %s.\" % (\n name,\n \", \".join(list(valid_values)),\n param,\n )\n result = Deterministic(param)\n elif isinstance(param, list):\n assert all([ia.is_string(val) for val in param]), (\n \"Expected list provided for parameter '%s' to only contain \"\n \"strings, got types: %s.\"\n % (name, \", \".join([type(v).__name__ for v in param]))\n )\n if valid_values is not None:\n assert all([val in valid_values for val in param]), (\n \"Expected list provided for parameter '%s' to only contain \"\n \"the following allowed strings: %s. 
Got strings: %s.\"\n % (name, \", \".join(valid_values), \", \".join(param))\n )\n result = Choice(param)\n elif isinstance(param, StochasticParameter):\n result = param\n\n # we currently prefetch only 1k values here instead of 10k, because\n # strings might be rather long\n if result is not None:\n if prefetch:\n return _wrap_leafs_of_param_in_prefetchers(result, _NB_PREFETCH_STRINGS)\n return result\n\n raise Exception(\n \"Expected parameter '%s' to be%s a string, a list of \"\n \"strings or StochasticParameter, got %s.\"\n % (\n name,\n \" imgaug.ALL,\" if valid_values is not None else \"\",\n type(param).__name__,\n )\n )\n\n\ndef handle_discrete_kernel_size_param(\n param, name, value_range=(1, None), allow_floats=True, prefetch=True\n):\n # pylint: disable=invalid-name\n\n result = None, None\n if ia.is_single_integer(param) or (allow_floats and ia.is_single_float(param)):\n _check_value_range(param, name, value_range)\n result = Deterministic(int(param)), None\n elif isinstance(param, tuple):\n assert len(param) == 2, (\n \"Expected parameter '%s' with type tuple to have exactly two \"\n \"entries, but got %d.\" % (name, len(param))\n )\n if all([ia.is_single_integer(param_i) for param_i in param]) or (\n allow_floats and all([ia.is_single_float(param_i) for param_i in param])\n ):\n _check_value_range(param[0], name, value_range)\n _check_value_range(param[1], name, value_range)\n result = DiscreteUniform(int(param[0]), int(param[1])), None\n elif all([isinstance(param_i, StochasticParameter) for param_i in param]):\n result = param[0], param[1]\n else:\n handled = (\n handle_discrete_param(\n param[0], \"%s[0]\" % (name,), value_range, allow_floats=allow_floats\n ),\n handle_discrete_param(\n param[1], \"%s[1]\" % (name,), value_range, allow_floats=allow_floats\n ),\n )\n\n result = handled\n elif ia.is_iterable(param) and not isinstance(param, tuple):\n is_valid_types = all(\n [\n ia.is_single_number(v) if allow_floats else ia.is_single_integer(v)\n for v in param\n ]\n )\n assert (\n is_valid_types\n ), \"Expected iterable parameter '%s' to only contain %s, \" \"got %s.\" % (\n name,\n \"number\" if allow_floats else \"integer\",\n [type(v) for v in param],\n )\n\n for param_i in param:\n _check_value_range(param_i, name, value_range)\n result = Choice([int(param_i) for param_i in param]), None\n elif isinstance(param, StochasticParameter):\n result = param, None\n\n result_pf = []\n for v in result:\n if v is not None and prefetch:\n v = _wrap_leafs_of_param_in_prefetchers(v, _NB_PREFETCH)\n result_pf.append(v)\n\n if result_pf != [None, None]:\n return tuple(result_pf)\n\n raise Exception(\n \"Expected int, tuple/list with 2 entries or StochasticParameter. 
\"\n \"Got %s.\" % (type(param),)\n )\n\n\ndef handle_probability_param(\n param, name, tuple_to_uniform=False, list_to_choice=False, prefetch=True\n):\n eps = 1e-6\n\n result = None\n\n if param in [True, False, 0, 1]:\n result = Deterministic(int(param))\n elif ia.is_single_number(param):\n assert (\n 0.0 <= param <= 1.0\n ), \"Expected probability of parameter '%s' to be in the interval \" \"[0.0, 1.0], got %.4f.\" % (\n name,\n param,\n )\n if 0.0 - eps < param < 0.0 + eps or 1.0 - eps < param < 1.0 + eps:\n return Deterministic(int(np.round(param)))\n result = Binomial(param)\n elif tuple_to_uniform and isinstance(param, tuple):\n assert all(\n [ia.is_single_number(v) for v in param]\n ), \"Expected parameter '%s' of type tuple to only contain numbers, \" \"got %s.\" % (\n name,\n [type(v) for v in param],\n )\n assert len(param) == 2, (\n \"Expected parameter '%s' of type tuple to contain exactly 2 \"\n \"entries, got %d.\" % (name, len(param))\n )\n assert 0 <= param[0] <= 1.0 and 0 <= param[1] <= 1.0, (\n \"Expected parameter '%s' of type tuple to contain two \"\n \"probabilities in the interval [0.0, 1.0]. \"\n \"Got values %.4f and %.4f.\" % (name, param[0], param[1])\n )\n result = Binomial(Uniform(param[0], param[1]))\n elif list_to_choice and ia.is_iterable(param):\n assert all(\n [ia.is_single_number(v) for v in param]\n ), \"Expected iterable parameter '%s' to only contain numbers, \" \"got %s.\" % (\n name,\n [type(v) for v in param],\n )\n assert all([0 <= p_i <= 1.0 for p_i in param]), (\n \"Expected iterable parameter '%s' to only contain probabilities \"\n \"in the interval [0.0, 1.0], got values %s.\"\n % (name, \", \".join([\"%.4f\" % (p_i,) for p_i in param]))\n )\n result = Binomial(Choice(param))\n elif isinstance(param, StochasticParameter):\n result = param\n\n if result is not None:\n if prefetch:\n return _wrap_leafs_of_param_in_prefetchers(result, _NB_PREFETCH)\n return result\n\n raise Exception(\n \"Expected boolean or number or StochasticParameter for %s, \"\n \"got %s.\"\n % (\n name,\n type(param),\n )\n )\n\n\ndef force_np_float_dtype(val):\n if val.dtype.kind == \"f\":\n return val\n return val.astype(np.float64)\n\n\ndef both_np_float_if_one_is_float(a, b):\n # pylint: disable=invalid-name\n a_f = a.dtype.type in ia.NP_FLOAT_TYPES\n b_f = b.dtype.type in ia.NP_FLOAT_TYPES\n if a_f and b_f:\n return a, b\n if a_f:\n return a, b.astype(np.float64)\n if b_f:\n return a.astype(np.float64), b\n return a.astype(np.float64), b.astype(np.float64)\n\n\ndef draw_distributions_grid(\n params, rows=None, cols=None, graph_sizes=(350, 350), sample_sizes=None, titles=None\n):\n if titles is None:\n titles = [None] * len(params)\n elif titles is False:\n titles = [False] * len(params)\n\n if sample_sizes is not None:\n images = [\n param_i.draw_distribution_graph(size=size_i, title=title_i)\n for param_i, size_i, title_i in zip(params, sample_sizes, titles)\n ]\n else:\n images = [\n param_i.draw_distribution_graph(title=title_i)\n for param_i, title_i in zip(params, titles)\n ]\n\n images_rs = ia.imresize_many_images(images, sizes=graph_sizes)\n grid = ia.draw_grid(images_rs, rows=rows, cols=cols)\n return grid\n\n\ndef show_distributions_grid(\n params, rows=None, cols=None, graph_sizes=(350, 350), sample_sizes=None, titles=None\n):\n ia.imshow(\n draw_distributions_grid(\n params,\n graph_sizes=graph_sizes,\n sample_sizes=sample_sizes,\n rows=rows,\n cols=cols,\n titles=titles,\n )\n )\n\n\[email protected]_metaclass(ABCMeta)\nclass StochasticParameter(object):\n 
\"\"\"Abstract parent class for all stochastic parameters.\n\n Stochastic parameters are here all parameters from which values are\n supposed to be sampled. Usually the sampled values are to a degree random.\n E.g. a stochastic parameter may be the uniform distribution over the\n interval ``[-10, 10]``. Samples from that distribution (and therefore the\n stochastic parameter) could be ``5.2``, ``-3.7``, ``-9.7``, ``6.4``, etc.\n\n \"\"\"\n\n def __init__(self):\n pass\n\n @property\n def prefetchable(self):\n \"\"\"Determines whether this parameter may be prefetched.\n\n Added in 0.5.0.\n\n Returns\n -------\n bool\n Whether to allow prefetching of this parameter's samples.\n This should usually only be ``True`` for parameters that actually\n perform random sampling, i.e. depend on an RNG.\n\n \"\"\"\n return False\n\n def draw_sample(self, random_state=None):\n \"\"\"\n Draws a single sample value from this parameter.\n\n Parameters\n ----------\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n A seed or random number generator to use during the sampling\n process. If ``None``, the global RNG will be used.\n See also :func:`~imgaug.augmenters.meta.Augmenter.__init__`\n for a similar parameter with more details.\n\n Returns\n -------\n any\n A single sample value.\n\n \"\"\"\n return self.draw_samples(1, random_state=random_state)[0]\n\n def draw_samples(self, size, random_state=None):\n \"\"\"Draw one or more samples from the parameter.\n\n Parameters\n ----------\n size : tuple of int or int\n Number of samples by dimension.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n A seed or random number generator to use during the sampling\n process. If ``None``, the global RNG will be used.\n See also :func:`~imgaug.augmenters.meta.Augmenter.__init__`\n for a similar parameter with more details.\n\n Returns\n -------\n ndarray\n Sampled values. Usually a numpy ndarray of basically any dtype,\n though not strictly limited to numpy arrays. Its shape is expected\n to match `size`.\n\n \"\"\"\n if not isinstance(random_state, iarandom.RNG):\n random_state = iarandom.RNG(random_state)\n samples = self._draw_samples(\n size if not ia.is_single_integer(size) else tuple([size]), random_state\n )\n random_state.advance_()\n return samples\n\n @abstractmethod\n def _draw_samples(self, size, random_state):\n raise NotImplementedError()\n\n def __add__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Add(self, other)\n raise Exception(\n \"Invalid datatypes in: StochasticParameter + %s. \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __sub__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Subtract(self, other)\n raise Exception(\n \"Invalid datatypes in: StochasticParameter - %s. \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __mul__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Multiply(self, other)\n raise Exception(\n \"Invalid datatypes in: StochasticParameter * %s. 
\"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __pow__(self, other, z=None):\n if z is not None:\n raise NotImplementedError(\n \"Modulo power is currently not supported by \" \"StochasticParameter.\"\n )\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Power(self, other)\n raise Exception(\n \"Invalid datatypes in: StochasticParameter ** %s. \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __div__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Divide(self, other)\n raise Exception(\n \"Invalid datatypes in: StochasticParameter / %s. \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __truediv__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Divide(self, other)\n raise Exception(\n \"Invalid datatypes in: StochasticParameter / %s (truediv). \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __floordiv__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Discretize(Divide(self, other))\n raise Exception(\n \"Invalid datatypes in: StochasticParameter // %s (floordiv). \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __radd__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Add(other, self)\n raise Exception(\n \"Invalid datatypes in: %s + StochasticParameter. \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __rsub__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Subtract(other, self)\n raise Exception(\n \"Invalid datatypes in: %s - StochasticParameter. \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __rmul__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Multiply(other, self)\n raise Exception(\n \"Invalid datatypes in: %s * StochasticParameter. \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __rpow__(self, other, z=None):\n if z is not None:\n raise NotImplementedError(\n \"Modulo power is currently not supported by \" \"StochasticParameter.\"\n )\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Power(other, self)\n raise Exception(\n \"Invalid datatypes in: %s ** StochasticParameter. \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __rdiv__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Divide(other, self)\n raise Exception(\n \"Invalid datatypes in: %s / StochasticParameter. \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __rtruediv__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Divide(other, self)\n raise Exception(\n \"Invalid datatypes in: %s / StochasticParameter (rtruediv). 
\"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def __rfloordiv__(self, other):\n if ia.is_single_number(other) or isinstance(other, StochasticParameter):\n return Discretize(Divide(other, self))\n raise Exception(\n \"Invalid datatypes in: StochasticParameter // %s (rfloordiv). \"\n \"Expected second argument to be number or \"\n \"StochasticParameter.\" % (type(other),)\n )\n\n def copy(self):\n \"\"\"Create a shallow copy of this parameter.\n\n Returns\n -------\n imgaug.parameters.StochasticParameter\n Shallow copy.\n\n \"\"\"\n return copy_module.copy(self)\n\n def deepcopy(self):\n \"\"\"Create a deep copy of this parameter.\n\n Returns\n -------\n imgaug.parameters.StochasticParameter\n Deep copy.\n\n \"\"\"\n return copy_module.deepcopy(self)\n\n def draw_distribution_graph(self, title=None, size=(1000, 1000), bins=100):\n \"\"\"Generate an image visualizing the parameter's sample distribution.\n\n Parameters\n ----------\n title : None or False or str, optional\n Title of the plot. ``None`` is automatically replaced by a title\n derived from ``str(param)``. If set to ``False``, no title will be\n shown.\n\n size : tuple of int\n Number of points to sample. This is always expected to have at\n least two values. The first defines the number of sampling runs,\n the second (and further) dimensions define the size assigned\n to each :func:`~imgaug.parameters.StochasticParameter.draw_samples`\n call. E.g. ``(10, 20, 15)`` will lead to ``10`` calls of\n ``draw_samples(size=(20, 15))``. The results will be merged to a\n single 1d array.\n\n bins : int\n Number of bins in the plot histograms.\n\n Returns\n -------\n data : (H,W,3) ndarray\n Image of the plot.\n\n \"\"\"\n # import only when necessary (faster startup; optional dependency;\n # less fragile -- see issue #225)\n import matplotlib.pyplot as plt\n\n points = []\n for _ in sm.xrange(size[0]):\n points.append(self.draw_samples(size[1:]).flatten())\n points = np.concatenate(points)\n\n fig = plt.figure()\n fig.add_subplot(111)\n ax = fig.gca()\n heights, bins = np.histogram(points, bins=bins)\n heights = heights / sum(heights)\n ax.bar(\n bins[:-1],\n heights,\n width=(max(bins) - min(bins)) / len(bins),\n color=\"blue\",\n alpha=0.75,\n )\n\n if title is None:\n title = str(self)\n if title is not False:\n # split long titles - otherwise matplotlib generates errors\n title_fragments = [title[i : i + 50] for i in sm.xrange(0, len(title), 50)]\n ax.set_title(\"\\n\".join(title_fragments))\n fig.tight_layout(pad=0)\n\n with tempfile.NamedTemporaryFile(mode=\"wb+\", suffix=\".png\") as f:\n # We don't add bbox_inches='tight' here so that\n # draw_distributions_grid has an easier time combining many plots.\n # Note that we could also use 'f.name' here instead of 'f', but\n # that fails on Windows.\n fig.savefig(f, format=\"png\")\n\n # Use f.seek() here, because otherwise we get an error that\n # the file was not a png image.\n f.seek(0)\n data = imageio.imread(f, pilmode=\"RGB\", format=\"png\")[..., 0:3]\n\n plt.close()\n\n return data\n\n\nclass AutoPrefetcher(StochasticParameter):\n \"\"\"Parameter that prefetches random samples from a child parameter.\n\n This parameter will fetch ``N`` random samples in one big swoop and then\n return ``M`` of these samples upon each call, with ``M << N``.\n This improves the sampling efficiency by performing as few sampling\n calls as possible.\n\n This parameter will only start to prefetch after the first call.\n In some cases this prevents 
inefficiencies when augmenters are only used\n once. (Though this only works if the respective augmenter performs\n a single sampling call per batch and not one call per image.)\n\n This parameter will throw away its prefetched samples if a new RNG\n is provided (compared to the previous call). It will however ignore the\n state of the RNG.\n\n This parameter should only wrap leaf nodes. In something like\n ``Add(1, Normal(Uniform(0, 1), Uniform(0, 2)))`` it should only be applied\n to the two ``Uniform`` instances. Otherwise, only a single sample of\n ``Uniform(0, 1)`` might be taken and influence thousands of samples of\n ``Normal``.\n\n Note that the samples returned by this parameter are part of a larger\n array. In-place changes to these samples should hence be performed with\n some caution.\n\n Added in 0.5.0.\n\n \"\"\"\n\n # Added in 0.5.0.\n def __init__(self, other_param, nb_prefetch):\n super(AutoPrefetcher, self).__init__()\n self.other_param = other_param\n self.nb_prefetch = nb_prefetch\n\n self.samples = None\n self.index = 0\n self.last_rng_idx = None\n\n # Added in 0.5.0.\n def _draw_samples(self, size, random_state):\n # pylint: disable=protected-access\n if not _PREFETCHING_ENABLED:\n return self.other_param.draw_samples(size, random_state)\n\n if self.last_rng_idx is None or random_state._idx != self.last_rng_idx:\n self.last_rng_idx = random_state._idx\n self.samples = None\n return self.other_param.draw_samples(size, random_state)\n\n self.last_rng_idx = random_state._idx\n\n nb_components = reduce(mul_op, size)\n\n if nb_components >= self.nb_prefetch:\n return self.other_param.draw_samples(size, random_state)\n\n if self.samples is None:\n self._prefetch(random_state)\n\n leftover = len(self.samples) - self.index - nb_components\n if leftover <= 0:\n self._prefetch(random_state)\n\n samples = self.samples[self.index : self.index + nb_components]\n self.index += nb_components\n\n return samples.reshape(size)\n\n # Added in 0.5.0.\n def _prefetch(self, random_state):\n samples = self.other_param.draw_samples((self.nb_prefetch,), random_state)\n if self.samples is None:\n self.samples = samples\n else:\n self.samples = np.concatenate([self.samples[self.index :], samples], axis=0)\n self.index = 0\n\n # Added in 0.5.0.\n def __getattr__(self, attr):\n other_param = super(AutoPrefetcher, self).__getattribute__(\"other_param\")\n return getattr(other_param, attr)\n\n # Added in 0.5.0.\n def __repr__(self):\n return self.__str__()\n\n # Added in 0.5.0.\n def __str__(self):\n has_samples = self.samples is not None\n return (\n \"AutoPrefetcher(\"\n \"nb_prefetch=%d, \"\n \"samples=%s (dtype %s), \"\n \"index=%d, \"\n \"last_rng_idx=%s, \"\n \"other_param=%s\"\n \")\"\n % (\n self.nb_prefetch,\n self.samples.shape if has_samples else \"None\",\n self.samples.dtype.name if has_samples else \"None\",\n self.index,\n self.last_rng_idx,\n str(self.other_param),\n )\n )\n\n\nclass Deterministic(StochasticParameter):\n \"\"\"Parameter that is a constant value.\n\n If ``N`` values are sampled from this parameter, it will return ``N`` times\n ``V``, where ``V`` is the constant value.\n\n Parameters\n ----------\n value : number or str or imgaug.parameters.StochasticParameter\n A constant value to use.\n A string may be provided to generate arrays of strings.\n If this is a StochasticParameter, a single value will be sampled\n from it exactly once and then used as the constant value.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Deterministic(10)\n 
>>> param.draw_sample()\n 10\n\n Will always sample the value 10.\n\n \"\"\"\n\n def __init__(self, value):\n super(Deterministic, self).__init__()\n\n if isinstance(value, StochasticParameter):\n self.value = value.draw_sample()\n elif ia.is_single_number(value) or ia.is_string(value):\n self.value = value\n else:\n raise Exception(\n \"Expected StochasticParameter object or number or \"\n \"string, got %s.\" % (type(value),)\n )\n\n def _draw_samples(self, size, random_state):\n kwargs = {}\n if ia.is_single_integer(self.value):\n kwargs = {\"dtype\": np.int32}\n elif ia.is_single_float(self.value):\n kwargs = {\"dtype\": np.float32}\n return np.full(size, self.value, **kwargs)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n if ia.is_single_integer(self.value):\n return \"Deterministic(int %d)\" % (self.value,)\n if ia.is_single_float(self.value):\n return \"Deterministic(float %.8f)\" % (self.value,)\n return \"Deterministic(%s)\" % (str(self.value),)\n\n\n# TODO replace two-value parameters used in tests with this\nclass DeterministicList(StochasticParameter):\n \"\"\"Parameter that repeats elements from a list in the given order.\n\n E.g. if samples of shape ``(A, B, C)`` are requested, this parameter will\n return the first ``A*B*C`` elements, reshaped to ``(A, B, C)`` from the\n provided list. If the list contains less than ``A*B*C`` elements, it\n will (by default) be tiled until it is long enough (i.e. the sampling\n will start again at the first element, if necessary multiple times).\n\n Added in 0.4.0.\n\n Parameters\n ----------\n values : ndarray or iterable of number\n An iterable of values to sample from in the order within the iterable.\n\n \"\"\"\n\n # Added in 0.4.0.\n def __init__(self, values):\n super(DeterministicList, self).__init__()\n\n assert ia.is_iterable(\n values\n ), \"Expected to get an iterable as input, got type %s.\" % (\n type(values).__name__,\n )\n assert len(values) > 0, \"Expected to get at least one value, got \" \"zero.\"\n\n if ia.is_np_array(values):\n # this would not be able to handle e.g. 
[[1, 2], [3]] and output\n # dtype object due to the non-regular shape, hence we have the\n # else block\n self.values = values.flatten()\n else:\n self.values = np.array(list(ia.flatten(values)))\n kind = self.values.dtype.kind\n\n # limit to 32bit instead of 64bit for efficiency\n if kind == \"i\":\n self.values = self.values.astype(np.int32)\n elif kind == \"f\":\n self.values = self.values.astype(np.float32)\n\n # Added in 0.4.0.\n def _draw_samples(self, size, random_state):\n nb_requested = int(np.prod(size))\n values = self.values\n if nb_requested > self.values.size:\n # we don't use itertools.cycle() here, as that would require\n # running through a loop potentially many times (as `size` can\n # be very large), which would be slow\n multiplier = int(np.ceil(nb_requested / values.size))\n values = np.tile(values, (multiplier,))\n return values[:nb_requested].reshape(size)\n\n # Added in 0.4.0.\n def __repr__(self):\n return self.__str__()\n\n # Added in 0.4.0.\n def __str__(self):\n if self.values.dtype.kind == \"f\":\n values = [\"%.4f\" % (value,) for value in self.values]\n return \"DeterministicList([%s])\" % (\", \".join(values),)\n return \"DeterministicList(%s)\" % (str(self.values.tolist()),)\n\n\nclass Choice(StochasticParameter):\n \"\"\"Parameter that samples values from a list of allowed values.\n\n Parameters\n ----------\n a : iterable\n List of allowed values.\n Usually expected to be ``int`` s, ``float`` s or ``str`` s.\n May also contain ``StochasticParameter`` s. Each\n ``StochasticParameter`` that is randomly picked will automatically be\n replaced by a sample of itself (or by ``N`` samples if the parameter\n was picked ``N`` times).\n\n replace : bool, optional\n Whether to perform sampling with or without replacement.\n\n p : None or iterable of number, optional\n Probabilities of each element in `a`.\n Must have the same length as `a` (if provided).\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Choice([5, 17, 25], p=[0.25, 0.5, 0.25])\n >>> sample = param.draw_sample()\n >>> assert sample in [5, 17, 25]\n\n Create and sample from a parameter, which will produce with ``50%``\n probability the sample ``17`` and in the other ``50%`` of all cases the\n sample ``5`` or ``25``.\n\n \"\"\"\n\n def __init__(self, a, replace=True, p=None):\n # pylint: disable=invalid-name\n super(Choice, self).__init__()\n\n assert ia.is_iterable(\n a\n ), \"Expected a to be an iterable (e.g. 
list), got %s.\" % (type(a),)\n self.a = a\n self.replace = replace\n if p is not None:\n assert ia.is_iterable(\n p\n ), \"Expected p to be None or an iterable, got %s.\" % (type(p),)\n assert len(p) == len(\n a\n ), \"Expected lengths of a and p to be identical, \" \"got %d and %d.\" % (\n len(a),\n len(p),\n )\n self.p = p\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return self.replace\n\n def _draw_samples(self, size, random_state):\n if any([isinstance(a_i, StochasticParameter) for a_i in self.a]):\n rngs = random_state.duplicate(1 + len(self.a))\n samples = rngs[0].choice(\n self.a, np.prod(size), replace=self.replace, p=self.p\n )\n\n # collect the sampled parameters and how many samples must be taken\n # from each of them\n params_counter = defaultdict(lambda: 0)\n for sample in samples:\n if isinstance(sample, StochasticParameter):\n key = str(sample)\n params_counter[key] += 1\n\n # collect per parameter once the required number of samples\n # iterate here over self.a to always use the same seed for\n # the same parameter\n # TODO this might fail if the same parameter is added multiple\n # times to self.a?\n # TODO this will fail if a parameter cant handle size=(N,)\n param_to_samples = dict()\n for i, param in enumerate(self.a):\n key = str(param)\n if key in params_counter:\n param_to_samples[key] = param.draw_samples(\n size=(params_counter[key],), random_state=rngs[1 + i]\n )\n\n # assign the values sampled from the parameters to the `samples`\n # array by replacing the respective parameter\n param_to_readcount = defaultdict(lambda: 0)\n for i, sample in enumerate(samples):\n if isinstance(sample, StochasticParameter):\n key = str(sample)\n readcount = param_to_readcount[key]\n samples[i] = param_to_samples[key][readcount]\n param_to_readcount[key] += 1\n\n samples = samples.reshape(size)\n else:\n samples = random_state.choice(self.a, size, replace=self.replace, p=self.p)\n\n dtype = samples.dtype\n if dtype.itemsize * 8 > 32:\n # strings have kind \"U\"\n kind = dtype.kind\n if kind == \"i\":\n samples = samples.astype(np.int32)\n elif kind == \"u\":\n samples = samples.astype(np.uint32)\n elif kind == \"f\":\n samples = samples.astype(np.float32)\n\n return samples\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"Choice(a=%s, replace=%s, p=%s)\" % (\n str(self.a),\n str(self.replace),\n str(self.p),\n )\n\n\nclass Binomial(StochasticParameter):\n \"\"\"Binomial distribution.\n\n Parameters\n ----------\n p : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Probability of the binomial distribution. 
Expected to be in the\n interval ``[0.0, 1.0]``.\n\n * If a single ``number``, this ``number`` will be used as a\n constant value.\n * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n sampled from the continuous interval ``[a, b)`` once per call.\n * If a ``list`` of ``number``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`Binomial.draw_sample` or\n :func:`Binomial.draw_samples`.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Binomial(Uniform(0.01, 0.2))\n\n Create a binomial distribution that uses a varying probability between\n ``0.01`` and ``0.2``, randomly and uniformly estimated once per sampling\n call.\n\n \"\"\"\n\n def __init__(self, p):\n super(Binomial, self).__init__()\n self.p = handle_continuous_param(p, \"p\")\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n p = self.p.draw_sample(random_state=random_state)\n assert (\n 0 <= p <= 1.0\n ), \"Expected probability p to be in the interval [0.0, 1.0], \" \"got %.4f.\" % (\n p,\n )\n return random_state.binomial(1, p, size).astype(np.int32)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"Binomial(%s)\" % (self.p,)\n\n\nclass DiscreteUniform(StochasticParameter):\n \"\"\"Uniform distribution over the discrete interval ``[a..b]``.\n\n Parameters\n ----------\n a : int or tuple of int or list of int or imgaug.parameters.StochasticParameter\n Lower bound of the interval.\n If ``a>b``, `a` and `b` will automatically be flipped.\n If ``a==b``, all generated values will be identical to `a`.\n\n * If a single ``int``, this ``int`` will be used as a\n constant value.\n * If a ``tuple`` of two ``int`` s ``(a, b)``, the value will be\n sampled from the discrete interval ``[a..b]`` once per call.\n * If a ``list`` of ``int``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`DiscreteUniform.draw_sample` or\n :func:`DiscreteUniform.draw_samples`.\n\n b : int or imgaug.parameters.StochasticParameter\n Upper bound of the interval. 
Analogous to `a`.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.DiscreteUniform(10, Choice([20, 30, 40]))\n >>> sample = param.draw_sample()\n >>> assert 10 <= sample <= 40\n\n Create a discrete uniform distribution whose interval differs between\n calls and can be ``[10..20]``, ``[10..30]`` or ``[10..40]``.\n\n \"\"\"\n\n def __init__(self, a, b):\n # pylint: disable=invalid-name\n super(DiscreteUniform, self).__init__()\n\n self.a = handle_discrete_param(a, \"a\")\n self.b = handle_discrete_param(b, \"b\")\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n # pylint: disable=invalid-name\n a = self.a.draw_sample(random_state=random_state)\n b = self.b.draw_sample(random_state=random_state)\n if a > b:\n a, b = b, a\n elif a == b:\n return np.full(size, a, dtype=np.int32)\n return random_state.integers(a, b + 1, size, dtype=np.int32)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"DiscreteUniform(%s, %s)\" % (self.a, self.b)\n\n\nclass Poisson(StochasticParameter):\n \"\"\"Parameter that resembles a poisson distribution.\n\n A poisson distribution with ``lambda=0`` has its highest probability at\n point ``0`` and decreases quickly from there.\n Poisson distributions are discrete and never negative.\n\n Parameters\n ----------\n lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Lambda parameter of the poisson distribution.\n\n * If a single ``number``, this ``number`` will be used as a\n constant value.\n * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n sampled from the continuous interval ``[a, b)`` once per call.\n * If a ``list`` of ``number``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`Poisson.draw_sample` or\n :func:`Poisson.draw_samples`.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Poisson(1)\n >>> sample = param.draw_sample()\n >>> assert sample >= 0\n\n Create a poisson distribution with ``lambda=1`` and sample a value from\n it.\n\n \"\"\"\n\n def __init__(self, lam):\n super(Poisson, self).__init__()\n\n self.lam = handle_continuous_param(lam, \"lam\")\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n lam = self.lam.draw_sample(random_state=random_state)\n lam = max(lam, 0)\n\n return random_state.poisson(lam=lam, size=size).astype(np.int32)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"Poisson(%s)\" % (self.lam,)\n\n\nclass Normal(StochasticParameter):\n \"\"\"Parameter that resembles a normal/gaussian distribution.\n\n Parameters\n ----------\n loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n The mean of the normal distribution.\n\n * If a single ``number``, this ``number`` will be used as a\n constant value.\n * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n sampled from the continuous interval ``[a, b)`` once per call.\n * If a ``list`` of ``number``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per 
call.\n\n \"per call\" denotes a call of :func:`Normal.draw_sample` or\n :func:`Normal.draw_samples`.\n\n scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n The standard deviation of the normal distribution.\n If this parameter reaches ``0``, the output array will be filled with\n `loc`.\n Datatype behaviour is analogous to `loc`.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Normal(Choice([-1.0, 1.0]), 1.0)\n\n Create a gaussian distribution with a mean that differs per call.\n Sampled values may sometimes follow ``N(-1.0, 1.0)`` and sometimes\n ``N(1.0, 1.0)``.\n\n \"\"\"\n\n def __init__(self, loc, scale):\n super(Normal, self).__init__()\n\n self.loc = handle_continuous_param(loc, \"loc\")\n self.scale = handle_continuous_param(scale, \"scale\", value_range=(0, None))\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n loc = self.loc.draw_sample(random_state=random_state)\n scale = self.scale.draw_sample(random_state=random_state)\n assert scale >= 0, \"Expected scale to be >=0, got %.4f.\" % (scale,)\n if scale == 0:\n return np.full(size, loc, dtype=np.float32)\n return random_state.normal(loc, scale, size=size).astype(np.float32)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"Normal(loc=%s, scale=%s)\" % (self.loc, self.scale)\n\n\n# TODO docstring for parameters is outdated\nclass TruncatedNormal(StochasticParameter):\n \"\"\"Parameter that resembles a truncated normal distribution.\n\n A truncated normal distribution is similar to a normal distribution,\n except the domain is smoothly bounded to a min and max value.\n\n This is a wrapper around :func:`scipy.stats.truncnorm`.\n\n Parameters\n ----------\n loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n The mean of the normal distribution.\n\n * If a single ``number``, this ``number`` will be used as a\n constant value.\n * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n sampled from the continuous interval ``[a, b)`` once per call.\n * If a ``list`` of ``number``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`TruncatedNormal.draw_sample` or\n :func:`TruncatedNormal.draw_samples`.\n\n scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n The standard deviation of the normal distribution.\n If this parameter reaches ``0``, the output array will be filled with\n `loc`.\n Datatype behaviour is the same as for `loc`.\n\n low : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n The minimum value of the truncated normal distribution.\n Datatype behaviour is the same as for `loc`.\n\n high : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n The maximum value of the truncated normal distribution.\n Datatype behaviour is the same as for `loc`.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.TruncatedNormal(0, 5.0, low=-10, high=10)\n >>> samples = param.draw_samples(100, random_state=0)\n >>> assert np.all(samples >= -10)\n >>> assert np.all(samples <= 10)\n\n Create a truncated normal distribution with its minimum at ``-10.0``\n and its 
maximum at ``10.0``.\n\n \"\"\"\n\n def __init__(self, loc, scale, low=-np.inf, high=np.inf):\n super(TruncatedNormal, self).__init__()\n\n self.loc = handle_continuous_param(loc, \"loc\")\n self.scale = handle_continuous_param(scale, \"scale\", value_range=(0, None))\n self.low = handle_continuous_param(low, \"low\")\n self.high = handle_continuous_param(high, \"high\")\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n # pylint: disable=invalid-name\n loc = self.loc.draw_sample(random_state=random_state)\n scale = self.scale.draw_sample(random_state=random_state)\n low = self.low.draw_sample(random_state=random_state)\n high = self.high.draw_sample(random_state=random_state)\n seed = random_state.generate_seed_()\n if low > high:\n low, high = high, low\n assert scale >= 0, \"Expected scale to be >=0, got %.4f.\" % (scale,)\n if scale == 0:\n return np.full(size, fill_value=loc, dtype=np.float32)\n a = (low - loc) / scale\n b = (high - loc) / scale\n tnorm = scipy.stats.truncnorm(a=a, b=b, loc=loc, scale=scale)\n\n # Using a seed here works with both np.random interfaces.\n # Last time tried, scipy crashed when providing just\n # random_state.generator on the new np.random interface.\n return tnorm.rvs(size=size, random_state=seed).astype(np.float32)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"TruncatedNormal(loc=%s, scale=%s, low=%s, high=%s)\" % (\n self.loc,\n self.scale,\n self.low,\n self.high,\n )\n\n\nclass Laplace(StochasticParameter):\n \"\"\"Parameter that resembles a (continuous) laplace distribution.\n\n This is a wrapper around numpy's :func:`numpy.random.laplace`.\n\n Parameters\n ----------\n loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n The position of the distribution peak, similar to the mean in normal\n distributions.\n\n * If a single ``number``, this ``number`` will be used as a\n constant value.\n * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n sampled from the continuous interval ``[a, b)`` once per call.\n * If a ``list`` of ``number``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`Laplace.draw_sample` or\n :func:`Laplace.draw_samples`.\n\n scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n The exponential decay factor, similar to the standard deviation in\n gaussian distributions.\n If this parameter reaches ``0``, the output array will be filled with\n `loc`.\n Datatype behaviour is analogous to `loc`.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Laplace(0, 1.0)\n\n Create a laplace distribution whose peak is at ``0`` and whose decay is\n ``1.0``.\n\n \"\"\"\n\n def __init__(self, loc, scale):\n super(Laplace, self).__init__()\n\n self.loc = handle_continuous_param(loc, \"loc\")\n self.scale = handle_continuous_param(scale, \"scale\", value_range=(0, None))\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n loc = self.loc.draw_sample(random_state=random_state)\n scale = self.scale.draw_sample(random_state=random_state)\n assert scale >= 0, \"Expected scale to be >=0, got 
%s.\" % (scale,)\n if scale == 0:\n return np.full(size, loc, dtype=np.float32)\n return random_state.laplace(loc, scale, size=size).astype(np.float32)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"Laplace(loc=%s, scale=%s)\" % (self.loc, self.scale)\n\n\nclass ChiSquare(StochasticParameter):\n \"\"\"Parameter that resembles a (continuous) chi-square distribution.\n\n This is a wrapper around numpy's :func:`numpy.random.chisquare`.\n\n Parameters\n ----------\n df : int or tuple of two int or list of int or imgaug.parameters.StochasticParameter\n Degrees of freedom. Expected value range is ``[1, inf)``.\n\n * If a single ``int``, this ``int`` will be used as a\n constant value.\n * If a ``tuple`` of two ``int`` s ``(a, b)``, the value will be\n sampled from the discrete interval ``[a..b]`` once per call.\n * If a ``list`` of ``int``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`ChiSquare.draw_sample` or\n :func:`ChiSquare.draw_samples`.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.ChiSquare(df=2)\n\n Create a chi-square distribution with two degrees of freedom.\n\n \"\"\"\n\n def __init__(self, df):\n # pylint: disable=invalid-name\n super(ChiSquare, self).__init__()\n\n self.df = handle_discrete_param(df, \"df\", value_range=(1, None))\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n # pylint: disable=invalid-name\n df = self.df.draw_sample(random_state=random_state)\n assert df >= 1, \"Expected df to be >=1, got %d.\" % (df,)\n return random_state.chisquare(df, size=size).astype(np.float32)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"ChiSquare(df=%s)\" % (self.df,)\n\n\nclass Weibull(StochasticParameter):\n \"\"\"\n Parameter that resembles a (continuous) weibull distribution.\n\n This is a wrapper around numpy's :func:`numpy.random.weibull`.\n\n Parameters\n ----------\n a : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Shape parameter of the distribution.\n\n * If a single ``number``, this ``number`` will be used as a\n constant value.\n * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n sampled from the continuous interval ``[a, b)`` once per call.\n * If a ``list`` of ``number``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`Weibull.draw_sample` or\n :func:`Weibull.draw_samples`.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Weibull(a=0.5)\n\n Create a weibull distribution with shape 0.5.\n\n \"\"\"\n\n def __init__(self, a):\n # pylint: disable=invalid-name\n super(Weibull, self).__init__()\n\n self.a = handle_continuous_param(a, \"a\", value_range=(0.0001, None))\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n # pylint: disable=invalid-name\n a = self.a.draw_sample(random_state=random_state)\n assert a > 0, \"Expected a to be >0, got %.4f.\" % (a,)\n return random_state.weibull(a, size=size).astype(np.float32)\n\n def 
__repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"Weibull(a=%s)\" % (self.a,)\n\n\n# TODO rename (a, b) to (low, high) as in numpy?\nclass Uniform(StochasticParameter):\n \"\"\"Parameter that resembles a uniform distribution over ``[a, b)``.\n\n Parameters\n ----------\n a : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Lower bound of the interval.\n If ``a>b``, `a` and `b` will automatically be flipped.\n If ``a==b``, all generated values will be identical to `a`.\n\n * If a single ``number``, this ``number`` will be used as a\n constant value.\n * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n sampled from the continuous interval ``[a, b)`` once per call.\n * If a ``list`` of ``number``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`Uniform.draw_sample` or\n :func:`Uniform.draw_samples`.\n\n b : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Upper bound of the interval. Analogous to `a`.\n\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Uniform(0, 10.0)\n >>> sample = param.draw_sample()\n >>> assert 0 <= sample < 10.0\n\n Create and sample from a uniform distribution over ``[0, 10.0)``.\n\n \"\"\"\n\n def __init__(self, a, b):\n # pylint: disable=invalid-name\n super(Uniform, self).__init__()\n\n self.a = handle_continuous_param(a, \"a\")\n self.b = handle_continuous_param(b, \"b\")\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n # pylint: disable=invalid-name\n a = self.a.draw_sample(random_state=random_state)\n b = self.b.draw_sample(random_state=random_state)\n if a > b:\n a, b = b, a\n elif a == b:\n return np.full(size, a, dtype=np.float32)\n return random_state.uniform(a, b, size).astype(np.float32)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"Uniform(%s, %s)\" % (self.a, self.b)\n\n\nclass Beta(StochasticParameter):\n \"\"\"Parameter that resembles a (continuous) beta distribution.\n\n Parameters\n ----------\n alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n alpha parameter of the beta distribution.\n Expected value range is ``(0, inf)``. Values below ``0`` are\n automatically clipped to ``0+epsilon``.\n\n * If a single ``number``, this ``number`` will be used as a\n constant value.\n * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n sampled from the continuous interval ``[a, b)`` once per call.\n * If a ``list`` of ``number``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`Beta.draw_sample` or\n :func:`Beta.draw_samples`.\n\n beta : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Beta parameter of the beta distribution. Analogous to `alpha`.\n\n epsilon : number\n Clipping parameter. 
If `alpha` or `beta` end up ``<=0``, they are clipped to ``0+epsilon``.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Beta(0.4, 0.6)\n\n Create a beta distribution with ``alpha=0.4`` and ``beta=0.6``.\n\n \"\"\"\n\n def __init__(self, alpha, beta, epsilon=0.0001):\n super(Beta, self).__init__()\n\n self.alpha = handle_continuous_param(alpha, \"alpha\")\n self.beta = handle_continuous_param(beta, \"beta\")\n\n assert ia.is_single_number(\n epsilon\n ), \"Expected epsilon to be a number, got type %s.\" % (type(epsilon),)\n self.epsilon = epsilon\n\n # Added in 0.5.0.\n @property\n def prefetchable(self):\n \"\"\"See :func:`StochasticParameter.prefetchable`.\"\"\"\n return True\n\n def _draw_samples(self, size, random_state):\n alpha = self.alpha.draw_sample(random_state=random_state)\n beta = self.beta.draw_sample(random_state=random_state)\n alpha = max(alpha, self.epsilon)\n beta = max(beta, self.epsilon)\n return random_state.beta(alpha, beta, size=size).astype(np.float32)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"Beta(%s, %s)\" % (self.alpha, self.beta)\n\n\nclass FromLowerResolution(StochasticParameter):\n \"\"\"Parameter to sample from other parameters at lower image resolutions.\n\n This parameter is intended to be used with parameters that would usually\n sample one value per pixel (or one value per pixel and channel). Instead\n of sampling from the other parameter at full resolution, it samples at\n lower resolution, e.g. ``0.5*H x 0.5*W`` with ``H`` being the height and\n ``W`` being the width. After the low-resolution sampling this parameter\n then upscales the result to ``HxW``.\n\n This parameter is intended to produce coarse samples. E.g. combining\n this with :class:`Binomial` can lead to large rectangular areas of\n ``1`` s and ``0`` s.\n\n Parameters\n ----------\n other_param : imgaug.parameters.StochasticParameter\n The other parameter which is to be sampled on a coarser image.\n\n size_percent : None or number or iterable of number or imgaug.parameters.StochasticParameter, optional\n Size of the 2d sampling plane in percent of the requested size.\n I.e. this is relative to the size provided in the call to\n ``draw_samples(size)``. Lower values will result in smaller sampling\n planes, which are then upsampled to `size`. This means that lower\n values will result in larger rectangles. The size may be provided as\n a constant value or a tuple ``(a, b)``, which will automatically be\n converted to the continuous uniform range ``[a, b)`` or a\n :class:`StochasticParameter`, which will be queried per call to\n :func:`FromLowerResolution.draw_sample` and\n :func:`FromLowerResolution.draw_samples`.\n\n size_px : None or number or iterable of numbers or imgaug.parameters.StochasticParameter, optional\n Size of the 2d sampling plane in pixels.\n Lower values will result in smaller sampling planes, which are then\n upsampled to the input `size` of ``draw_samples(size)``.\n This means that lower values will result in larger rectangles.\n The size may be provided as a constant value or a tuple ``(a, b)``,\n which will automatically be converted to the discrete uniform\n range ``[a..b]`` or a :class:`StochasticParameter`, which will be\n queried once per call to :func:`FromLowerResolution.draw_sample` and\n :func:`FromLowerResolution.draw_samples`.\n\n method : str or int or imgaug.parameters.StochasticParameter, optional\n Upsampling/interpolation method to use. 
\n
\n
class FromLowerResolution(StochasticParameter):\n
    \"\"\"Parameter to sample from other parameters at lower image resolutions.\n
\n
    This parameter is intended to be used with parameters that would usually\n
    sample one value per pixel (or one value per pixel and channel). Instead\n
    of sampling from the other parameter at full resolution, it samples at\n
    lower resolution, e.g. ``0.5*H x 0.5*W`` with ``H`` being the height and\n
    ``W`` being the width. After the low-resolution sampling this parameter\n
    then upscales the result to ``HxW``.\n
\n
    This parameter is intended to produce coarse samples. E.g. combining\n
    this with :class:`Binomial` can lead to large rectangular areas of\n
    ``1`` s and ``0`` s.\n
\n
    Parameters\n
    ----------\n
    other_param : imgaug.parameters.StochasticParameter\n
        The other parameter which is to be sampled on a coarser image.\n
\n
    size_percent : None or number or iterable of number or imgaug.parameters.StochasticParameter, optional\n
        Size of the 2d sampling plane in percent of the requested size.\n
        I.e. this is relative to the size provided in the call to\n
        ``draw_samples(size)``. Lower values will result in smaller sampling\n
        planes, which are then upsampled to `size`. This means that lower\n
        values will result in larger rectangles. The size may be provided as\n
        a constant value or a tuple ``(a, b)``, which will automatically be\n
        converted to the continuous uniform range ``[a, b)`` or a\n
        :class:`StochasticParameter`, which will be queried per call to\n
        :func:`FromLowerResolution.draw_sample` and\n
        :func:`FromLowerResolution.draw_samples`.\n
\n
    size_px : None or number or iterable of numbers or imgaug.parameters.StochasticParameter, optional\n
        Size of the 2d sampling plane in pixels.\n
        Lower values will result in smaller sampling planes, which are then\n
        upsampled to the input `size` of ``draw_samples(size)``.\n
        This means that lower values will result in larger rectangles.\n
        The size may be provided as a constant value or a tuple ``(a, b)``,\n
        which will automatically be converted to the discrete uniform\n
        range ``[a..b]`` or a :class:`StochasticParameter`, which will be\n
        queried once per call to :func:`FromLowerResolution.draw_sample` and\n
        :func:`FromLowerResolution.draw_samples`.\n
\n
    method : str or int or imgaug.parameters.StochasticParameter, optional\n
        Upsampling/interpolation method to use. This is used after the sampling\n
        is finished and the low resolution plane has to be upsampled to the\n
        requested `size` in ``draw_samples(size, ...)``. The method may be\n
        the same as in :func:`~imgaug.imgaug.imresize_many_images`. Usually\n
        ``nearest`` or ``linear`` are good choices. ``nearest`` will result\n
        in rectangles with sharp edges and ``linear`` in rectangles with\n
        blurry and round edges. The method may be provided as a\n
        :class:`StochasticParameter`, which will be queried once per call to\n
        :func:`FromLowerResolution.draw_sample` and\n
        :func:`FromLowerResolution.draw_samples`.\n
\n
    min_size : int, optional\n
        Minimum size in pixels of the low resolution sampling plane.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.FromLowerResolution(\n
    >>>     iap.Binomial(0.05),\n
    >>>     size_px=(2, 16),\n
    >>>     method=iap.Choice([\"nearest\", \"linear\"]))\n
\n
    Samples from a binomial distribution with ``p=0.05``. The sampling plane\n
    will always have a size HxWxC with H and W being independently sampled\n
    from ``[2..16]`` (i.e. it may range from ``2x2xC`` up to ``16x16xC`` max,\n
    but may also be e.g. ``4x8xC``). The upsampling method will be ``nearest``\n
    in ``50%`` of all cases and ``linear`` in the other ``50%``. The result\n
    will sometimes be rectangular patches of sharp ``1`` s surrounded by\n
    ``0`` s and sometimes blurry blobs of ``1``s, surrounded by values\n
    ``<1.0``.\n
\n
    \"\"\"\n
\n
    def __init__(\n
        self, other_param, size_percent=None, size_px=None, method=\"nearest\", min_size=1\n
    ):\n
        super(FromLowerResolution, self).__init__()\n
\n
        assert size_percent is not None or size_px is not None, (\n
            \"Expected either 'size_percent' or 'size_px' to be provided, \"\n
            \"got neither of them.\"\n
        )\n
\n
        if size_percent is not None:\n
            self.size_method = \"percent\"\n
            self.size_px = None\n
            if ia.is_single_number(size_percent):\n
                self.size_percent = Deterministic(size_percent)\n
            elif ia.is_iterable(size_percent):\n
                assert len(size_percent) == 2, (\n
                    \"Expected iterable 'size_percent' to contain exactly 2 \"\n
                    \"values, got %d.\" % (len(size_percent),)\n
                )\n
                self.size_percent = Uniform(size_percent[0], size_percent[1])\n
            elif isinstance(size_percent, StochasticParameter):\n
                self.size_percent = size_percent\n
            else:\n
                raise Exception(\n
                    \"Expected int, float, tuple of two ints/floats or \"\n
                    \"StochasticParameter for size_percent, \"\n
                    \"got %s.\" % (type(size_percent),)\n
                )\n
        else:  # = elif size_px is not None:\n
            self.size_method = \"px\"\n
            self.size_percent = None\n
            if ia.is_single_integer(size_px):\n
                self.size_px = Deterministic(size_px)\n
            elif ia.is_iterable(size_px):\n
                assert len(size_px) == 2, (\n
                    \"Expected iterable 'size_px' to contain exactly 2 \"\n
                    \"values, got %d.\" % (len(size_px),)\n
                )\n
                self.size_px = DiscreteUniform(size_px[0], size_px[1])\n
            elif isinstance(size_px, StochasticParameter):\n
                self.size_px = size_px\n
            else:\n
                raise Exception(\n
                    \"Expected int, float, tuple of two ints/floats or \"\n
                    \"StochasticParameter for size_px, \"\n
                    \"got %s.\" % (type(size_px),)\n
                )\n
\n
        self.other_param = other_param\n
\n
        if ia.is_string(method) or ia.is_single_integer(method):\n
            self.method = Deterministic(method)\n
        elif isinstance(method, StochasticParameter):\n
            self.method = method\n
        else:\n
            raise Exception(\n
                \"Expected string or StochasticParameter, \" \"got %s.\" % (type(method),)\n
            )\n
\n
        self.min_size = min_size\n
\n
    def _draw_samples(self, size, random_state):\n
        if len(size) == 3:\n
            n = 1\n
            h, w, c = size\n
        elif len(size) == 4:\n
            n, h, w, c = size\n
        else:\n
            raise Exception(\n
\"FromLowerResolution can only generate samples \"\n \"of shape (H, W, C) or (N, H, W, C), \"\n \"requested was %s.\" % (str(size),)\n )\n\n if self.size_method == \"percent\":\n hw_percents = self.size_percent.draw_samples(\n (n, 2), random_state=random_state\n )\n hw_pxs = (hw_percents * np.array([h, w])).astype(np.int32)\n else:\n hw_pxs = self.size_px.draw_samples((n, 2), random_state=random_state)\n\n methods = self.method.draw_samples((n,), random_state=random_state)\n result = None\n for i, (hw_px, method) in enumerate(zip(hw_pxs, methods)):\n h_small = max(hw_px[0], self.min_size)\n w_small = max(hw_px[1], self.min_size)\n samples = self.other_param.draw_samples(\n (1, h_small, w_small, c), random_state=random_state\n )\n\n # This (1) makes sure that samples are of dtypes supported by\n # imresize_many_images, and (2) forces samples to be float-kind\n # if the requested interpolation is something else than nearest\n # neighbour interpolation. (2) is a bit hacky and makes sure that\n # continuous values are produced for e.g. cubic interpolation.\n # This is particularly important for e.g. binomial distributios\n # used in FromLowerResolution and thereby in e.g. CoarseDropout,\n # where integer-kinds would lead to sharp edges despite using\n # cubic interpolation.\n if samples.dtype.kind == \"f\":\n samples = iadt.restore_dtypes_(samples, np.float32)\n elif samples.dtype.kind == \"i\":\n if method == \"nearest\":\n samples = iadt.restore_dtypes_(samples, np.int32)\n else:\n samples = iadt.restore_dtypes_(samples, np.float32)\n else:\n assert samples.dtype.kind == \"u\", (\n \"FromLowerResolution can only process outputs of kind \"\n \"f (float), i (int) or u (uint), got %s.\" % (samples.dtype.kind)\n )\n if method == \"nearest\":\n samples = iadt.restore_dtypes_(samples, np.uint16)\n else:\n samples = iadt.restore_dtypes_(samples, np.float32)\n\n samples_upscaled = ia.imresize_many_images(\n samples, (h, w), interpolation=method\n )\n\n if result is None:\n result = np.zeros((n, h, w, c), dtype=samples_upscaled.dtype)\n result[i] = samples_upscaled\n\n if len(size) == 3:\n return result[0]\n return result\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n if self.size_method == \"percent\":\n pattern = (\n \"FromLowerResolution(\" \"size_percent=%s, method=%s, other_param=%s\" \")\"\n )\n return pattern % (self.size_percent, self.method, self.other_param)\n\n pattern = \"FromLowerResolution(\" \"size_px=%s, method=%s, other_param=%s\" \")\"\n return pattern % (self.size_px, self.method, self.other_param)\n\n\nclass Clip(StochasticParameter):\n \"\"\"Clip another parameter to a defined value range.\n\n Parameters\n ----------\n other_param : imgaug.parameters.StochasticParameter\n The other parameter, which's values are to be clipped.\n\n minval : None or number, optional\n The minimum value to use.\n If ``None``, no minimum will be used.\n\n maxval : None or number, optional\n The maximum value to use.\n If ``None``, no maximum will be used.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Clip(Normal(0, 1.0), minval=-2.0, maxval=2.0)\n\n Create a standard gaussian distribution, which's values never go below\n ``-2.0`` or above ``2.0``. Note that this will lead to small \"bumps\" of\n higher probability at ``-2.0`` and ``2.0``, as values below/above these\n will be clipped to them. 
\n
\n
class Clip(StochasticParameter):\n
    \"\"\"Clip another parameter to a defined value range.\n
\n
    Parameters\n
    ----------\n
    other_param : imgaug.parameters.StochasticParameter\n
        The other parameter, whose values are to be clipped.\n
\n
    minval : None or number, optional\n
        The minimum value to use.\n
        If ``None``, no minimum will be used.\n
\n
    maxval : None or number, optional\n
        The maximum value to use.\n
        If ``None``, no maximum will be used.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.Clip(iap.Normal(0, 1.0), minval=-2.0, maxval=2.0)\n
\n
    Create a standard gaussian distribution, whose values never go below\n
    ``-2.0`` or above ``2.0``. Note that this will lead to small \"bumps\" of\n
    higher probability at ``-2.0`` and ``2.0``, as values below/above these\n
    will be clipped to them. For smoother limitations on gaussian\n
    distributions, see :class:`TruncatedNormal`.\n
\n
    \"\"\"\n
\n
    def __init__(self, other_param, minval=None, maxval=None):\n
        super(Clip, self).__init__()\n
\n
        _assert_arg_is_stoch_param(\"other_param\", other_param)\n
        assert minval is None or ia.is_single_number(\n
            minval\n
        ), \"Expected 'minval' to be None or a number, got type %s.\" % (type(minval),)\n
        assert maxval is None or ia.is_single_number(\n
            maxval\n
        ), \"Expected 'maxval' to be None or a number, got type %s.\" % (type(maxval),)\n
\n
        self.other_param = other_param\n
        self.minval = minval\n
        self.maxval = maxval\n
\n
    def _draw_samples(self, size, random_state):\n
        samples = self.other_param.draw_samples(size, random_state=random_state)\n
        if self.minval is not None or self.maxval is not None:\n
            # Note that this would produce a warning if 'samples' is int64\n
            # or uint64\n
            samples = np.clip(samples, self.minval, self.maxval, out=samples)\n
        return samples\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        opstr = str(self.other_param)\n
        if self.minval is not None and self.maxval is not None:\n
            return \"Clip(%s, %.6f, %.6f)\" % (\n
                opstr,\n
                float(self.minval),\n
                float(self.maxval),\n
            )\n
        if self.minval is not None:\n
            return \"Clip(%s, %.6f, None)\" % (opstr, float(self.minval))\n
        if self.maxval is not None:\n
            return \"Clip(%s, None, %.6f)\" % (opstr, float(self.maxval))\n
        return \"Clip(%s, None, None)\" % (opstr,)\n
\n
\n
class Discretize(StochasticParameter):\n
    \"\"\"Convert a continuous distribution to a discrete one.\n
\n
    This will round the values and then cast them to integers.\n
    Values sampled from already discrete distributions are not changed.\n
\n
    Parameters\n
    ----------\n
    other_param : imgaug.parameters.StochasticParameter\n
        The other parameter, whose values are to be discretized.\n
\n
    round : bool, optional\n
        Whether to round before converting to integer dtype.\n
\n
        Added in 0.4.0.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.Discretize(iap.Normal(0, 1.0))\n
\n
    Create a discrete standard gaussian distribution.\n
\n
    \"\"\"\n
\n
    def __init__(self, other_param, round=True):\n
        # pylint: disable=redefined-builtin\n
        super(Discretize, self).__init__()\n
        _assert_arg_is_stoch_param(\"other_param\", other_param)\n
        self.other_param = other_param\n
        self.round = round\n
\n
    def _draw_samples(self, size, random_state):\n
        samples = self.other_param.draw_samples(size, random_state=random_state)\n
        assert samples.dtype.kind in [\"u\", \"i\", \"b\", \"f\"], (\n
            \"Expected to get uint, int, bool or float dtype as samples in \"\n
            \"Discretize(), but got dtype '%s' (kind '%s') instead.\"\n
            % (samples.dtype.name, samples.dtype.kind)\n
        )\n
\n
        if samples.dtype.kind in [\"u\", \"i\", \"b\"]:\n
            return samples\n
\n
        # floats seem to reliably cover ints that have half the number of\n
        # bits -- probably not the case for float128 though as that is\n
        # really float96\n
        bitsize = 8 * samples.dtype.itemsize // 2\n
        # in case some weird system knows something like float8 we set a\n
        # lower bound here -- shouldn't happen though\n
        bitsize = max(bitsize, 8)\n
        dtype = np.dtype(\"int%d\" % (bitsize,))\n
        if self.round:\n
            samples = np.round(samples)\n
        return samples.astype(dtype)\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        opstr = str(self.other_param)\n
        return \"Discretize(%s, round=%s)\" % (opstr, str(self.round))\n
\n
\n
class Multiply(StochasticParameter):\n
    \"\"\"Multiply the samples of another stochastic parameter.\n
\n
    Parameters\n
    ----------\n
    other_param : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Other parameter whose sampled values are to be multiplied with `val`.\n
        Let ``S`` be the requested shape of samples, then the datatype\n
        behaviour is as follows:\n
\n
        * If a single ``number``, this ``number`` will be used as a\n
          constant value to fill an array of shape ``S``.\n
        * If a ``tuple`` of two ``number`` s ``(a, b)``, an array of\n
          shape ``S`` will be filled with uniformly sampled values from\n
          the continuous interval ``[a, b)``.\n
        * If a ``list`` of ``number``, an array of shape ``S`` will be\n
          filled with randomly picked values from the ``list``.\n
        * If a :class:`StochasticParameter`, that parameter will be\n
          queried once per call to generate an array of shape ``S``.\n
\n
        \"per call\" denotes a call of :func:`Multiply.draw_sample` or\n
        :func:`Multiply.draw_samples`.\n
\n
    val : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Multiplier to use.\n
        Datatype behaviour is analogous to `other_param`, though if\n
        ``elementwise=False`` (the default), only a single sample will be\n
        generated per call instead of ``S``.\n
\n
    elementwise : bool, optional\n
        Controls the sampling behaviour of `val`.\n
        If set to ``False``, a single sample will be requested from `val` and\n
        used as the constant multiplier.\n
        If set to ``True``, samples of shape ``S`` will be requested from\n
        `val` and multiplied elementwise with the samples of `other_param`.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.Multiply(iap.Uniform(0.0, 1.0), -1)\n
\n
    Convert a uniform distribution from ``[0.0, 1.0)`` to ``(-1.0, 0.0]``.\n
\n
    \"\"\"\n
\n
    def __init__(self, other_param, val, elementwise=False):\n
        super(Multiply, self).__init__()\n
\n
        self.other_param = handle_continuous_param(\n
            other_param, \"other_param\", prefetch=False\n
        )\n
        self.val = handle_continuous_param(val, \"val\", prefetch=False)\n
        self.elementwise = elementwise\n
\n
    def _draw_samples(self, size, random_state):\n
        rngs = random_state.duplicate(2)\n
        samples = self.other_param.draw_samples(size, random_state=rngs[0])\n
\n
        elementwise = self.elementwise and not isinstance(self.val, Deterministic)\n
\n
        if elementwise:\n
            val_samples = self.val.draw_samples(size, random_state=rngs[1])\n
        else:\n
            val_samples = self.val.draw_sample(random_state=rngs[1])\n
\n
        if elementwise:\n
            return np.multiply(samples, val_samples)\n
        return samples * val_samples\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        return \"Multiply(%s, %s, %s)\" % (\n
            str(self.other_param),\n
            str(self.val),\n
            self.elementwise,\n
        )\n
\n
\n
class Divide(StochasticParameter):\n
    \"\"\"Divide the samples of another stochastic parameter.\n
\n
    This parameter will automatically prevent division by zero by using\n
    ``1.0`` as the denominator in these cases.\n
\n
    Parameters\n
    ----------\n
    other_param : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Other parameter whose sampled values are to be divided by `val`.\n
        Let ``S`` be the requested shape of samples, then the datatype\n
        behaviour is as follows:\n
\n
        * If a single ``number``, this ``number`` will be used as a\n
          constant value to fill an array of shape ``S``.\n
        * If a ``tuple`` of two ``number`` s ``(a, b)``, an array of\n
          shape ``S`` will be filled with uniformly sampled values from\n
          the continuous interval ``[a, b)``.\n
        * If a ``list`` of ``number``, an array of shape ``S`` will be\n
          filled with randomly picked values from the ``list``.\n
        * If a :class:`StochasticParameter`, that parameter will be\n
          queried once per call to generate an array of shape ``S``.\n
\n
        \"per call\" denotes a call of :func:`Divide.draw_sample` or\n
        :func:`Divide.draw_samples`.\n
\n
    val : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Denominator to use.\n
        Datatype behaviour is analogous to `other_param`, though if\n
        ``elementwise=False`` (the default), only a single sample will be\n
        generated per call instead of ``S``.\n
\n
    elementwise : bool, optional\n
        Controls the sampling behaviour of `val`.\n
        If set to ``False``, a single sample will be requested from `val` and\n
        used as the constant denominator.\n
        If set to ``True``, samples of shape ``S`` will be requested from\n
        `val` and used to divide the samples of `other_param` elementwise.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.Divide(iap.Uniform(0.0, 1.0), 2)\n
\n
    Convert a uniform distribution ``[0.0, 1.0)`` to ``[0.0, 0.5)``.\n
\n
    \"\"\"\n
\n
    def __init__(self, other_param, val, elementwise=False):\n
        super(Divide, self).__init__()\n
\n
        self.other_param = handle_continuous_param(\n
            other_param, \"other_param\", prefetch=False\n
        )\n
        self.val = handle_continuous_param(val, \"val\", prefetch=False)\n
        self.elementwise = elementwise\n
\n
    def _draw_samples(self, size, random_state):\n
        # pylint: disable=no-else-return\n
        rngs = random_state.duplicate(2)\n
        samples = self.other_param.draw_samples(size, random_state=rngs[0])\n
\n
        elementwise = self.elementwise and not isinstance(self.val, Deterministic)\n
\n
        if elementwise:\n
            val_samples = self.val.draw_samples(size, random_state=rngs[1])\n
\n
            # prevent division by zero\n
            val_samples[val_samples == 0] = 1\n
\n
            return np.divide(\n
                force_np_float_dtype(samples), force_np_float_dtype(val_samples)\n
            )\n
        else:\n
            val_sample = self.val.draw_sample(random_state=rngs[1])\n
\n
            # prevent division by zero\n
            if val_sample == 0:\n
                val_sample = 1\n
\n
            return force_np_float_dtype(samples) / float(val_sample)\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        return \"Divide(%s, %s, %s)\" % (\n
            str(self.other_param),\n
            str(self.val),\n
            self.elementwise,\n
        )\n
\n
\n
# TODO sampling (N,) from something like 10+Uniform(0, 1) will return\n
# N times the same value as (N,) values will be sampled from 10, but only\n
# one from Uniform() unless elementwise=True is explicitly set. That\n
# seems unintuitive. How can this be prevented?\n
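#\n
# A short illustration of that behaviour (illustrative only; assumes\n
# ``import imgaug.parameters as iap``):\n
#\n
# >>> import imgaug.parameters as iap\n
# >>> param = iap.Add(10, iap.Uniform(0, 1))  # \"10 + Uniform(0, 1)\"\n
# >>> samples = param.draw_samples((5,))\n
# >>> # all five values share one Uniform() offset, because `val` is\n
# >>> # sampled only once per call when elementwise=False (the default)\n
# >>> param_ew = iap.Add(10, iap.Uniform(0, 1), elementwise=True)\n
# >>> samples_ew = param_ew.draw_samples((5,))\n
# >>> # now each of the five values gets its own Uniform() offset\n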
class Add(StochasticParameter):\n
    \"\"\"Add to the samples of another stochastic parameter.\n
\n
    Parameters\n
    ----------\n
    other_param : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Samples of `val` will be added to samples of this parameter.\n
        Let ``S`` be the requested shape of samples, then the datatype\n
        behaviour is as follows:\n
\n
        * If a single ``number``, this ``number`` will be used as a\n
          constant value to fill an array of shape ``S``.\n
        * If a ``tuple`` of two ``number`` s ``(a, b)``, an array of\n
          shape ``S`` will be filled with uniformly sampled values from\n
          the continuous interval ``[a, b)``.\n
        * If a ``list`` of ``number``, an array of shape ``S`` will be\n
          filled with randomly picked values from the ``list``.\n
        * If a :class:`StochasticParameter`, that parameter will be\n
          queried once per call to generate an array of shape ``S``.\n
\n
        \"per call\" denotes a call of :func:`Add.draw_sample` or\n
        :func:`Add.draw_samples`.\n
\n
    val : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Value to add to the samples of `other_param`.\n
        Datatype behaviour is analogous to `other_param`, though if\n
        ``elementwise=False`` (the default), only a single sample will be\n
        generated per call instead of ``S``.\n
\n
    elementwise : bool, optional\n
        Controls the sampling behaviour of `val`.\n
        If set to ``False``, a single sample will be requested from `val` and\n
        used as the constant value to add.\n
        If set to ``True``, samples of shape ``S`` will be requested from\n
        `val` and added elementwise with the samples of `other_param`.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.Add(iap.Uniform(0.0, 1.0), 1.0)\n
\n
    Convert a uniform distribution from ``[0.0, 1.0)`` to ``[1.0, 2.0)``.\n
\n
    \"\"\"\n
\n
    def __init__(self, other_param, val, elementwise=False):\n
        super(Add, self).__init__()\n
\n
        self.other_param = handle_continuous_param(\n
            other_param, \"other_param\", prefetch=False\n
        )\n
        self.val = handle_continuous_param(val, \"val\", prefetch=False)\n
        self.elementwise = elementwise\n
\n
    def _draw_samples(self, size, random_state):\n
        rngs = random_state.duplicate(2)\n
        samples = self.other_param.draw_samples(size, random_state=rngs[0])\n
\n
        elementwise = self.elementwise and not isinstance(self.val, Deterministic)\n
\n
        if elementwise:\n
            val_samples = self.val.draw_samples(size, random_state=rngs[1])\n
        else:\n
            val_samples = self.val.draw_sample(random_state=rngs[1])\n
\n
        if elementwise:\n
            return np.add(samples, val_samples)\n
        return samples + val_samples\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        return \"Add(%s, %s, %s)\" % (\n
            str(self.other_param),\n
            str(self.val),\n
            self.elementwise,\n
        )\n
\n
\n
class Subtract(StochasticParameter):\n
    \"\"\"Subtract from the samples of another stochastic parameter.\n
\n
    Parameters\n
    ----------\n
    other_param : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Samples of `val` will be subtracted from samples of this parameter.\n
        Let ``S`` be the requested shape of samples, then the datatype\n
        behaviour is as follows:\n
\n
        * If a single ``number``, this ``number`` will be used as a\n
          constant value to fill an array of shape ``S``.\n
        * If a ``tuple`` of two ``number`` s ``(a, b)``, an array of\n
          shape ``S`` will be filled with uniformly sampled values from\n
          the continuous interval ``[a, b)``.\n
        * If a ``list`` of ``number``, an array of shape ``S`` will be\n
          filled with randomly picked values from the ``list``.\n
        * If a :class:`StochasticParameter`, that parameter will be\n
          queried once per call to generate an array of shape ``S``.\n
\n
        \"per call\" denotes a call of :func:`Subtract.draw_sample` or\n
        :func:`Subtract.draw_samples`.\n
\n
    val : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Value to subtract from the other parameter.\n
        Datatype behaviour is analogous to `other_param`, though if\n
        ``elementwise=False`` (the default), only a single sample will be\n
        generated per call instead of ``S``.\n
\n
    elementwise : bool, optional\n
        Controls the sampling behaviour of `val`.\n
        If set to ``False``, a single sample will be requested from `val` and\n
        used as the constant value to subtract.\n
        If set to ``True``, samples of shape ``S`` will be requested from\n
        `val` and subtracted elementwise from the samples of `other_param`.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.Subtract(iap.Uniform(0.0, 1.0), 1.0)\n
\n
    Convert a uniform distribution from ``[0.0, 1.0)`` to ``[-1.0, 0.0)``.\n
\n
    \"\"\"\n
\n
    def __init__(self, other_param, val, elementwise=False):\n
        super(Subtract, self).__init__()\n
\n
        self.other_param = handle_continuous_param(\n
            other_param, \"other_param\", prefetch=False\n
        )\n
        self.val = handle_continuous_param(val, \"val\", prefetch=False)\n
        self.elementwise = elementwise\n
\n
    def _draw_samples(self, size, random_state):\n
        rngs = random_state.duplicate(2)\n
        samples = self.other_param.draw_samples(size, random_state=rngs[0])\n
\n
        elementwise = self.elementwise and not isinstance(self.val, Deterministic)\n
\n
        if elementwise:\n
            val_samples = self.val.draw_samples(size, random_state=rngs[1])\n
        else:\n
            val_samples = self.val.draw_sample(random_state=rngs[1])\n
\n
        if elementwise:\n
            return np.subtract(samples, val_samples)\n
        return samples - val_samples\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        return \"Subtract(%s, %s, %s)\" % (\n
            str(self.other_param),\n
            str(self.val),\n
            self.elementwise,\n
        )\n
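\n
# A quick numerical check for the ``Power`` parameter defined below\n
# (illustrative only; assumes ``import imgaug.parameters as iap``):\n
# squaring uniform samples from ``[0.0, 1.0)`` concentrates them near\n
# ``0.0``, since ``x**2 < x`` for ``0 < x < 1``.\n
#\n
# >>> import imgaug.parameters as iap\n
# >>> param = iap.Power(iap.Uniform(0.0, 1.0), 2)\n
# >>> samples = param.draw_samples((100000,))\n
# >>> assert float((samples < 0.25).mean()) > 0.45  # P(X**2 < 0.25) = 0.5\n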
\n
\n
class Power(StochasticParameter):\n
    \"\"\"Exponentiate the samples of another stochastic parameter.\n
\n
    Parameters\n
    ----------\n
    other_param : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Other parameter whose sampled values are to be exponentiated by `val`.\n
        Let ``S`` be the requested shape of samples, then the datatype\n
        behaviour is as follows:\n
\n
        * If a single ``number``, this ``number`` will be used as a\n
          constant value to fill an array of shape ``S``.\n
        * If a ``tuple`` of two ``number`` s ``(a, b)``, an array of\n
          shape ``S`` will be filled with uniformly sampled values from\n
          the continuous interval ``[a, b)``.\n
        * If a ``list`` of ``number``, an array of shape ``S`` will be\n
          filled with randomly picked values from the ``list``.\n
        * If a :class:`StochasticParameter`, that parameter will be\n
          queried once per call to generate an array of shape ``S``.\n
\n
        \"per call\" denotes a call of :func:`Power.draw_sample` or\n
        :func:`Power.draw_samples`.\n
\n
    val : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n
        Value to use to exponentiate the samples of `other_param`.\n
        Datatype behaviour is analogous to `other_param`, though if\n
        ``elementwise=False`` (the default), only a single sample will be\n
        generated per call instead of ``S``.\n
\n
    elementwise : bool, optional\n
        Controls the sampling behaviour of `val`.\n
        If set to ``False``, a single sample will be requested from `val` and\n
        used as the constant exponent.\n
        If set to ``True``, samples of shape ``S`` will be requested from\n
        `val` and used to exponentiate the samples of `other_param`\n
        elementwise.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.Power(iap.Uniform(0.0, 1.0), 2)\n
\n
    Converts a uniform range ``[0.0, 1.0)`` to a distribution that is peaked\n
    towards ``0.0``.\n
\n
    \"\"\"\n
\n
    def __init__(self, other_param, val, elementwise=False):\n
        super(Power, self).__init__()\n
\n
        self.other_param = handle_continuous_param(\n
            other_param, \"other_param\", prefetch=False\n
        )\n
        self.val = handle_continuous_param(val, \"val\", prefetch=False)\n
        self.elementwise = elementwise\n
\n
    def _draw_samples(self, size, random_state):\n
        rngs = random_state.duplicate(2)\n
        samples = self.other_param.draw_samples(size, random_state=rngs[0])\n
\n
        elementwise = self.elementwise and not isinstance(self.val, Deterministic)\n
\n
        if elementwise:\n
            exponents = self.val.draw_samples(size, random_state=rngs[1])\n
        else:\n
            exponents = self.val.draw_sample(random_state=rngs[1])\n
\n
        # without this we get int results in the case of\n
        # Power(<int>, <stochastic float param>)\n
        samples, exponents = both_np_float_if_one_is_float(samples, exponents)\n
        samples_dtype = samples.dtype\n
\n
        # TODO switch to this as numpy>=1.15 is now a requirement\n
        #      float_power requires numpy>=1.12\n
        # result = np.float_power(samples, exponents)\n
        # TODO why was float32 type here replaced with complex number\n
        #      formulation?\n
        # note: np.complex was a deprecated alias for the builtin `complex`\n
        # and was removed in numpy 1.24, hence np.complex128 here\n
        result = np.power(samples.astype(np.complex128), exponents).real\n
        if result.dtype != samples_dtype:\n
            result = result.astype(samples_dtype)\n
\n
        return result\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        return \"Power(%s, %s, %s)\" % (\n
            str(self.other_param),\n
            str(self.val),\n
            self.elementwise,\n
        )\n
\n
\n
class Absolute(StochasticParameter):\n
    \"\"\"Convert the samples of another parameter to their absolute values.\n
\n
    Parameters\n
    ----------\n
    other_param : imgaug.parameters.StochasticParameter\n
        Other parameter whose sampled values are to be modified.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.Absolute(iap.Uniform(-1.0, 1.0))\n
\n
    Convert a uniform distribution from ``[-1.0, 1.0)`` to ``[0.0, 1.0]``.\n
\n
    \"\"\"\n
\n
    def __init__(self, other_param):\n
        super(Absolute, self).__init__()\n
\n
        _assert_arg_is_stoch_param(\"other_param\", other_param)\n
\n
        self.other_param = other_param\n
\n
    def _draw_samples(self, size, random_state):\n
        samples = self.other_param.draw_samples(size, random_state=random_state)\n
        return np.absolute(samples)\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        opstr = str(self.other_param)\n
        return \"Absolute(%s)\" % (opstr,)\n
%s.\" % (type(p_positive))\n assert (\n 0.0 <= p_positive <= 1.0\n ), \"Expected 'p_positive' to be in the interval [0.0, 1.0], \" \"got %.4f.\" % (\n p_positive,\n )\n\n self.other_param = other_param\n self.p_positive = p_positive\n\n def _draw_samples(self, size, random_state):\n rss = random_state.duplicate(2)\n samples = self.other_param.draw_samples(size, random_state=rss[0])\n # TODO add method to change from uint to int here instead of assert\n assert samples.dtype.kind in [\"f\", \"i\"], (\n \"Expected to get samples of kind float or int, but got dtype %s \"\n \"of kind %s.\" % (samples.dtype.name, samples.dtype.kind)\n )\n # TODO convert to same kind as samples\n coinflips = rss[1].binomial(1, self.p_positive, size=size).astype(np.int8)\n signs = coinflips * 2 - 1\n # Add absolute here to guarantee that we get p_positive percent of\n # positive values. Otherwise we would merely flip p_positive percent\n # of all signs.\n # TODO test if\n # result[coinflips_mask] *= (-1)\n # is faster (with protection against mask being empty?)\n result = np.absolute(samples) * signs\n return result\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n opstr = str(self.other_param)\n return \"RandomSign(%s, %.2f)\" % (opstr, self.p_positive)\n\n\nclass ForceSign(StochasticParameter):\n \"\"\"Convert a parameter's samples to either positive or negative values.\n\n Parameters\n ----------\n other_param : imgaug.parameters.StochasticParameter\n Other parameter which's sampled values are to be modified.\n\n positive : bool\n Whether to force all signs to be positive (``True``) or\n negative (``False``).\n\n mode : {'invert', 'reroll'}, optional\n Method to change the signs. Valid values are ``invert`` and ``reroll``.\n ``invert`` means that wrong signs are simply flipped.\n ``reroll`` means that all samples with wrong signs are sampled again,\n optionally many times, until they randomly end up having the correct\n sign.\n\n reroll_count_max : int, optional\n If `mode` is set to ``reroll``, this determines how often values may\n be rerolled before giving up and simply flipping the sign (as in\n ``mode=\"invert\"``). This shouldn't be set too high, as rerolling is\n expensive.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.ForceSign(iap.Poisson(1), positive=False)\n\n Create a poisson distribution with ``alpha=1`` that is flipped towards\n negative values.\n\n \"\"\"\n\n def __init__(self, other_param, positive, mode=\"invert\", reroll_count_max=2):\n super(ForceSign, self).__init__()\n\n _assert_arg_is_stoch_param(\"other_param\", other_param)\n self.other_param = other_param\n\n assert positive in [\n True,\n False,\n ], \"Expected 'positive' to be True or False, got type %s.\" % (type(positive),)\n self.positive = positive\n\n assert mode in [\n \"invert\",\n \"reroll\",\n ], 'Expected \\'mode\\' to be \"invert\" or \"reroll\", got %s.' 
\n
\n
class ForceSign(StochasticParameter):\n
    \"\"\"Convert a parameter's samples to either positive or negative values.\n
\n
    Parameters\n
    ----------\n
    other_param : imgaug.parameters.StochasticParameter\n
        Other parameter whose sampled values are to be modified.\n
\n
    positive : bool\n
        Whether to force all signs to be positive (``True``) or\n
        negative (``False``).\n
\n
    mode : {'invert', 'reroll'}, optional\n
        Method to change the signs. Valid values are ``invert`` and ``reroll``.\n
        ``invert`` means that wrong signs are simply flipped.\n
        ``reroll`` means that all samples with wrong signs are sampled again,\n
        optionally many times, until they randomly end up having the correct\n
        sign.\n
\n
    reroll_count_max : int, optional\n
        If `mode` is set to ``reroll``, this determines how often values may\n
        be rerolled before giving up and simply flipping the sign (as in\n
        ``mode=\"invert\"``). This shouldn't be set too high, as rerolling is\n
        expensive.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.ForceSign(iap.Poisson(1), positive=False)\n
\n
    Create a poisson distribution with ``lam=1`` that is flipped towards\n
    negative values.\n
\n
    \"\"\"\n
\n
    def __init__(self, other_param, positive, mode=\"invert\", reroll_count_max=2):\n
        super(ForceSign, self).__init__()\n
\n
        _assert_arg_is_stoch_param(\"other_param\", other_param)\n
        self.other_param = other_param\n
\n
        assert positive in [\n
            True,\n
            False,\n
        ], \"Expected 'positive' to be True or False, got type %s.\" % (type(positive),)\n
        self.positive = positive\n
\n
        assert mode in [\n
            \"invert\",\n
            \"reroll\",\n
        ], 'Expected \'mode\' to be \"invert\" or \"reroll\", got %s.' % (mode,)\n
        self.mode = mode\n
\n
        assert ia.is_single_integer(\n
            reroll_count_max\n
        ), \"Expected 'reroll_count_max' to be an integer, got type %s.\" % (\n
            type(reroll_count_max)\n
        )\n
        self.reroll_count_max = reroll_count_max\n
\n
    def _draw_samples(self, size, random_state):\n
        rngs = random_state.duplicate(1 + self.reroll_count_max)\n
        samples = self.other_param.draw_samples(size, random_state=rngs[0])\n
\n
        if self.mode == \"invert\":\n
            if self.positive:\n
                samples[samples < 0] *= -1\n
            else:\n
                samples[samples > 0] *= -1\n
        else:\n
            if self.positive:\n
                bad_samples = np.where(samples < 0)[0]\n
            else:\n
                bad_samples = np.where(samples > 0)[0]\n
\n
            reroll_count = 0\n
            while len(bad_samples) > 0 and reroll_count < self.reroll_count_max:\n
                # This rerolls the full input size, even when only a tiny\n
                # fraction of the values were wrong. That is done, because not\n
                # all parameters necessarily support any number of dimensions\n
                # for `size`, so we can't just resample size=N for N values\n
                # with wrong signs.\n
                # There is still quite some room for improvement here.\n
                samples_reroll = self.other_param.draw_samples(\n
                    size, random_state=rngs[1 + reroll_count]\n
                )\n
                samples[bad_samples] = samples_reroll[bad_samples]\n
\n
                reroll_count += 1\n
                if self.positive:\n
                    bad_samples = np.where(samples < 0)[0]\n
                else:\n
                    bad_samples = np.where(samples > 0)[0]\n
\n
            if len(bad_samples) > 0:\n
                samples[bad_samples] *= -1\n
\n
        return samples\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        opstr = str(self.other_param)\n
        return \"ForceSign(%s, %s, %s, %d)\" % (\n
            opstr,\n
            str(self.positive),\n
            self.mode,\n
            self.reroll_count_max,\n
        )\n
\n
\n
def Positive(other_param, mode=\"invert\", reroll_count_max=2):\n
    \"\"\"Convert another parameter's results to positive values.\n
\n
    Parameters\n
    ----------\n
    other_param : imgaug.parameters.StochasticParameter\n
        Other parameter whose sampled values are to be\n
        modified.\n
\n
    mode : {'invert', 'reroll'}, optional\n
        How to change the signs. Valid values are ``invert`` and ``reroll``.\n
        ``invert`` means that wrong signs are simply flipped.\n
        ``reroll`` means that all samples with wrong signs are sampled again,\n
        optionally many times, until they randomly end up having the correct\n
        sign.\n
\n
    reroll_count_max : int, optional\n
        If `mode` is set to ``reroll``, this determines how often values may\n
        be rerolled before giving up and simply flipping the sign (as in\n
        ``mode=\"invert\"``). This shouldn't be set too high, as rerolling is\n
        expensive.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> param = iap.Positive(iap.Normal(0, 1), mode=\"reroll\")\n
\n
    Create a gaussian distribution that has only positive values.\n
    If any negative value is sampled in the process, that sample is resampled\n
    up to two times to get a positive one. If it isn't positive after the\n
    second resampling step, the sign is simply flipped.\n
\n
    \"\"\"\n
    # pylint: disable=invalid-name\n
    return ForceSign(\n
        other_param=other_param,\n
        positive=True,\n
        mode=mode,\n
        reroll_count_max=reroll_count_max,\n
    )\n
\n
\n
def Negative(other_param, mode=\"invert\", reroll_count_max=2):\n
    \"\"\"Convert another parameter's results to negative values.\n
\n
    Parameters\n
    ----------\n
    other_param : imgaug.parameters.StochasticParameter\n
        Other parameter whose sampled values are to be\n
        modified.\n
\n
    mode : {'invert', 'reroll'}, optional\n
        How to change the signs. 
Valid values are ``invert`` and ``reroll``.\n ``invert`` means that wrong signs are simply flipped.\n ``reroll`` means that all samples with wrong signs are sampled again,\n optionally many times, until they randomly end up having the correct\n sign.\n\n reroll_count_max : int, optional\n If `mode` is set to ``reroll``, this determines how often values may\n be rerolled before giving up and simply flipping the sign (as in\n ``mode=\"invert\"``). This shouldn't be set too high, as rerolling is\n expensive.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Negative(iap.Normal(0, 1), mode=\"reroll\")\n\n Create a gaussian distribution that has only negative values.\n If any positive value is sampled in the process, that sample is resampled\n up to two times to get a negative one. If it isn't negative after the\n second resampling step, the sign is simply flipped.\n\n \"\"\"\n # pylint: disable=invalid-name\n return ForceSign(\n other_param=other_param,\n positive=False,\n mode=mode,\n reroll_count_max=reroll_count_max,\n )\n\n\n# TODO this always aggregates the result in high resolution space, instead of\n# aggregating them in low resolution and then only upscaling the final\n# image (for N iterations that would save up to N-1 upscales)\nclass IterativeNoiseAggregator(StochasticParameter):\n \"\"\"Aggregate multiple iterations of samples from another parameter.\n\n This is supposed to be used in conjunction with :class:`SimplexNoise` or\n :class:`FrequencyNoise`. If a shape ``S`` is requested, it will request\n ``I`` times ``S`` samples from the underlying parameter, where ``I`` is\n the number of iterations. The ``I`` arrays will be combined to a single\n array of shape ``S`` using an aggregation method, e.g. simple averaging.\n\n Parameters\n ----------\n other_param : StochasticParameter\n The other parameter from which to sample one or more times.\n\n iterations : int or iterable of int or list of int or imgaug.parameters.StochasticParameter, optional\n The number of iterations.\n\n * If a single ``int``, this ``int`` will be used as a\n constant value.\n * If a ``tuple`` of two ``int`` s ``(a, b)``, the value will be\n sampled from the discrete interval ``[a..b]`` once per call.\n * If a ``list`` of ``int``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of\n :func:`IterativeNoiseAggregator.draw_sample` or\n :func:`IterativeNoiseAggregator.draw_samples`.\n\n aggregation_method : imgaug.ALL or {'min', 'avg', 'max'} or list of str or imgaug.parameters.StochasticParameter, optional\n The method to use to aggregate the samples of multiple iterations\n to a single output array. All methods combine several arrays of\n shape ``S`` each to a single array of shape ``S`` and hence work\n elementwise. 
Known methods are ``min`` (take the minimum over all\n
        iterations), ``max`` (take the maximum) and ``avg`` (take the average).\n
\n
        * If a ``str``, it must be one of the described methods and\n
          will be used for all calls.\n
        * If a ``list`` of ``str``, it must contain one or more of the\n
          described methods and a random one will be sampled once per call.\n
        * If ``imgaug.ALL``, then equivalent to the ``list``\n
          ``[\"min\", \"max\", \"avg\"]``.\n
        * If :class:`StochasticParameter`, a value will be sampled from\n
          that parameter once per call and must be one of the described\n
          methods.\n
\n
        \"per call\" denotes a call of\n
        :func:`IterativeNoiseAggregator.draw_sample` or\n
        :func:`IterativeNoiseAggregator.draw_samples`.\n
\n
    Examples\n
    --------\n
    >>> import imgaug.parameters as iap\n
    >>> noise = iap.IterativeNoiseAggregator(\n
    >>>     iap.SimplexNoise(),\n
    >>>     iterations=(2, 5),\n
    >>>     aggregation_method=\"max\")\n
\n
    Create a parameter that -- upon each call -- generates ``2`` to ``5``\n
    arrays of simplex noise with the same shape. Then it combines these\n
    noise maps to a single map using elementwise maximum.\n
\n
    \"\"\"\n
\n
    def __init__(\n
        self, other_param, iterations=(1, 3), aggregation_method=[\"max\", \"avg\"]\n
    ):\n
        # pylint: disable=dangerous-default-value\n
        super(IterativeNoiseAggregator, self).__init__()\n
        _assert_arg_is_stoch_param(\"other_param\", other_param)\n
        self.other_param = other_param\n
\n
        def _assert_within_bounds(_iterations):\n
            assert all([1 <= val <= 10000 for val in _iterations]), (\n
                \"Expected 'iterations' to only contain values within \"\n
                \"the interval [1, 10000], got values %s.\"\n
                % (\", \".join([str(val) for val in _iterations]),)\n
            )\n
\n
        if ia.is_single_integer(iterations):\n
            _assert_within_bounds([iterations])\n
            self.iterations = Deterministic(iterations)\n
        elif isinstance(iterations, list):\n
            assert len(iterations) > 0, (\n
                \"Expected 'iterations' of type list to contain at least one \"\n
                \"entry, got %d.\" % (len(iterations),)\n
            )\n
            _assert_within_bounds(iterations)\n
            self.iterations = Choice(iterations)\n
        elif ia.is_iterable(iterations):\n
            assert len(iterations) == 2, (\n
                \"Expected iterable non-list 'iterations' to contain exactly \"\n
                \"two entries, got %d.\" % (len(iterations),)\n
            )\n
            assert all([ia.is_single_integer(val) for val in iterations]), (\n
                \"Expected iterable non-list 'iterations' to only contain \"\n
                \"integers, got types %s.\"\n
                % (\", \".join([str(type(val)) for val in iterations]),)\n
            )\n
            _assert_within_bounds(iterations)\n
            self.iterations = DiscreteUniform(iterations[0], iterations[1])\n
        elif isinstance(iterations, StochasticParameter):\n
            self.iterations = iterations\n
        else:\n
            raise Exception(\n
                \"Expected iterations to be int or tuple of two ints or \"\n
                \"StochasticParameter, got %s.\" % (type(iterations),)\n
            )\n
\n
        if aggregation_method == ia.ALL:\n
            self.aggregation_method = Choice([\"min\", \"max\", \"avg\"])\n
        elif ia.is_string(aggregation_method):\n
            self.aggregation_method = Deterministic(aggregation_method)\n
        elif isinstance(aggregation_method, list):\n
            assert (\n
                len(aggregation_method) >= 1\n
            ), \"Expected at least one aggregation method, got %d.\" % (\n
                len(aggregation_method),\n
            )\n
            assert all(\n
                [ia.is_string(val) for val in aggregation_method]\n
            ), \"Expected aggregation methods provided as strings, \" \"got types %s.\" % (\n
                \", \".join([str(type(v)) for v in aggregation_method])\n
            )\n
            self.aggregation_method = Choice(aggregation_method)\n
        elif isinstance(aggregation_method, StochasticParameter):\n
            self.aggregation_method = aggregation_method\n
        else:\n
            raise Exception(\n
                \"Expected aggregation_method to be string or list of strings \"\n
                \"or StochasticParameter, got %s.\" % (type(aggregation_method),)\n
            )\n
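\n
    # Overview of the sampling flow below: one aggregation method and one\n
    # iteration count are drawn per call; `other_param` is then sampled\n
    # `iterations` times at the full requested size and the resulting\n
    # arrays are combined elementwise via \"min\", \"max\" or \"avg\".\n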
    def _draw_samples(self, size, random_state):\n
        rngs = random_state.duplicate(2)\n
        aggregation_method = self.aggregation_method.draw_sample(random_state=rngs[0])\n
        iterations = self.iterations.draw_sample(random_state=rngs[1])\n
        assert (\n
            iterations > 0\n
        ), \"Expected to sample at least one iteration of aggregation. \" \"Got %d.\" % (\n
            iterations,\n
        )\n
\n
        rngs_iterations = rngs[1].duplicate(iterations)\n
\n
        result = np.zeros(size, dtype=np.float32)\n
        for i in sm.xrange(iterations):\n
            noise_iter = self.other_param.draw_samples(\n
                size, random_state=rngs_iterations[i]\n
            )\n
\n
            if aggregation_method == \"avg\":\n
                result += noise_iter\n
            elif aggregation_method == \"min\":\n
                if i == 0:\n
                    result = noise_iter\n
                else:\n
                    result = np.minimum(result, noise_iter)\n
            else:  # self.aggregation_method == \"max\"\n
                if i == 0:\n
                    result = noise_iter\n
                else:\n
                    result = np.maximum(result, noise_iter)\n
\n
        if aggregation_method == \"avg\":\n
            result = result / iterations\n
\n
        return result\n
\n
    def __repr__(self):\n
        return self.__str__()\n
\n
    def __str__(self):\n
        opstr = str(self.other_param)\n
        return \"IterativeNoiseAggregator(%s, %s, %s)\" % (\n
            opstr,\n
            str(self.iterations),\n
            str(self.aggregation_method),\n
        )\n
\n
\n
class Sigmoid(StochasticParameter):\n
    \"\"\"Apply a sigmoid function to the outputs of another parameter.\n
\n
    This is intended to be used in combination with :class:`SimplexNoise` or\n
    :class:`FrequencyNoise`. It pushes the noise values away from ``~0.5`` and\n
    towards ``0.0`` or ``1.0``, making the noise maps more binary.\n
\n
    Parameters\n
    ----------\n
    other_param : imgaug.parameters.StochasticParameter\n
        The other parameter to which the sigmoid will be applied.\n
\n
    threshold : number or tuple of number or iterable of number or imgaug.parameters.StochasticParameter, optional\n
        Sets the value of the sigmoid's saddle point, i.e. where values\n
        start to quickly shift from ``0.0`` to ``1.0``.\n
\n
        * If a single ``number``, this ``number`` will be used as a\n
          constant value.\n
        * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n
          sampled from the continuous interval ``[a, b)`` once per call.\n
        * If a ``list`` of ``number``, a random value will be picked from\n
          the ``list`` once per call.\n
        * If a :class:`StochasticParameter`, that parameter will be\n
          queried once per call.\n
\n
        \"per call\" denotes a call of :func:`Sigmoid.draw_sample` or\n
        :func:`Sigmoid.draw_samples`.\n
\n
    activated : bool or number, optional\n
        Defines whether the sigmoid is activated. If this is ``False``, the\n
        results of `other_param` will not be altered. This may be set to a\n
        ``float`` ``p`` in value range ``[0.0, 1.0]``, which will result in\n
        `activated` being ``True`` with probability ``p`` per call.\n
\n
    mul : number, optional\n
        The results of `other_param` will be multiplied with this value before\n
        applying the sigmoid. For noise values (range ``[0.0, 1.0]``) this\n
        should be set to about ``20``.\n
\n
    add : number, optional\n
        This value will be added to the results of `other_param` before\n
        applying the sigmoid. 
For noise values (range ``[0.0, 1.0]``) this\n should be set to about ``-10.0``, provided `mul` was set to ``20``.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.Sigmoid(\n >>> iap.SimplexNoise(),\n >>> activated=0.5,\n >>> mul=20,\n >>> add=-10)\n\n Applies a sigmoid to simplex noise in ``50%`` of all calls. The noise\n results are modified to match the sigmoid's expected value range. The\n sigmoid's outputs are in the range ``[0.0, 1.0]``.\n\n \"\"\"\n\n def __init__(self, other_param, threshold=(-10, 10), activated=True, mul=1, add=0):\n super(Sigmoid, self).__init__()\n _assert_arg_is_stoch_param(\"other_param\", other_param)\n self.other_param = other_param\n\n self.threshold = handle_continuous_param(threshold, \"threshold\", prefetch=False)\n self.activated = handle_probability_param(\n activated, \"activated\", prefetch=False\n )\n\n assert ia.is_single_number(\n mul\n ), \"Expected 'mul' to be a number, got type %s.\" % (type(mul),)\n assert mul > 0, \"Expected 'mul' to be greater than zero, got %.4f.\" % (mul,)\n self.mul = mul\n\n assert ia.is_single_number(\n add\n ), \"Expected 'add' to be a number, got type %s.\" % (type(add),)\n self.add = add\n\n @staticmethod\n def create_for_noise(other_param, threshold=(-10, 10), activated=True):\n \"\"\"Create a Sigmoid adjusted for noise parameters.\n\n \"noise\" here denotes :class:`SimplexNoise` and :class:`FrequencyNoise`.\n\n Parameters\n ----------\n other_param : imgaug.parameters.StochasticParameter\n See :func:`~imgaug.parameters.Sigmoid.__init__`.\n\n threshold : number or tuple of number or iterable of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.parameters.Sigmoid.__init__`.\n\n activated : bool or number, optional\n See :func:`~imgaug.parameters.Sigmoid.__init__`.\n\n Returns\n -------\n Sigmoid\n A sigmoid adjusted to be used with noise.\n\n \"\"\"\n return Sigmoid(other_param, threshold, activated, mul=20, add=-10)\n\n def _draw_samples(self, size, random_state):\n rngs = random_state.duplicate(3)\n result = self.other_param.draw_samples(size, random_state=rngs[0])\n if result.dtype.kind != \"f\":\n result = result.astype(np.float32)\n activated = self.activated.draw_sample(random_state=rngs[1])\n threshold = self.threshold.draw_sample(random_state=rngs[2])\n if activated > 0.5:\n # threshold must be subtracted here, not added\n # higher threshold = move threshold of sigmoid towards the right\n # = make it harder to pass the threshold\n # = more 0.0s / less 1.0s\n # by subtracting a high value, it moves each x towards the left,\n # leading to more values being left of the threshold, leading\n # to more 0.0s\n return 1 / (1 + np.exp(-(result * self.mul + self.add - threshold)))\n return result\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n opstr = str(self.other_param)\n return \"Sigmoid(%s, %s, %s, %s, %s)\" % (\n opstr,\n str(self.threshold),\n str(self.activated),\n str(self.mul),\n str(self.add),\n )\n\n\nclass SimplexNoise(StochasticParameter):\n \"\"\"Parameter that generates simplex noise of varying resolutions.\n\n This parameter expects to sample noise for 2d planes, i.e. for\n sizes ``(H, W, [C])`` and will return a value in the range ``[0.0, 1.0]``\n per spatial location in that plane.\n\n The noise is sampled from low resolution planes and\n upscaled to the requested height and width. 
The size of the low\n resolution plane may be defined (large values can be slow) and the\n interpolation method for upscaling can be set.\n\n Parameters\n ----------\n size_px_max : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional\n Maximum height and width in pixels of the low resolution plane.\n Upon any sampling call, the requested shape will be downscaled until\n the height or width (whichever is larger) does not exceed this maximum\n value anymore. Then the noise will be sampled at that shape and later\n upscaled back to the requested shape.\n\n * If a single ``int``, this ``int`` will be used as a\n constant value.\n * If a ``tuple`` of two ``int`` s ``(a, b)``, the value will be\n sampled from the discrete interval ``[a..b]`` once per call.\n * If a ``list`` of ``int``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`SimplexNoise.draw_sample` or\n :func:`SimplexNoise.draw_samples`.\n\n upscale_method : str or int or list of str or list of int or imgaug.parameters.StochasticParameter, optional\n After generating the noise maps in low resolution environments, they\n have to be upscaled to the originally requested shape (i.e. usually\n the image size). This parameter controls the interpolation method to\n use. See also :func:`~imgaug.imgaug.imresize_many_images` for a\n description of possible values.\n\n * If ``imgaug.ALL``, then either ``nearest`` or ``linear`` or\n ``area`` or ``cubic`` is picked per iteration (all same\n probability).\n * If ``str``, then that value will always be used as the method\n (must be ``nearest`` or ``linear`` or ``area`` or ``cubic``).\n * If ``list`` of ``str``, then a random value will be picked from\n that list per call.\n * If :class:`StochasticParameter`, then a random value will be\n sampled from that parameter per call.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.SimplexNoise(upscale_method=\"linear\")\n\n Create a parameter that produces smooth simplex noise of varying sizes.\n\n >>> param = iap.SimplexNoise(\n >>> size_px_max=(8, 16),\n >>> upscale_method=\"nearest\")\n\n Create a parameter that produces rectangular simplex noise of rather\n high detail.\n\n \"\"\"\n\n def __init__(self, size_px_max=(2, 16), upscale_method=[\"linear\", \"nearest\"]):\n # pylint: disable=dangerous-default-value\n super(SimplexNoise, self).__init__()\n self.size_px_max = handle_discrete_param(\n size_px_max, \"size_px_max\", value_range=(1, 10000)\n )\n\n if upscale_method == ia.ALL:\n self.upscale_method = Choice([\"nearest\", \"linear\", \"area\", \"cubic\"])\n elif ia.is_string(upscale_method):\n self.upscale_method = Deterministic(upscale_method)\n elif isinstance(upscale_method, list):\n assert (\n len(upscale_method) >= 1\n ), \"Expected at least one upscale method, \" \"got %d.\" % (\n len(upscale_method),\n )\n assert all(\n [ia.is_string(val) for val in upscale_method]\n ), \"Expected all upscale methods to be strings, got types %s.\" % (\n \", \".join([str(type(v)) for v in upscale_method])\n )\n self.upscale_method = Choice(upscale_method)\n elif isinstance(upscale_method, StochasticParameter):\n self.upscale_method = upscale_method\n else:\n raise Exception(\n \"Expected upscale_method to be string or list of strings or \"\n \"StochasticParameter, got %s.\" % (type(upscale_method),)\n )\n\n def _draw_samples(self, size, random_state):\n 
assert len(size) in [2, 3], (\n \"Expected requested noise to have shape (H, W) or (H, W, C), \"\n \"got shape %s.\" % (size,)\n )\n height, width = size[0:2]\n nb_channels = 1 if len(size) == 2 else size[2]\n\n channels = [\n self._draw_samples_hw(height, width, random_state)\n for _ in np.arange(nb_channels)\n ]\n\n if len(size) == 2:\n return channels[0]\n return np.stack(channels, axis=-1)\n\n def _draw_samples_hw(self, height, width, random_state):\n iterations = 1\n rngs = random_state.duplicate(1 + iterations)\n aggregation_method = \"max\"\n upscale_methods = self.upscale_method.draw_samples(\n (iterations,), random_state=rngs[0]\n )\n result = np.zeros((height, width), dtype=np.float32)\n for i in sm.xrange(iterations):\n noise_iter = self._draw_samples_iteration(\n height, width, rngs[1 + i], upscale_methods[i]\n )\n if aggregation_method == \"avg\":\n result += noise_iter\n elif aggregation_method == \"min\":\n if i == 0:\n result = noise_iter\n else:\n result = np.minimum(result, noise_iter)\n else: # self.aggregation_method == \"max\"\n if i == 0:\n result = noise_iter\n else:\n result = np.maximum(result, noise_iter)\n\n if aggregation_method == \"avg\":\n result = result / iterations\n\n return result\n\n def _draw_samples_iteration(self, height, width, rng, upscale_method):\n opensimplex_seed = rng.generate_seed_()\n\n # we have to use int(.) here, otherwise we can get warnings about\n # value overflows in OpenSimplex L103\n generator = OpenSimplex(seed=int(opensimplex_seed))\n\n maxlen = max(height, width)\n size_px_max = self.size_px_max.draw_sample(random_state=rng)\n if maxlen > size_px_max:\n downscale_factor = size_px_max / maxlen\n h_small = int(height * downscale_factor)\n w_small = int(width * downscale_factor)\n else:\n h_small = height\n w_small = width\n\n # don't go below Hx1 or 1xW\n h_small = max(h_small, 1)\n w_small = max(w_small, 1)\n\n noise = np.zeros((h_small, w_small), dtype=np.float32)\n for y in sm.xrange(h_small):\n for x in sm.xrange(w_small):\n noise[y, x] = generator.noise2d(y=y, x=x)\n\n # TODO this was previously (noise+0.5)/2, which was wrong as the noise\n # here is in range [-1.0, 1.0], but this new normalization might\n # lead to bad masks due to too many values being significantly\n # above 0.0 instead of being clipped to 0?\n noise_0to1 = (noise + 1.0) / 2\n noise_0to1 = np.clip(noise_0to1, 0.0, 1.0)\n\n if noise_0to1.shape != (height, width):\n noise_0to1_uint8 = (noise_0to1 * 255).astype(np.uint8)\n noise_0to1_3d = np.tile(noise_0to1_uint8[..., np.newaxis], (1, 1, 3))\n noise_0to1 = ia.imresize_single_image(\n noise_0to1_3d, (height, width), interpolation=upscale_method\n )\n noise_0to1 = (noise_0to1[..., 0] / 255.0).astype(np.float32)\n\n return noise_0to1\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"SimplexNoise(%s, %s)\" % (\n str(self.size_px_max),\n str(self.upscale_method),\n )\n\n\nclass FrequencyNoise(StochasticParameter):\n \"\"\"Parameter to generate noise of varying frequencies.\n\n This parameter expects to sample noise for 2d planes, i.e. for\n sizes ``(H, W, [C])`` and will return a value in the range ``[0.0, 1.0]``\n per spatial location in that plane.\n\n The exponent controls the frequencies and therefore noise patterns.\n Small values (around ``-4.0``) will result in large blobs. Large values\n (around ``4.0``) will result in small, repetitive patterns.\n\n The noise is sampled from low resolution planes and\n upscaled to the requested height and width. 
The size of the low\n resolution plane may be defined (high values can be slow) and the\n interpolation method for upscaling can be set.\n\n Parameters\n ----------\n exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Exponent to use when scaling in the frequency domain.\n Sane values are in the range ``-4`` (large blobs) to ``4`` (small\n patterns). To generate cloud-like structures, use roughly ``-2``.\n\n * If a single ``number``, this ``number`` will be used as a\n constant value.\n * If a ``tuple`` of two ``number`` s ``(a, b)``, the value will be\n sampled from the continuous interval ``[a, b)`` once per call.\n * If a ``list`` of ``number``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n size_px_max : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional\n Maximum height and width in pixels of the low resolution plane.\n Upon any sampling call, the requested shape will be downscaled until\n the height or width (whichever is larger) does not exceed this maximum\n value anymore. Then the noise will be sampled at that shape and later\n upscaled back to the requested shape.\n\n * If a single ``int``, this ``int`` will be used as a\n constant value.\n * If a ``tuple`` of two ``int`` s ``(a, b)``, the value will be\n sampled from the discrete interval ``[a..b]`` once per call.\n * If a ``list`` of ``int``, a random value will be picked from\n the ``list`` once per call.\n * If a :class:`StochasticParameter`, that parameter will be\n queried once per call.\n\n \"per call\" denotes a call of :func:`FrequencyNoise.draw_sample` or\n :func:`FrequencyNoise.draw_samples`.\n\n upscale_method : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n After generating the noise maps in low resolution environments, they\n have to be upscaled to the originally requested shape (i.e. usually\n the image size). This parameter controls the interpolation method to\n use. 
See also :func:`~imgaug.imgaug.imresize_many_images` for a\n description of possible values.\n\n * If ``imgaug.ALL``, then either ``nearest`` or ``linear`` or\n ``area`` or ``cubic`` is picked per iteration (all same\n probability).\n * If ``str``, then that value will always be used as the method\n (must be ``nearest`` or ``linear`` or ``area`` or ``cubic``).\n * If ``list`` of ``str``, then a random value will be picked from\n that list per call.\n * If :class:`StochasticParameter`, then a random value will be\n sampled from that parameter per call.\n\n Examples\n --------\n >>> import imgaug.parameters as iap\n >>> param = iap.FrequencyNoise(\n >>> exponent=-2,\n >>> size_px_max=(16, 32),\n >>> upscale_method=\"linear\")\n\n Create a parameter that produces noise with cloud-like patterns.\n\n \"\"\"\n\n def __init__(\n self,\n exponent=(-4, 4),\n size_px_max=(4, 32),\n upscale_method=[\"linear\", \"nearest\"],\n ):\n # pylint: disable=dangerous-default-value\n super(FrequencyNoise, self).__init__()\n self.exponent = handle_continuous_param(exponent, \"exponent\")\n self.size_px_max = handle_discrete_param(\n size_px_max, \"size_px_max\", value_range=(1, 10000)\n )\n\n if upscale_method == ia.ALL:\n self.upscale_method = Choice([\"nearest\", \"linear\", \"area\", \"cubic\"])\n elif ia.is_string(upscale_method):\n self.upscale_method = Deterministic(upscale_method)\n elif isinstance(upscale_method, list):\n assert (\n len(upscale_method) >= 1\n ), \"Expected at least one upscale method, \" \"got %d.\" % (\n len(upscale_method),\n )\n assert all(\n [ia.is_string(val) for val in upscale_method]\n ), \"Expected all upscale methods to be strings, got types %s.\" % (\n \", \".join([str(type(v)) for v in upscale_method])\n )\n self.upscale_method = Choice(upscale_method)\n elif isinstance(upscale_method, StochasticParameter):\n self.upscale_method = upscale_method\n else:\n raise Exception(\n \"Expected upscale_method to be string or list of strings or \"\n \"StochasticParameter, got %s.\" % (type(upscale_method),)\n )\n\n self._distance_matrix_cache = np.zeros((0, 0), dtype=np.float32)\n\n # TODO this is the same as in SimplexNoise, make DRY\n def _draw_samples(self, size, random_state):\n # code here is similar to:\n # http://www.redblobgames.com/articles/noise/2d/\n # http://www.redblobgames.com/articles/noise/2d/2d-noise.js\n\n assert len(size) in [2, 3], (\n \"Expected requested noise to have shape (H, W) or (H, W, C), \"\n \"got shape %s.\" % (size,)\n )\n height, width = size[0:2]\n nb_channels = 1 if len(size) == 2 else size[2]\n\n channels = [\n self._draw_samples_hw(height, width, random_state)\n for _ in np.arange(nb_channels)\n ]\n\n if len(size) == 2:\n return channels[0]\n return np.stack(channels, axis=-1)\n\n def _draw_samples_hw(self, height, width, random_state):\n maxlen = max(height, width)\n size_px_max = self.size_px_max.draw_sample(random_state=random_state)\n h_small, w_small = height, width\n if maxlen > size_px_max:\n downscale_factor = size_px_max / maxlen\n h_small = int(height * downscale_factor)\n w_small = int(width * downscale_factor)\n\n # don't go below Hx4 or 4xW\n h_small = max(h_small, 4)\n w_small = max(w_small, 4)\n\n # exponents to pronounce some frequencies\n exponent = self.exponent.draw_sample(random_state=random_state)\n\n # base function to invert, derived from a distance matrix (euclidean\n # distance to image center)\n f = self._get_distance_matrix_cached((h_small, w_small))\n\n # prevent divide by zero warnings at the image corners in\n # 
f**exponent\n f[0, 0] = 1\n f[-1, 0] = 1\n f[0, -1] = 1\n f[-1, -1] = 1\n\n scale = f ** exponent\n\n # the corners were forced to 1 above only to avoid divide-by-zero;\n # zero them out here to drop those components\n scale[0, 0] = 0\n scale[-1, 0] = 0\n scale[0, -1] = 0\n scale[-1, -1] = 0\n\n # generate random base matrix\n # first channel: wn_r, second channel: wn_a\n wn = random_state.random(size=(2, h_small, w_small))\n wn[0, ...] *= max(h_small, w_small) ** 2\n wn[1, ...] *= 2 * np.pi\n wn[0, ...] *= np.cos(wn[1, ...])\n wn[1, ...] *= np.sin(wn[1, ...])\n wn *= scale[np.newaxis, :, :]\n wn = wn.transpose((1, 2, 0))\n if wn.dtype != np.float32:\n wn = wn.astype(np.float32)\n\n # equivalent but slightly faster than:\n # wn_freqs_mul = np.zeros(treal.shape, dtype=np.complex)\n # wn_freqs_mul.real = wn[0]\n # wn_freqs_mul.imag = wn[1]\n # wn_inv = np.fft.ifft2(wn_freqs_mul).real\n wn_inv = cv2.idft(wn)[:, :, 0]\n\n # normalize to 0 to 1\n # equivalent to but slightly faster than:\n # wn_inv_min = np.min(wn_inv)\n # wn_inv_max = np.max(wn_inv)\n # noise_0to1 = (wn_inv - wn_inv_min) / (wn_inv_max - wn_inv_min)\n # does not accept wn_inv as dst directly\n noise_0to1 = cv2.normalize(\n wn_inv,\n dst=np.zeros_like(wn_inv),\n alpha=0.01,\n beta=1.0,\n norm_type=cv2.NORM_MINMAX,\n )\n\n # upscale from low resolution to image size\n if noise_0to1.shape != (height, width):\n upscale_method = self.upscale_method.draw_sample(random_state=random_state)\n noise_0to1 = ia.imresize_single_image(\n noise_0to1.astype(np.float32),\n (height, width),\n interpolation=upscale_method,\n )\n if upscale_method == \"cubic\":\n noise_0to1 = np.clip(noise_0to1, 0.0, 1.0)\n\n return noise_0to1\n\n def _get_distance_matrix_cached(self, size):\n cache = self._distance_matrix_cache\n height, width = cache.shape\n if height < size[0] or width < size[1]:\n self._distance_matrix_cache = self._create_distance_matrix(\n (max(height, size[0]), max(width, size[1]))\n )\n\n return self._extract_distance_matrix(self._distance_matrix_cache, size)\n\n @classmethod\n def _extract_distance_matrix(cls, matrix, size):\n height, width = matrix.shape[0:2]\n leftover_y = (height - size[0]) / 2\n leftover_x = (width - size[1]) / 2\n y1 = int(np.floor(leftover_y))\n y2 = height - int(np.ceil(leftover_y))\n x1 = int(np.floor(leftover_x))\n x2 = width - int(np.ceil(leftover_x))\n return matrix[y1:y2, x1:x2]\n\n @classmethod\n def _create_distance_matrix(cls, size):\n def _create_line(line_size):\n start = np.arange(line_size // 2)\n middle = [line_size // 2] if line_size % 2 == 1 else []\n end = start[::-1]\n return np.concatenate([start, middle, end])\n\n height, width = size\n ydist = _create_line(height) ** 2\n xdist = _create_line(width) ** 2\n ydist_2d = np.broadcast_to(ydist[:, np.newaxis], size)\n xdist_2d = np.broadcast_to(xdist[np.newaxis, :], size)\n dist = np.sqrt(ydist_2d + xdist_2d)\n return dist\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"FrequencyNoise(%s, %s, %s)\" % (\n str(self.exponent),\n str(self.size_px_max),\n str(self.upscale_method),\n )\n\n\ndef _assert_arg_is_stoch_param(arg_name, arg_value):\n assert isinstance(\n arg_value, StochasticParameter\n ), \"Expected '%s' to be a StochasticParameter, \" \"got type %s.\" % (\n arg_name,\n arg_value,\n )\n" ]
[ [ "numpy.hstack", "numpy.pad", "numpy.dstack" ], [ "numpy.minimum", "numpy.sqrt", "numpy.dtype", "numpy.concatenate", "numpy.round", "numpy.zeros_like", "numpy.exp", "numpy.histogram", "numpy.where", "numpy.clip", "numpy.arange", "numpy.subtract", "numpy.stack", "numpy.full", "numpy.sin", "numpy.ceil", "matplotlib.pyplot.close", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.multiply", "numpy.floor", "numpy.array", "numpy.absolute", "numpy.maximum", "scipy.stats.truncnorm", "numpy.cos", "numpy.tile", "numpy.broadcast_to", "numpy.prod", "numpy.add" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AugustasVol/written_test_automation
[ "80d3295f741f4aaa3abaa4e85f20677ff59c146d" ]
[ "nets.py" ]
[ "import numpy as np\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nclass net_base:\n def trainer(self, x,y, epochs = 1, print_loss = True):\n \n self.train(True)\n \n for i in range(epochs):\n\n self.optimizer.zero_grad() # zero the gradient buffers\n \n output = self(x)\n loss = self.loss_function(output, y)\n loss.backward()\n if print_loss:\n print(loss)\n \n self.optimizer.step() # Does the update\n \n self.train(False)\n def numpy_forward(self, x):\n\n if x.dtype == np.uint8:\n x = x / 255\n\n x = x.astype(np.float32)\n x = torch.from_numpy(x)\n x = autograd.Variable(x)\n output = self(x)\n \n return output.data.numpy()\n def numpy_train(self,x,y, epochs = 1, print_loss = True):\n\n if x.dtype == np.uint8:\n x = x / 255\n \n x = x.astype(np.float32)\n y = y.astype(np.float32)\n \n x = torch.from_numpy(x)\n y = torch.from_numpy(y)\n \n x = autograd.Variable(x)\n y = autograd.Variable(y)\n \n self.trainer(x,y, epochs = epochs, print_loss = print_loss)\n\n def load_weights(self, path):\n self.load_state_dict(torch.load(path))\n def save_weights(self,path):\n torch.save(self.state_dict(), path)\n \n \nclass answer_model(nn.Module, net_base):\n def __init__(self, category_number = 6):\n super(answer_model, self).__init__()\n self.dropout = nn.Dropout(0.05)\n\n #self.conv_start = nn.Conv2d(1, 16, (3,3), stride=(1,1), padding=(1,1))\n\n self.conv00 = nn.Conv2d(1, 15, (2,2), stride=(2,2))\n self.conv01 = nn.Conv2d(15, 16, (2,2), stride=(2,2))\n self.conv02 = nn.Conv2d(16, 16, (1,1), stride=(1,1))\n \n self.conv10 = nn.Conv2d(16, 32, (3,3), stride=(3,3))\n self.conv11 = nn.Conv2d(32,32, (2,2), stride=(1,1))\n self.conv12 = nn.Conv2d(32,32, (1,1), stride=(1,1))\n \n self.conv20 = nn.Conv2d(32, 16, (1,5), stride=(1,2))\n self.conv21 = nn.Conv2d(16, 16, (1,5), stride=(1,2))\n self.conv22 = nn.Conv2d(16, 6, (1,1), stride=(1,1))\n \n self.final_dense = nn.Linear(6,category_number)\n\n self.loss_function = nn.BCELoss()\n self.optimizer = optim.Adam(self.parameters(), lr = 0.0001)\n\n self.train(False)\n def forward(self,x):\n\n #x = F.relu(self.conv_start(x))\n\n #x = self.dropout(x)\n \n x = F.relu(self.conv00(x))\n x = F.relu(self.conv01(x))\n x = F.relu(self.conv02(x))\n\n x = self.dropout(x)\n \n x = F.relu(self.conv10(x))\n x = F.relu(self.conv11(x))\n x = F.relu(self.conv12(x))\n\n x = self.dropout(x)\n\n x = F.relu(self.conv20(x))\n x = F.relu(self.conv21(x))\n x = F.relu(self.conv22(x))\n\n x = x.view(-1, 6)\n\n x = F.sigmoid(self.final_dense(x))\n \n return x\n" ]
[ [ "torch.nn.Dropout", "torch.load", "torch.nn.Conv2d", "torch.from_numpy", "torch.nn.BCELoss", "torch.nn.Linear", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Markek1/style-transfer-gui
[ "14892d3c657242c4825129b56a6668904f53a65e" ]
[ "style_transfer.py" ]
[ "import os\n\nimport tensorflow as tf\n\ndef magenta_v1_256_2(content_image, style_image, resize=False, content_res=None, style_res=None):\n '''Resolution of generated image = resolution of content image.\n Resolution of the style image is 256x256 by default because the net\n was trained on it and it generally works best'''\n if resize:\n if content_res:\n content_image = tf.image.resize(content_image, content_res)\n\n if style_res:\n style_image = tf.image.resize(style_image, style_res)\n else:\n style_image = tf.image.resize(style_image, (256, 256))\n local_path = 'models/magenta_arbitrary-image-stylization-v1-256_2'\n if os.path.exists(local_path):\n model = tf.saved_model.load(local_path)\n image = tf.squeeze(model(tf.constant(content_image), tf.constant(style_image))[0])\n return image\n" ]
[ [ "tensorflow.image.resize", "tensorflow.constant", "tensorflow.saved_model.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
skerit/gezicht
[ "5361b06e250400b0f1b44faf6f8940b0f39ed5d9" ]
[ "python/main.py" ]
[ "import face_recognition\nimport importlib\nimport numpy as np\nimport socket\nimport time\nimport json\nimport sys\nimport os\n\nfrom PIL import Image\nfrom io import BytesIO\n\npi_spec = importlib.util.find_spec(\"picamera\")\nfound_picam = pi_spec is not None\n_picam = False\nhas_cv = False\npicam_options = {\n\t'rotation' : 90\n}\n\n# Make stdout flush by default\n#sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)\n\n# Create the face encodings\nencodings = {}\n\ndef getPicam():\n\t# If the module isn't available, return False\n\tif not found_picam:\n\t\treturn False\n\n\t# If the camera hasn't been created yet,\n\t# do so now\n\tif not _picam:\n\t\timport picamera\n\t\t_picam = picamera.PiCamera()\n\n\t\tif picam_options:\n\t\t\t_picam.rotation = picam_options.get('rotation')\n\n\t\t_picam.resolution = (320, 240)\n\n\treturn _picam\n\ndef detectFaceFromPath(path):\n\timage = face_recognition.load_image_file(path)\n\treturn detectFaces(image)\n\ndef detectFaces(frame):\n\n\t# Get the shape of the frame\n\tshape = frame.shape\n\twidth = shape[0]\n\theight = shape[1]\n\n\t# Create the result dictionary\n\tresult = {}\n\tresult['original_size'] = {\n\t\t'width' : width,\n\t\t'height' : height\n\t}\n\n\t# Max size is 450x450\n\tmax_size = 450\n\n\tif width > max_size or height > max_size:\n\t\tif width > height:\n\t\t\tcoef = max_size / width\n\t\telse:\n\t\t\tcoef = max_size / height\n\n\t\tif not has_cv:\n\t\t\timport cv2\n\t\t\thas_cv = True\n\n\t\t# Resize frame of video for faster face recognition processing\n\t\tframe = cv2.resize(frame, (0, 0), fx=coef, fy=coef)\n\n\t\tresult['resized'] = {\n\t\t\t'width' : frame.shape[0],\n\t\t\t'height' : frame.shape[1]\n\t\t}\n\n\tface_locations = face_recognition.face_locations(frame)\n\tface_encodings = face_recognition.face_encodings(frame, face_locations)\n\tface_names = []\n\tfaces = []\n\n\t# Get an array of the known faces\n\tknown_faces = list(encodings.items())\n\tleft_overs = []\n\tremove_seen_faces = True\n\n\t# Loop over each face found in the frame to see if it's someone we know.\n\tfor face_encoding in face_encodings:\n\t\tname = ''\n\n\t\tif remove_seen_faces:\n\t\t\t# Iterate over the known faces,\n\t\t\t# we'll pop one each time\n\t\t\twhile known_faces:\n\t\t\t\t# Shift the first face from the list\n\t\t\t\tface = known_faces.pop(0)\n\t\t\t\tkey = face[0]\n\t\t\t\tvalue = face[1]\n\n\t\t\t\tmatch = face_recognition.compare_faces(value, face_encoding)\n\n\t\t\t\tif (match[0]):\n\t\t\t\t\tname = key\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\t# It doesn't match, add it to the leftovers list\n\t\t\t\t\tleft_overs.append(face)\n\n\t\t\t# Add all the left overs back to the face_names\n\t\t\twhile left_overs:\n\t\t\t\tknown_faces.append(left_overs.pop(0))\n\t\telse:\n\t\t\tfor key, value in known_faces:\n\t\t\t\tmatch = face_recognition.compare_faces(value, face_encoding)\n\n\t\t\t\tif match[0]:\n\t\t\t\t\tname = key\n\t\t\t\t\tbreak\n\n\t\tface_names.append(name)\n\n\tfor (top, right, bottom, left), name in zip(face_locations, face_names):\n\t\tentry = {\n\t\t\t'top' : top,\n\t\t\t'right' : right,\n\t\t\t'bottom' : bottom,\n\t\t\t'left' : left,\n\t\t\t'name' : name\n\t\t}\n\n\t\tfaces.append(entry)\n\n\tresult['faces'] = faces\n\n\treturn result\n\n# Start listening to input commands\nwhile 1:\n\tline = sys.stdin.readline()\n\treq = json.loads(line)\n\tcmd = req.get('command')\n\toutput = {}\n\tresult = {}\n\toutput['id'] = req.get('id')\n\toutput['result'] = result;\n\n\tif cmd == 'learn-face':\n\t\tname = 
req.get('name')\n\t\tpaths = req.get('paths')\n\t\tpath_results = []\n\t\tcount = 0\n\n\t\tif not name in encodings:\n\t\t\tencodings[name] = []\n\n\t\tfor path in paths:\n\t\t\timage = face_recognition.load_image_file(path)\n\t\t\tencoding = face_recognition.face_encodings(image)[0]\n\t\t\tencodings[name].append(encoding)\n\n\t\t\tcount += 1\n\n\t\t\t# Turn the numpy array into a regular list,\n\t\t\t# otherwise it'll fail json encoding later\n\t\t\tpath_results.append(encoding.tolist())\n\n\t\t# Just a check on how many paths we did\n\t\tresult['count'] = count\n\n\t\t# Give the encodings back to the other side,\n\t\t# they might cache them\n\t\tresult['encodings'] = path_results\n\telif cmd == 'add-face-encoding':\n\t\tnew_encodings = req.get('encodings')\n\t\tname = req.get('name')\n\t\tcount = 0\n\n\t\tif not name in encodings:\n\t\t\tencodings[name] = []\n\n\t\tfor encoding in new_encodings:\n\t\t\tencodings[name].append(encoding)\n\t\t\tcount += 1\n\n\t\tresult['count'] = count\n\n\telif cmd == 'detect-face':\n\t\tpath = req.get('file_path')\n\t\tface_result = detectFaceFromPath(path)\n\t\tresult.update(face_result)\n\telif cmd == 'detect-picam':\n\t\tpicam = getPicam()\n\n\t\tif not picam:\n\t\t\toutput['error'] = 'Did not find picamera module'\n\t\telse:\n\n\t\t\tframe = np.empty((240, 320, 3), dtype=np.uint8)\n\t\t\tpicam.capture(frame, format=\"rgb\", use_video_port=True)\n\n\t\t\tface_result = detectFaces(frame)\n\n\t\t\tresult.update(face_result)\n\n\telif cmd == 'detect-stream':\n\t\tpath = req.get('stream_path');\n\n\t\ttry:\n\t\t\tsock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\t\t\tsock.connect(path)\n\t\t\tdata = False\n\n\t\t\twhile True:\n\t\t\t\tbuf = sock.recv(4096)\n\n\t\t\t\tif not buf:\n\t\t\t\t\tbreak\n\n\t\t\t\tif not data:\n\t\t\t\t\tdata = buf\n\t\t\t\telse:\n\t\t\t\t\tdata = data + buf\n\n\t\t\tface_result = detectFaceFromPath(BytesIO(data))\n\t\t\tresult.update(face_result)\n\n\t\texcept Exception as e:\n\t\t\toutput['error'] = str(e)\n\n\n\tprint(json.dumps(output), flush=True)\n\tsys.stdout.flush()\n\n\t# We need to sleep for the buffer to flush\n\ttime.sleep(0.05)\n" ]
[ [ "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
disktnk/onnx-chainer
[ "e4542568009e63e7da83aa0f11b2cb5504e8cef8", "e4542568009e63e7da83aa0f11b2cb5504e8cef8", "e4542568009e63e7da83aa0f11b2cb5504e8cef8" ]
[ "onnx_chainer/functions/normalization.py", "onnx_chainer/functions/loss.py", "tests/functions_tests/test_noises.py" ]
[ "import sys\n\nimport chainer\nimport numpy as np\n\nfrom onnx_chainer.functions.opset_version import support\nfrom onnx_chainer import onnx_helper\n\n\n@support((1, 6, 7))\ndef convert_BatchNormalization(func, opset_version, input_names,\n output_names, context, parameters):\n is_fixed_bn = len(func.inputs) > 3\n\n # NOTE(disktnk):\n # if `use_beta=False`, beta_param is None, `use_gamma=False` is same.\n beta_param = func.inputs[2].get_variable_or_none()\n gamma_param = func.inputs[1].get_variable_or_none()\n namedlink = context.get_link(beta_param) or context.get_link(gamma_param)\n\n if namedlink is not None:\n prefix, link = namedlink\n if is_fixed_bn:\n mean = link.avg_mean\n var = link.avg_var\n else:\n # on train mode, avg_mean would be updated, so make them from x\n x = func.inputs[0].get_variable().array\n mean = x.mean(axis=func.axis)\n var = x.var(axis=func.axis)\n else:\n prefix = None\n if is_fixed_bn:\n mean = func.inputs[3].get_variable().array\n var = func.inputs[4].get_variable().array\n else:\n x = func.inputs[0].get_variable().array\n mean = x.mean(axis=func.axis)\n var = x.var(axis=func.axis)\n\n def add_param(v, suffix):\n if prefix is None:\n return context.add_param(v, suffix)\n else:\n return context.add_param(\n v, '{}_{}'.format(prefix, suffix), use_original_name=True)\n\n maen_name = add_param(mean, 'avg_mean')\n var_name = add_param(var, 'avg_var')\n if is_fixed_bn:\n input_names[3] = maen_name\n input_names[4] = var_name\n else:\n input_names.extend([maen_name, var_name])\n\n if beta_param is None:\n beta_name = add_param(np.zeros_like(mean, dtype=mean.dtype), 'beta')\n input_names[2] = beta_name\n if gamma_param is None:\n gamma_name = add_param(np.ones_like(mean, dtype=mean.dtype), 'gamma')\n input_names[1] = gamma_name\n\n momentum = getattr(func, 'decay', 0.)\n\n # TODO(disktnk): On definition of ONNX's BatchNormalization operator,\n # outputs one required output and four optional outputs. 
This converter\n # must make 5 values for output and return them.\n\n if opset_version == 1:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, output_names,\n epsilon=func.eps,\n momentum=momentum,\n is_test=not chainer.config.train,\n consumed_inputs=[False, False, False, True, True],\n ),\n elif opset_version == 6:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, output_names,\n epsilon=func.eps,\n momentum=momentum,\n is_test=not chainer.config.train,\n ),\n elif opset_version == 7:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, output_names,\n epsilon=func.eps,\n momentum=momentum,\n ),\n\n\n@support((1, 6, 7))\ndef convert_FixedBatchNormalization(func, opset_version,\n input_names, output_names, context,\n parameters):\n return convert_BatchNormalization(\n func, opset_version, input_names, output_names, context, parameters)\n\n\ndef convert_LocalResponseNormalization(func, opset_version,\n input_names, output_names, context,\n parameters):\n size = int(func.n)\n return onnx_helper.make_node(\n 'LRN', input_names, output_names,\n alpha=float(func.alpha) * size,\n beta=float(func.beta),\n bias=float(func.k),\n size=size,\n ),\n\n\ndef convert_NormalizeL2(func, opset_version, input_names,\n output_names, context, parameters):\n if isinstance(func.axis, tuple) and len(func.axis) != 1:\n raise ValueError(\n 'Normalization along with multiple axes ({}) are not supported in '\n 'the ONNX\\'s LpNormalization operator.'.format(func.axis))\n if abs(func.eps - 1e-5) > sys.float_info.epsilon:\n # default value of F.normaize eps is 1e-5\n raise ValueError(\n '\\'eps\\' is not supported in the ONNX\\'s LpNormalization operator,'\n ' so that ONNX-Chainer does not accept custom values for \\'eps\\' '\n '({})'.format(func.eps))\n\n return onnx_helper.make_node(\n 'LpNormalization', input_names, output_names,\n axis=int(func.axis[0]),\n p=2,\n ),\n", "import chainer\nimport numpy as np\n\nfrom onnx_chainer.functions.opset_version import support\nfrom onnx_chainer import onnx_helper\n\n\n@support((9,))\ndef convert_SoftmaxCrossEntropy(\n func, opset_version, input_names,\n output_names, context, parameters):\n # obtain input variable\n if not isinstance(func, chainer.FunctionNode):\n raise NotImplementedError(\n 'SoftmaxCrossEntropy is currently supported for Chainer>=6.0.0a1.')\n\n x_var, t_var = func.get_retained_inputs()\n if len(x_var.shape) != 2:\n raise NotImplementedError(\n 'ONNX-Chainer currently handles SoftmaxCrossEntropy only when '\n 'the dimension of input variable x is exactly two.')\n if np.any(t_var.array == func.ignore_label):\n raise NotImplementedError(\n 'ONNX-Chainer currently handles SoftmaxCrossEntropy only when '\n 'ignore_label is not used in input variable t.')\n if (not func.normalize) or (func.class_weight is not None) or\\\n (func.ignore_label != -1) or (func.reduce != 'mean'):\n raise NotImplementedError(\n 'ONNX-Chainer currently handles SoftmaxCrossEntropy only when '\n 'argument parameters are default setting.')\n\n # create intermediate values\n gb = onnx_helper.GraphBuilder()\n x, t = input_names\n y_log = gb.op('LogSoftmax', [x])\n depth = context.add_const(np.array([x_var.shape[1]], dtype=np.int32),\n 'depth')\n zeroone = context.add_const(np.array([0, 1], dtype=x_var.dtype), 'zeroone')\n th = gb.op('OneHot', [t, depth, zeroone])\n s0 = gb.op('Mul', [y_log, th])\n sn = gb.op('Neg', [s0])\n sr = gb.op('ReduceSum', [sn], axes=[1], keepdims=0)\n gb.op_output_named('ReduceMean', [sr], output_names, axes=[0], 
keepdims=0)\n\n return gb.nodes()\n", "import chainer\nimport chainer.functions as F\nfrom chainer import testing\nimport numpy as np\n\nfrom tests.helper import ONNXModelTest\n\n\[email protected](\n {'name': 'dropout', 'ops': lambda x: F.dropout(x, ratio=0.5)},\n)\nclass TestNoises(ONNXModelTest):\n\n def setUp(self):\n\n class Model(chainer.Chain):\n\n def __init__(self, ops):\n super(Model, self).__init__()\n self.ops = ops\n\n def __call__(self, x):\n with chainer.using_config('train', True):\n y = self.ops(x)\n return y\n\n self.model = Model(self.ops)\n self.x = np.zeros((1, 5), dtype=np.float32)\n\n def test_output(self):\n self.expect(self.model, self.x, name=self.name)\n" ]
[ [ "numpy.ones_like", "numpy.zeros_like" ], [ "numpy.array", "numpy.any" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Abishek15592/pandas
[ "6929e262dd22ac35baabf87a5236d451255fb66d", "6929e262dd22ac35baabf87a5236d451255fb66d" ]
[ "pandas/tests/test_common.py", "pandas/tests/series/apply/test_series_transform.py" ]
[ "import collections\nfrom distutils.version import LooseVersion\nfrom functools import partial\nimport string\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_version_under1p17\n\nimport pandas as pd\nfrom pandas import Series, Timestamp\nimport pandas._testing as tm\nfrom pandas.core import ops\nimport pandas.core.common as com\n\n\ndef test_get_callable_name():\n getname = com.get_callable_name\n\n def fn(x):\n return x\n\n lambda_ = lambda x: x # noqa: E731\n part1 = partial(fn)\n part2 = partial(part1)\n\n class somecall:\n def __call__(self):\n return x # noqa\n\n assert getname(fn) == \"fn\"\n assert getname(lambda_)\n assert getname(part1) == \"fn\"\n assert getname(part2) == \"fn\"\n assert getname(somecall()) == \"somecall\"\n assert getname(1) is None\n\n\ndef test_any_none():\n assert com.any_none(1, 2, 3, None)\n assert not com.any_none(1, 2, 3, 4)\n\n\ndef test_all_not_none():\n assert com.all_not_none(1, 2, 3, 4)\n assert not com.all_not_none(1, 2, 3, None)\n assert not com.all_not_none(None, None, None, None)\n\n\ndef test_random_state():\n import numpy.random as npr\n\n # Check with seed\n state = com.random_state(5)\n assert state.uniform() == npr.RandomState(5).uniform()\n\n # Check with random state object\n state2 = npr.RandomState(10)\n assert com.random_state(state2).uniform() == npr.RandomState(10).uniform()\n\n # check with no arg random state\n assert com.random_state() is np.random\n\n # check array-like\n # GH32503\n state_arr_like = npr.randint(0, 2 ** 31, size=624, dtype=\"uint32\")\n assert (\n com.random_state(state_arr_like).uniform()\n == npr.RandomState(state_arr_like).uniform()\n )\n\n # Check BitGenerators\n # GH32503\n if not np_version_under1p17:\n assert (\n com.random_state(npr.MT19937(3)).uniform()\n == npr.RandomState(npr.MT19937(3)).uniform()\n )\n assert (\n com.random_state(npr.PCG64(11)).uniform()\n == npr.RandomState(npr.PCG64(11)).uniform()\n )\n\n # Error for floats or strings\n msg = (\n \"random_state must be an integer, array-like, a BitGenerator, \"\n \"a numpy RandomState, or None\"\n )\n with pytest.raises(ValueError, match=msg):\n com.random_state(\"test\")\n\n with pytest.raises(ValueError, match=msg):\n com.random_state(5.5)\n\n\[email protected](\n \"left, right, expected\",\n [\n (Series([1], name=\"x\"), Series([2], name=\"x\"), \"x\"),\n (Series([1], name=\"x\"), Series([2], name=\"y\"), None),\n (Series([1]), Series([2], name=\"x\"), None),\n (Series([1], name=\"x\"), Series([2]), None),\n (Series([1], name=\"x\"), [2], \"x\"),\n ([1], Series([2], name=\"y\"), \"y\"),\n ],\n)\ndef test_maybe_match_name(left, right, expected):\n assert ops._maybe_match_name(left, right) == expected\n\n\ndef test_dict_compat():\n data_datetime64 = {np.datetime64(\"1990-03-15\"): 1, np.datetime64(\"2015-03-15\"): 2}\n data_unchanged = {1: 2, 3: 4, 5: 6}\n expected = {Timestamp(\"1990-3-15\"): 1, Timestamp(\"2015-03-15\"): 2}\n assert com.dict_compat(data_datetime64) == expected\n assert com.dict_compat(expected) == expected\n assert com.dict_compat(data_unchanged) == data_unchanged\n\n\ndef test_standardize_mapping():\n # No uninitialized defaultdicts\n msg = r\"to_dict\\(\\) only accepts initialized defaultdicts\"\n with pytest.raises(TypeError, match=msg):\n com.standardize_mapping(collections.defaultdict)\n\n # No non-mapping subtypes, instance\n msg = \"unsupported type: <class 'list'>\"\n with pytest.raises(TypeError, match=msg):\n com.standardize_mapping([])\n\n # No non-mapping subtypes, class\n with 
pytest.raises(TypeError, match=msg):\n com.standardize_mapping(list)\n\n fill = {\"bad\": \"data\"}\n assert com.standardize_mapping(fill) == dict\n\n # Convert instance to type\n assert com.standardize_mapping({}) == dict\n\n dd = collections.defaultdict(list)\n assert isinstance(com.standardize_mapping(dd), partial)\n\n\ndef test_git_version():\n # GH 21295\n git_version = pd.__git_version__\n assert len(git_version) == 40\n assert all(c in string.hexdigits for c in git_version)\n\n\ndef test_version_tag():\n version = pd.__version__\n try:\n version > LooseVersion(\"0.0.1\")\n except TypeError:\n raise ValueError(\n \"No git tags exist, please sync tags between upstream and your repo\"\n )\n\n\[email protected](\n \"obj\", [(obj,) for obj in pd.__dict__.values() if callable(obj)]\n)\ndef test_serializable(obj):\n # GH 35611\n unpickled = tm.round_trip_pickle(obj)\n assert type(obj) == type(unpickled)\n", "import numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Series, concat\nimport pandas._testing as tm\nfrom pandas.core.base import SpecificationError\nfrom pandas.core.groupby.base import transformation_kernels\n\n\ndef test_transform_ufunc(string_series):\n # GH 35964\n with np.errstate(all=\"ignore\"):\n f_sqrt = np.sqrt(string_series)\n\n # ufunc\n result = string_series.transform(np.sqrt)\n expected = f_sqrt.copy()\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"op\", transformation_kernels)\ndef test_transform_groupby_kernel(string_series, op):\n # GH 35964\n if op == \"cumcount\":\n pytest.xfail(\"Series.cumcount does not exist\")\n if op == \"tshift\":\n pytest.xfail(\"Only works on time index and is deprecated\")\n\n args = [0.0] if op == \"fillna\" else []\n ones = np.ones(string_series.shape[0])\n expected = string_series.groupby(ones).transform(op, *args)\n result = string_series.transform(op, 0, *args)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"ops, names\", [([np.sqrt], [\"sqrt\"]), ([np.abs, np.sqrt], [\"absolute\", \"sqrt\"])]\n)\ndef test_transform_list(string_series, ops, names):\n # GH 35964\n with np.errstate(all=\"ignore\"):\n expected = concat([op(string_series) for op in ops], axis=1)\n expected.columns = names\n result = string_series.transform(ops)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_transform_dict(string_series):\n # GH 35964\n with np.errstate(all=\"ignore\"):\n expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1)\n expected.columns = [\"foo\", \"bar\"]\n result = string_series.transform({\"foo\": np.sqrt, \"bar\": np.abs})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_transform_udf(axis, string_series):\n # GH 35964\n # via apply\n def func(x):\n if isinstance(x, Series):\n raise ValueError\n return x + 1\n\n result = string_series.transform(func)\n expected = string_series + 1\n tm.assert_series_equal(result, expected)\n\n # via map Series -> Series\n def func(x):\n if not isinstance(x, Series):\n raise ValueError\n return x + 1\n\n result = string_series.transform(func)\n expected = string_series + 1\n tm.assert_series_equal(result, expected)\n\n\ndef test_transform_wont_agg(string_series):\n # GH 35964\n # we are trying to transform with an aggregator\n msg = \"Function did not transform\"\n with pytest.raises(ValueError, match=msg):\n string_series.transform([\"min\", \"max\"])\n\n msg = \"Function did not transform\"\n with pytest.raises(ValueError, match=msg):\n with np.errstate(all=\"ignore\"):\n string_series.transform([\"sqrt\", 
\"max\"])\n\n\ndef test_transform_none_to_type():\n # GH34377\n df = DataFrame({\"a\": [None]})\n msg = \"Transform function failed\"\n with pytest.raises(ValueError, match=msg):\n df.transform({\"a\": int})\n\n\ndef test_transform_reducer_raises(all_reductions):\n # GH 35964\n op = all_reductions\n s = Series([1, 2, 3])\n msg = \"Function did not transform\"\n with pytest.raises(ValueError, match=msg):\n s.transform(op)\n with pytest.raises(ValueError, match=msg):\n s.transform([op])\n with pytest.raises(ValueError, match=msg):\n s.transform({\"A\": op})\n with pytest.raises(ValueError, match=msg):\n s.transform({\"A\": [op]})\n\n\n# mypy doesn't allow adding lists of different types\n# https://github.com/python/mypy/issues/5492\[email protected](\"op\", [*transformation_kernels, lambda x: x + 1])\ndef test_transform_bad_dtype(op):\n # GH 35964\n s = Series(3 * [object]) # Series that will fail on most transforms\n if op in (\"backfill\", \"shift\", \"pad\", \"bfill\", \"ffill\"):\n pytest.xfail(\"Transform function works on any datatype\")\n msg = \"Transform function failed\"\n with pytest.raises(ValueError, match=msg):\n s.transform(op)\n with pytest.raises(ValueError, match=msg):\n s.transform([op])\n with pytest.raises(ValueError, match=msg):\n s.transform({\"A\": op})\n with pytest.raises(ValueError, match=msg):\n s.transform({\"A\": [op]})\n\n\[email protected](\"use_apply\", [True, False])\ndef test_transform_passes_args(use_apply):\n # GH 35964\n # transform uses UDF either via apply or passing the entire Series\n expected_args = [1, 2]\n expected_kwargs = {\"c\": 3}\n\n def f(x, a, b, c):\n # transform is using apply iff x is not a Series\n if use_apply == isinstance(x, Series):\n # Force transform to fallback\n raise ValueError\n assert [a, b] == expected_args\n assert c == expected_kwargs[\"c\"]\n return x\n\n Series([1]).transform(f, 0, *expected_args, **expected_kwargs)\n\n\ndef test_transform_axis_1_raises():\n # GH 35964\n msg = \"No axis named 1 for object type Series\"\n with pytest.raises(ValueError, match=msg):\n Series([1]).transform(\"sum\", axis=1)\n\n\ndef test_transform_nested_renamer():\n # GH 35964\n match = \"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=match):\n Series([1]).transform({\"A\": {\"B\": [\"sum\"]}})\n" ]
[ [ "numpy.random.MT19937", "pandas.Series", "pandas.Timestamp", "pandas.core.common.standardize_mapping", "pandas.core.common.all_not_none", "pandas.core.common.random_state", "pandas.core.common.any_none", "numpy.datetime64", "pandas.core.ops._maybe_match_name", "pandas.__dict__.values", "pandas.core.common.dict_compat", "pandas._testing.round_trip_pickle", "numpy.random.RandomState", "numpy.random.PCG64", "numpy.random.randint" ], [ "pandas.Series", "numpy.sqrt", "numpy.abs", "pandas.DataFrame", "numpy.ones", "numpy.errstate", "pandas._testing.assert_series_equal", "pandas._testing.assert_frame_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adbelniak/stable-baselines3
[ "61e3b9c3fc4b113b5de65dd3b083de7550676018" ]
[ "stable_baselines3/common/policies.py" ]
[ "\"\"\"Policies: abstract base class and concrete implementations.\"\"\"\n\nimport collections\nimport copy\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom functools import partial\nfrom typing import Any, Dict, List, Optional, Tuple, Type, Union\n\nimport gym\nimport numpy as np\nimport torch as th\nfrom torch import nn\n\nfrom stable_baselines3.common.distributions import (\n BernoulliDistribution,\n CategoricalDistribution,\n DiagGaussianDistribution,\n Distribution,\n MultiCategoricalDistribution,\n StateDependentNoiseDistribution,\n ConditionalCategoricalDistribution,\n make_proba_distribution,\n)\nfrom stable_baselines3.common.preprocessing import get_action_dim, is_image_space, maybe_transpose, preprocess_obs\nfrom stable_baselines3.common.torch_layers import (\n BaseFeaturesExtractor,\n CombinedExtractor,\n FlattenExtractor,\n MlpExtractor,\n NatureCNN,\n create_mlp,\n)\nfrom stable_baselines3.common.type_aliases import Schedule\nfrom stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor\n\n\nclass BaseModel(nn.Module, ABC):\n \"\"\"\n The base model object: makes predictions in response to observations.\n\n In the case of policies, the prediction is an action. In the case of critics, it is the\n estimated value of the observation.\n\n :param observation_space: The observation space of the environment\n :param action_space: The action space of the environment\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param features_extractor: Network to extract features\n (a CNN when using images, a nn.Flatten() layer otherwise)\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n features_extractor: Optional[nn.Module] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super(BaseModel, self).__init__()\n\n if optimizer_kwargs is None:\n optimizer_kwargs = {}\n\n if features_extractor_kwargs is None:\n features_extractor_kwargs = {}\n\n self.observation_space = observation_space\n self.action_space = action_space\n self.features_extractor = features_extractor\n self.normalize_images = normalize_images\n\n self.optimizer_class = optimizer_class\n self.optimizer_kwargs = optimizer_kwargs\n self.optimizer = None # type: Optional[th.optim.Optimizer]\n\n self.features_extractor_class = features_extractor_class\n self.features_extractor_kwargs = features_extractor_kwargs\n\n @abstractmethod\n def forward(self, *args, **kwargs):\n pass\n\n def _update_features_extractor(\n self,\n net_kwargs: Dict[str, Any],\n features_extractor: Optional[BaseFeaturesExtractor] = None,\n ) -> Dict[str, Any]:\n \"\"\"\n Update the network keyword arguments and create a new features extractor object if needed.\n If a ``features_extractor`` object is passed, then it will be shared.\n\n :param net_kwargs: the base network keyword arguments, without the ones\n related to 
features extractor\n :param features_extractor: a features extractor object.\n If None, a new object will be created.\n :return: The updated keyword arguments\n \"\"\"\n net_kwargs = net_kwargs.copy()\n if features_extractor is None:\n # The features extractor is not shared, create a new one\n features_extractor = self.make_features_extractor()\n net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))\n return net_kwargs\n\n def make_features_extractor(self) -> BaseFeaturesExtractor:\n \"\"\"Helper method to create a features extractor.\"\"\"\n return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)\n\n def extract_features(self, obs: th.Tensor) -> th.Tensor:\n \"\"\"\n Preprocess the observation if needed and extract features.\n\n :param obs:\n :return:\n \"\"\"\n assert self.features_extractor is not None, \"No features extractor was set\"\n preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)\n return self.features_extractor(preprocessed_obs)\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n \"\"\"\n Get data that need to be saved in order to re-create the model when loading it from disk.\n\n :return: The dictionary to pass to the as kwargs constructor when reconstruction this model.\n \"\"\"\n return dict(\n observation_space=self.observation_space,\n action_space=self.action_space,\n # Passed to the constructor by child class\n # squash_output=self.squash_output,\n # features_extractor=self.features_extractor\n normalize_images=self.normalize_images,\n )\n\n @property\n def device(self) -> th.device:\n \"\"\"Infer which device this policy lives on by inspecting its parameters.\n If it has no parameters, the 'cpu' device is used as a fallback.\n\n :return:\"\"\"\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")\n\n def save(self, path: str) -> None:\n \"\"\"\n Save model to a given location.\n\n :param path:\n \"\"\"\n th.save({\"state_dict\": self.state_dict(), \"data\": self._get_constructor_parameters()}, path)\n\n @classmethod\n def load(cls, path: str, device: Union[th.device, str] = \"auto\") -> \"BaseModel\":\n \"\"\"\n Load model from path.\n\n :param path:\n :param device: Device on which the policy should be loaded.\n :return:\n \"\"\"\n device = get_device(device)\n saved_variables = th.load(path, map_location=device)\n\n # Allow to load policy saved with older version of SB3\n if \"sde_net_arch\" in saved_variables[\"data\"]:\n warnings.warn(\n \"sde_net_arch is deprecated, please downgrade to SB3 v1.2.0 if you need such parameter.\",\n DeprecationWarning,\n )\n del saved_variables[\"data\"][\"sde_net_arch\"]\n\n # Create policy object\n model = cls(**saved_variables[\"data\"]) # pytype: disable=not-instantiable\n # Load weights\n model.load_state_dict(saved_variables[\"state_dict\"])\n model.to(device)\n return model\n\n def load_from_vector(self, vector: np.ndarray) -> None:\n \"\"\"\n Load parameters from a 1D vector.\n\n :param vector:\n \"\"\"\n th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())\n\n def parameters_to_vector(self) -> np.ndarray:\n \"\"\"\n Convert the parameters to a 1D vector.\n\n :return:\n \"\"\"\n return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()\n\n def set_training_mode(self, mode: bool) -> None:\n \"\"\"\n Put the policy in either training or evaluation mode.\n\n This affects certain 
modules, such as batch normalisation and dropout.\n\n :param mode: if true, set to training mode, else set to evaluation mode\n \"\"\"\n self.train(mode)\n\n def obs_to_tensor(self, observation: Union[np.ndarray, Dict[str, np.ndarray]]) -> Tuple[th.Tensor, bool]:\n \"\"\"\n Convert an input observation to a PyTorch tensor that can be fed to a model.\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :return: The observation as PyTorch tensor\n and whether the observation is vectorized or not\n \"\"\"\n vectorized_env = False\n if isinstance(observation, dict):\n # need to copy the dict as the dict in VecFrameStack will become a torch tensor\n observation = copy.deepcopy(observation)\n for key, obs in observation.items():\n obs_space = self.observation_space.spaces[key]\n if is_image_space(obs_space):\n obs_ = maybe_transpose(obs, obs_space)\n else:\n obs_ = np.array(obs)\n vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space)\n # Add batch dimension if needed\n observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape)\n\n elif is_image_space(self.observation_space):\n # Handle the different cases for images\n # as PyTorch use channel first format\n observation = maybe_transpose(observation, self.observation_space)\n\n else:\n observation = np.array(observation)\n\n if not isinstance(observation, dict):\n # Dict obs need to be handled separately\n vectorized_env = is_vectorized_observation(observation, self.observation_space)\n # Add batch dimension if needed\n observation = observation.reshape((-1,) + self.observation_space.shape)\n\n observation = obs_as_tensor(observation, self.device)\n return observation, vectorized_env\n\n\nclass BasePolicy(BaseModel):\n \"\"\"The base policy object.\n\n Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = False, **kwargs):\n super(BasePolicy, self).__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. 
if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy\n actions = actions.cpu().numpy()\n\n if isinstance(self.action_space, gym.spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. 
if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions[0]\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))\n\n\nclass ActorCriticPolicy(BasePolicy):\n \"\"\"\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. 
In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n if optimizer_kwargs is None:\n optimizer_kwargs = {}\n # Small values to avoid NaN in Adam optimizer\n if optimizer_class == th.optim.Adam:\n optimizer_kwargs[\"eps\"] = 1e-5\n\n super(ActorCriticPolicy, self).__init__(\n observation_space,\n action_space,\n features_extractor_class,\n features_extractor_kwargs,\n optimizer_class=optimizer_class,\n optimizer_kwargs=optimizer_kwargs,\n squash_output=squash_output,\n )\n\n # Default network architecture, from stable-baselines\n if net_arch is None:\n if features_extractor_class == NatureCNN:\n net_arch = []\n else:\n net_arch = [dict(pi=[64, 64], vf=[64, 64])]\n\n self.net_arch = net_arch\n self.activation_fn = activation_fn\n self.ortho_init = ortho_init\n\n self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)\n self.features_dim = self.features_extractor.features_dim\n\n self.normalize_images = normalize_images\n self.log_std_init = log_std_init\n dist_kwargs = None\n # Keyword arguments for gSDE distribution\n if use_sde:\n dist_kwargs = {\n \"full_std\": full_std,\n \"squash_output\": squash_output,\n \"use_expln\": use_expln,\n \"learn_features\": False,\n }\n\n if sde_net_arch is not None:\n warnings.warn(\"sde_net_arch is deprecated and will be removed in SB3 v2.4.0.\", DeprecationWarning)\n\n self.use_sde = use_sde\n self.dist_kwargs = dist_kwargs\n\n # Action distribution\n self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)\n\n self._build(lr_schedule)\n\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n data = super()._get_constructor_parameters()\n\n default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)\n\n data.update(\n dict(\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n use_sde=self.use_sde,\n log_std_init=self.log_std_init,\n squash_output=default_none_kwargs[\"squash_output\"],\n full_std=default_none_kwargs[\"full_std\"],\n use_expln=default_none_kwargs[\"use_expln\"],\n lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone\n ortho_init=self.ortho_init,\n optimizer_class=self.optimizer_class,\n 
optimizer_kwargs=self.optimizer_kwargs,\n features_extractor_class=self.features_extractor_class,\n features_extractor_kwargs=self.features_extractor_kwargs,\n )\n )\n return data\n\n def reset_noise(self, n_envs: int = 1) -> None:\n \"\"\"\n Sample new weights for the exploration matrix.\n\n :param n_envs:\n \"\"\"\n assert isinstance(self.action_dist, StateDependentNoiseDistribution), \"reset_noise() is only available when using gSDE\"\n self.action_dist.sample_weights(self.log_std, batch_size=n_envs)\n\n def _build_mlp_extractor(self) -> None:\n \"\"\"\n Create the policy and value networks.\n Part of the layers can be shared.\n \"\"\"\n # Note: If net_arch is None and some features extractor is used,\n # net_arch here is an empty list and mlp_extractor does not\n # really contain any layers (acts like an identity module).\n self.mlp_extractor = MlpExtractor(\n self.features_dim,\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n device=self.device,\n )\n\n def _build(self, lr_schedule: Schedule) -> None:\n \"\"\"\n Create the networks and the optimizer.\n\n :param lr_schedule: Learning rate schedule\n lr_schedule(1) is the initial learning rate\n \"\"\"\n self._build_mlp_extractor()\n\n latent_dim_pi = self.mlp_extractor.latent_dim_pi\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):\n self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)\n elif isinstance(self.action_dist, (ConditionalCategoricalDistribution)):\n self.action_net, self.embedding, self.other = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)\n else:\n raise NotImplementedError(f\"Unsupported distribution '{self.action_dist}'.\")\n\n self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)\n # Init weights: use orthogonal initialization\n # with small initial weight for the output\n if self.ortho_init:\n # TODO: check for features_extractor\n # Values from stable-baselines.\n # features_extractor/mlp values are\n # originally from openai/baselines (default gains/init_scales).\n module_gains = {\n self.features_extractor: np.sqrt(2),\n self.mlp_extractor: np.sqrt(2),\n self.action_net: 0.01,\n self.value_net: 1,\n }\n for module, gain in module_gains.items():\n module.apply(partial(self.init_weights, gain=gain))\n\n # Setup optimizer with initial learning rate\n self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)\n\n def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n \"\"\"\n Forward pass in all the networks (actor and critic)\n\n :param obs: Observation\n :param deterministic: Whether to sample or use deterministic actions\n :return: action, value and log probability of the action\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n latent_pi, latent_vf = self.mlp_extractor(features)\n # Evaluate the values for the given observations\n values = self.value_net(latent_vf)\n if isinstance(self.action_dist, 
ConditionalCategoricalDistribution):\n # mean_actions = self.action_net[0](latent_pi)\n mean_actions = self.action_net(latent_pi)\n # mean_actions = F.relu(mean_actions)\n # distribution = self.action_dist.proba_distribution(mean_actions)\n actions, distribution = self.action_dist.sample_all(mean_actions)\n else:\n distribution = self._get_action_dist_from_latent(latent_pi)\n actions = distribution.get_actions(deterministic=deterministic)\n log_prob = distribution.log_prob(actions)\n return actions, values, log_prob\n\n def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> Distribution:\n \"\"\"\n Retrieve action distribution given the latent codes.\n\n :param latent_pi: Latent code for the actor\n :return: Action distribution\n \"\"\"\n mean_actions = self.action_net(latent_pi)\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std)\n elif isinstance(self.action_dist, CategoricalDistribution):\n # Here mean_actions are the logits before the softmax\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, MultiCategoricalDistribution):\n # Here mean_actions are the flattened logits\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, BernoulliDistribution):\n # Here mean_actions are the logits (before rounding to get the binary actions)\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_pi)\n else:\n raise ValueError(\"Invalid action distribution\")\n\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n return self.get_distribution(observation).get_actions(deterministic=deterministic)\n\n def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n \"\"\"\n Evaluate actions according to the current policy,\n given the observations.\n\n :param obs:\n :param actions:\n :return: estimated value, log likelihood of taking those actions\n and entropy of the action distribution.\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n latent_pi, latent_vf = self.mlp_extractor(features)\n if isinstance(self.action_dist, ConditionalCategoricalDistribution):\n mean_actions = self.action_net(latent_pi)\n _, distribution = self.action_dist.sample_all(mean_actions)\n else:\n distribution = self._get_action_dist_from_latent(latent_pi)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n return values, log_prob, distribution.entropy()\n\n def get_distribution(self, obs: th.Tensor) -> Distribution:\n \"\"\"\n Get the current policy distribution given the observations.\n\n :param obs:\n :return: the action distribution.\n \"\"\"\n features = self.extract_features(obs)\n latent_pi = self.mlp_extractor.forward_actor(features)\n return self._get_action_dist_from_latent(latent_pi)\n\n def predict_values(self, obs: th.Tensor) -> th.Tensor:\n \"\"\"\n Get the estimated values according to the current policy given the observations.\n\n :param obs:\n :return: the estimated 
values.\n \"\"\"\n features = self.extract_features(obs)\n latent_vf = self.mlp_extractor.forward_critic(features)\n return self.value_net(latent_vf)\n\n\nclass ActorCriticCnnPolicy(ActorCriticPolicy):\n \"\"\"\n CNN policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super(ActorCriticCnnPolicy, self).__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n ortho_init,\n use_sde,\n log_std_init,\n full_std,\n sde_net_arch,\n use_expln,\n squash_output,\n features_extractor_class,\n features_extractor_kwargs,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n )\n\n\nclass MultiInputActorCriticPolicy(ActorCriticPolicy):\n \"\"\"\n MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space (Tuple)\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not 
orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Uses the CombinedExtractor\n :param features_extractor_kwargs: Keyword arguments\n to pass to the feature extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Dict,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super(MultiInputActorCriticPolicy, self).__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n ortho_init,\n use_sde,\n log_std_init,\n full_std,\n sde_net_arch,\n use_expln,\n squash_output,\n features_extractor_class,\n features_extractor_kwargs,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n )\n\n\nclass ContinuousCritic(BaseModel):\n \"\"\"\n Critic network(s) for DDPG/SAC/TD3.\n It represents the action-state value function (Q-value function).\n Compared to A2C/PPO critics, this one represents the Q-value\n and takes the continuous action as input. 
It is concatenated with the state\n and then fed to the network which outputs a single value: Q(s, a).\n For more recent algorithms like SAC/TD3, multiple networks\n are created to give different estimates.\n\n By default, it creates two critic networks used to reduce overestimation\n thanks to clipped Q-learning (cf TD3 paper).\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param net_arch: Network architecture\n :param features_extractor: Network to extract features\n (a CNN when using images, a nn.Flatten() layer otherwise)\n :param features_dim: Number of features\n :param activation_fn: Activation function\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether the features extractor is shared or not\n between the actor and the critic (this saves computation time)\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n net_arch: List[int],\n features_extractor: nn.Module,\n features_dim: int,\n activation_fn: Type[nn.Module] = nn.ReLU,\n normalize_images: bool = True,\n n_critics: int = 2,\n share_features_extractor: bool = True,\n ):\n super().__init__(\n observation_space,\n action_space,\n features_extractor=features_extractor,\n normalize_images=normalize_images,\n )\n\n action_dim = get_action_dim(self.action_space)\n\n self.share_features_extractor = share_features_extractor\n self.n_critics = n_critics\n self.q_networks = []\n for idx in range(n_critics):\n q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)\n q_net = nn.Sequential(*q_net)\n self.add_module(f\"qf{idx}\", q_net)\n self.q_networks.append(q_net)\n\n def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:\n # Learn the features extractor using the policy loss only\n # when the features_extractor is shared with the actor\n with th.set_grad_enabled(not self.share_features_extractor):\n features = self.extract_features(obs)\n qvalue_input = th.cat([features, actions], dim=1)\n return tuple(q_net(qvalue_input) for q_net in self.q_networks)\n\n def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:\n \"\"\"\n Only predict the Q-value using the first network.\n This allows reducing computation when not all the estimates are needed\n (e.g. 
when updating the policy in TD3).\n \"\"\"\n with th.no_grad():\n features = self.extract_features(obs)\n return self.q_networks[0](th.cat([features, actions], dim=1))\n\n\n_policy_registry = dict() # type: Dict[Type[BasePolicy], Dict[str, Type[BasePolicy]]]\n\n\ndef get_policy_from_name(base_policy_type: Type[BasePolicy], name: str) -> Type[BasePolicy]:\n \"\"\"\n Returns the registered policy from the base type and name.\n See `register_policy` for registering policies and explanation.\n\n :param base_policy_type: the base policy class\n :param name: the policy name\n :return: the policy\n \"\"\"\n if base_policy_type not in _policy_registry:\n raise KeyError(f\"Error: the policy type {base_policy_type} is not registered!\")\n if name not in _policy_registry[base_policy_type]:\n raise KeyError(\n f\"Error: unknown policy type {name}, \"\n f\"the only registered policy types are: {list(_policy_registry[base_policy_type].keys())}!\"\n )\n return _policy_registry[base_policy_type][name]\n\n\ndef register_policy(name: str, policy: Type[BasePolicy]) -> None:\n \"\"\"\n Register a policy, so it can be called using its name.\n e.g. SAC('MlpPolicy', ...) instead of SAC(MlpPolicy, ...).\n\n The goal here is to standardize policy naming, e.g.\n all algorithms can call upon \"MlpPolicy\" or \"CnnPolicy\",\n and they receive respective policies that work for them.\n Consider the following:\n\n OnlinePolicy\n -- OnlineMlpPolicy (\"MlpPolicy\")\n -- OnlineCnnPolicy (\"CnnPolicy\")\n OfflinePolicy\n -- OfflineMlpPolicy (\"MlpPolicy\")\n -- OfflineCnnPolicy (\"CnnPolicy\")\n\n Two policies have the name \"MlpPolicy\" and two have \"CnnPolicy\".\n In `get_policy_from_name`, the parent class (e.g. OnlinePolicy)\n is given and used to select and return the correct policy.\n\n :param name: the policy name\n :param policy: the policy class\n \"\"\"\n sub_class = None\n for cls in BasePolicy.__subclasses__():\n if issubclass(policy, cls):\n sub_class = cls\n break\n if sub_class is None:\n raise ValueError(f\"Error: the policy {policy} is not of any known subclasses of BasePolicy!\")\n\n if sub_class not in _policy_registry:\n _policy_registry[sub_class] = {}\n if name in _policy_registry[sub_class]:\n # Check if the registered policy is the same as\n # the one we try to register. If it is not,\n # do not override and complain.\n if _policy_registry[sub_class][name] != policy:\n raise ValueError(f\"Error: the name {name} is already registered for a different policy, will not override.\")\n _policy_registry[sub_class][name] = policy\n" ]
[ [ "torch.nn.Sequential", "numpy.sqrt", "torch.cat", "torch.load", "numpy.clip", "torch.nn.Linear", "torch.set_grad_enabled", "torch.no_grad", "torch.nn.init.orthogonal_", "torch.FloatTensor", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
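The register_policy / get_policy_from_name pair in the record above implements a two-level registry: a policy is filed first under the direct BasePolicy subclass it descends from, then under a string name, which is what lets two algorithm families each expose their own "MlpPolicy". A minimal self-contained sketch of that pattern follows; all class names in it are hypothetical stand-ins for illustration, not the classes imported by the record.

# Minimal sketch of the two-level policy registry pattern shown above.
# All class names here are hypothetical stand-ins for illustration.
from typing import Dict

_registry: Dict[type, Dict[str, type]] = {}


class BasePolicy: ...


class OnlinePolicy(BasePolicy): ...


class OnlineMlpPolicy(OnlinePolicy): ...


def register_policy(name: str, policy: type) -> None:
    # File the policy under the direct BasePolicy subclass it descends from.
    for cls in BasePolicy.__subclasses__():
        if issubclass(policy, cls):
            bucket = _registry.setdefault(cls, {})
            if bucket.get(name, policy) is not policy:
                raise ValueError(f"{name} is already registered for a different policy")
            bucket[name] = policy
            return
    raise ValueError(f"{policy} is not a subclass of any known BasePolicy subclass")


def get_policy_from_name(base_policy_type: type, name: str) -> type:
    return _registry[base_policy_type][name]


register_policy("MlpPolicy", OnlineMlpPolicy)
assert get_policy_from_name(OnlinePolicy, "MlpPolicy") is OnlineMlpPolicy

Keying on the parent class is the point of the design: a lookup under OnlinePolicy and a lookup under some OfflinePolicy can resolve the same name "MlpPolicy" to different classes.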
LSchultebraucks/matplotlib_examples
[ "cac02668ce6b81dcbbdf0ff3238cc01506c8f76a" ]
[ "src/mosaic_plot.py" ]
[ "import pandas as pd\nfrom statsmodels.graphics.mosaicplot import mosaic\nimport pylab\nfrom itertools import product\nimport numpy as np\nrand = np.random.random\n\nspeaks_mul_foreign_languages = list(product(['male', 'female'], ['yes', 'no']))\nindex = pd.MultiIndex.from_tuples(speaks_mul_foreign_languages, names=['gender', 'speaks_foreign_languages'])\ndata = pd.Series(rand(4), index=index)\n\nmosaic(data, gap=0.01, title='Who knows multiple foreign languages? - Mosaic Chart')\npylab.show()\n" ]
[ [ "pandas.MultiIndex.from_tuples" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
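As a usage note on the record above: statsmodels' mosaic also accepts a plain dict keyed by category tuples, which sidesteps building the MultiIndex by hand. A small sketch, with made-up counts standing in for real data:

# Same plot fed from a dict of category-tuple -> value;
# the counts here are arbitrary illustrative numbers.
from statsmodels.graphics.mosaicplot import mosaic
import pylab

data = {
    ('male', 'yes'): 15,
    ('male', 'no'): 20,
    ('female', 'yes'): 25,
    ('female', 'no'): 10,
}
mosaic(data, gap=0.01, title='Who knows multiple foreign languages? - Mosaic Chart')
pylab.show()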
lulujianjie/efficient-person-generation-for-reid
[ "1bb29c7c280e3322a65af36b37deecbce0c1d322" ]
[ "data-generation-GAN/generate_samples_market.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport os\nimport sys\nimport cv2\nfrom config.cfg import Cfg\nimport torch\nfrom torch.backends import cudnn\nfrom datasets.bases import read_image\nsys.path.append('.')\nfrom datasets import make_dataloader\nfrom processor import do_inference\nfrom model import make_model\nfrom utils.logger import setup_logger\nimport torchvision.transforms as T\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\n#rename img\nimport string\nimport random\n\ndevice = \"cuda\"\nWEIGHT_PATH = './log/model_G_1800.pth'\n#'/nfs-data/lujj/pretrained_model/pose-transfer/model_G_45.pth'\n#'/nfs-data/lujj/projects/pose-transfer-jack-reid-01/log/tmp/model_G_180.pth'\nCfg.freeze()\nos.environ['CUDA_VISIBLE_DEVICES'] = \"5\"\ncudnn.benchmark = True\n\ntest_transforms = T.Compose([\n T.Resize(Cfg.MODEL.INPUT_SIZE),\n T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n ])\n\nmodel_G, _, _, _ = make_model(Cfg)\nmodel_G.to(device)\n#model_G = nn.DataParallel(model_G)\nmodel_G.load_state_dict(torch.load(WEIGHT_PATH))\n\n\n# In[ ]:\n\n\ndataset = 'DukeMTMC-reID'\nroot_dir = '/home/lujj/datasets/{}/'.format(dataset)\ndata_dir = 'p3'\ntarget_dir = '/home/lujj/datasets/{}/{}_g/'.format(dataset,data_dir)\ntarget_dir2 = '/home/lujj/datasets/{}/{}_g_bak/'.format(dataset,data_dir)\nimg_list = []\npid_set = set()\nfor img in os.listdir(root_dir+data_dir):\n pid = img.split('_')[0]\n if pid in pid_set:\n continue\n else:\n pid_set.add(pid)\nfor img in os.listdir('/home/lujj/datasets/{}/bounding_box_train/'.format(dataset)):\n pid = img.split('_')[0]\n if pid in pid_set:\n continue\n else:\n pid_set.add(pid)\n img_list.append(img)\nprint('to generate pid:',len(img_list))\npose_list = np.load(root_dir+'pose_list_duke.npy')\nlen_pose = len(pose_list)\nprint('body-part:',len_pose)\n\n\n# In[ ]:\n\n\nnum_imgs = 24\nmodel_G.eval()\nfor img in img_list:\n if img[-3:] == 'jpg':\n img1_path = '/home/lujj/datasets/{}/bounding_box_train/{}'.format(dataset,img)\n for pose2_idx in np.random.choice(range(len_pose),num_imgs, replace=False):\n target_pose = pose_list[pose2_idx]\n pose2_path = '/home/lujj/datasets/{}/train_part_heatmap/{}.npy'.format(dataset,target_pose)\n img1 = read_image(img1_path)\n # plt.imshow(img1)\n # plt.show()\n img1 = torch.unsqueeze(test_transforms(img1),0).to(device)\n pose_heatmap2 = np.load(pose2_path).astype(np.float32)\n pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1)))\n pose2 = torch.unsqueeze(pose2,0).to(device)\n input_G = (img1, pose2)\n\n fake_img2 = model_G(input_G)\n result = fake_img2.cpu().detach().numpy()\n img1 = (np.transpose(result[0],(1,2,0))+ 1) / 2.0 * 255.0\n cv2.imwrite(target_dir+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR))\n cv2.imwrite(target_dir2+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR))\n\n\n# In[ ]:\n\n\nfor img in os.listdir(target_dir):\n src = target_dir+img\n target_img = ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg'\n img_ = img.split('-')\n dst = target_dir+img_[0]+target_img\n os.rename(src, dst)\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.transpose", "numpy.load", "torch.unsqueeze", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
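The generation loop in the record above hinges on one piece of plumbing: pose heatmaps stored as H x W x C numpy arrays are transposed to C x H x W and given a batch dimension before the forward pass, and the generator output in [-1, 1] is mapped back to an H x W x 3 image in [0, 255]. A minimal sketch of just that round trip; the 256x128 resolution and 18 heatmap channels are assumptions for illustration only.

# Sketch of the numpy/torch shape and value-range round trip used above.
# The 256x128 resolution and 18 heatmap channels are assumed values.
import numpy as np
import torch

heatmap = np.zeros((256, 128, 18), dtype=np.float32)   # H x W x C, as loaded from .npy
pose = torch.tensor(heatmap.transpose((2, 0, 1)))      # C x H x W
pose = torch.unsqueeze(pose, 0)                        # 1 x C x H x W batch

fake = torch.tanh(torch.randn(1, 3, 256, 128))         # stand-in generator output in [-1, 1]
img = (np.transpose(fake[0].numpy(), (1, 2, 0)) + 1) / 2.0 * 255.0  # H x W x 3 in [0, 255]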
BenRussert/pandas
[ "1f02bf240c3d0d3da338af868d056bfc169b28c2", "9179e633b1e54ac31c5ea42ec0ec24e9a1709aae", "9179e633b1e54ac31c5ea42ec0ec24e9a1709aae", "9179e633b1e54ac31c5ea42ec0ec24e9a1709aae" ]
[ "pandas/core/computation/align.py", "pandas/core/indexes/numeric.py", "pandas/tests/util/test_util.py", "pandas/tests/io/formats/test_to_html.py" ]
[ "\"\"\"Core eval alignment algorithms\n\"\"\"\n\nfrom functools import partial, wraps\nimport warnings\n\nimport numpy as np\n\nfrom pandas.compat import range, zip\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import compat\nimport pandas.core.common as com\nfrom pandas.core.computation.common import _result_type_many\n\n\ndef _align_core_single_unary_op(term):\n if isinstance(term.value, np.ndarray):\n typ = partial(np.asanyarray, dtype=term.value.dtype)\n else:\n typ = type(term.value)\n ret = typ,\n\n if not hasattr(term.value, 'axes'):\n ret += None,\n else:\n ret += _zip_axes_from_type(typ, term.value.axes),\n return ret\n\n\ndef _zip_axes_from_type(typ, new_axes):\n axes = {}\n for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):\n axes[ax_name] = new_axes[ax_ind]\n return axes\n\n\ndef _any_pandas_objects(terms):\n \"\"\"Check a sequence of terms for instances of PandasObject.\"\"\"\n return any(isinstance(term.value, pd.core.generic.PandasObject)\n for term in terms)\n\n\ndef _filter_special_cases(f):\n @wraps(f)\n def wrapper(terms):\n # single unary operand\n if len(terms) == 1:\n return _align_core_single_unary_op(terms[0])\n\n term_values = (term.value for term in terms)\n\n # we don't have any pandas objects\n if not _any_pandas_objects(terms):\n return _result_type_many(*term_values), None\n\n return f(terms)\n return wrapper\n\n\n@_filter_special_cases\ndef _align_core(terms):\n term_index = [i for i, term in enumerate(terms)\n if hasattr(term.value, 'axes')]\n term_dims = [terms[i].value.ndim for i in term_index]\n ndims = pd.Series(dict(zip(term_index, term_dims)))\n\n # initial axes are the axes of the largest-axis'd term\n biggest = terms[ndims.idxmax()].value\n typ = biggest._constructor\n axes = biggest.axes\n naxes = len(axes)\n gt_than_one_axis = naxes > 1\n\n for value in (terms[i].value for i in term_index):\n is_series = isinstance(value, pd.Series)\n is_series_and_gt_one_axis = is_series and gt_than_one_axis\n\n for axis, items in enumerate(value.axes):\n if is_series_and_gt_one_axis:\n ax, itm = naxes - 1, value.index\n else:\n ax, itm = axis, items\n\n if not axes[ax].is_(itm):\n axes[ax] = axes[ax].join(itm, how='outer')\n\n for i, ndim in compat.iteritems(ndims):\n for axis, items in zip(range(ndim), axes):\n ti = terms[i].value\n\n if hasattr(ti, 'reindex'):\n transpose = isinstance(ti, pd.Series) and naxes > 1\n reindexer = axes[naxes - 1] if transpose else items\n\n term_axis_size = len(ti.axes[axis])\n reindexer_size = len(reindexer)\n\n ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))\n if ordm >= 1 and reindexer_size >= 10000:\n w = ('Alignment difference on axis {axis} is larger '\n 'than an order of magnitude on term {term!r}, by '\n 'more than {ordm:.4g}; performance may suffer'\n ).format(axis=axis, term=terms[i].name, ordm=ordm)\n warnings.warn(w, category=PerformanceWarning, stacklevel=6)\n\n f = partial(ti.reindex, reindexer, axis=axis, copy=False)\n\n terms[i].update(f())\n\n terms[i].update(terms[i].value.values)\n\n return typ, _zip_axes_from_type(typ, axes)\n\n\ndef _align(terms):\n \"\"\"Align a set of terms\"\"\"\n try:\n # flatten the parse tree (a nested list, really)\n terms = list(com.flatten(terms))\n except TypeError:\n # can't iterate so it must just be a constant or single variable\n if isinstance(terms.value, pd.core.generic.NDFrame):\n typ = type(terms.value)\n return typ, _zip_axes_from_type(typ, terms.value.axes)\n return np.result_type(terms.type), None\n\n # if all 
resolved variables are numeric scalars\n if all(term.is_scalar for term in terms):\n return _result_type_many(*(term.value for term in terms)).type, None\n\n # perform the main alignment\n typ, axes = _align_core(terms)\n return typ, axes\n\n\ndef _reconstruct_object(typ, obj, axes, dtype):\n \"\"\"Reconstruct an object given its type, raw value, and possibly empty\n (None) axes.\n\n Parameters\n ----------\n typ : object\n A type\n obj : object\n The value to use in the type constructor\n axes : dict\n The axes to use to construct the resulting pandas object\n\n Returns\n -------\n ret : typ\n An object of type ``typ`` with the value `obj` and possible axes\n `axes`.\n \"\"\"\n try:\n typ = typ.type\n except AttributeError:\n pass\n\n res_t = np.result_type(obj.dtype, dtype)\n\n if (not isinstance(typ, partial) and\n issubclass(typ, pd.core.generic.PandasObject)):\n return typ(obj, dtype=res_t, **axes)\n\n # special case for pathological things like ~True/~False\n if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:\n ret_value = res_t.type(obj)\n else:\n ret_value = typ(obj).astype(res_t)\n # The condition is to distinguish 0-dim array (returned in case of\n # scalar) and 1 element array\n # e.g. np.array(0) and np.array([0])\n if len(obj.shape) == 1 and len(obj) == 1:\n if not isinstance(ret_value, np.ndarray):\n ret_value = np.array([ret_value]).astype(res_t)\n\n return ret_value\n", "import warnings\n\nimport numpy as np\nfrom pandas._libs import index as libindex\nfrom pandas.core.dtypes.common import (\n is_dtype_equal,\n pandas_dtype,\n needs_i8_conversion,\n is_integer_dtype,\n is_float,\n is_bool,\n is_bool_dtype,\n is_scalar)\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas import compat\nfrom pandas.core import algorithms\nimport pandas.core.common as com\nfrom pandas.core.indexes.base import (\n Index, InvalidIndexError, _index_shared_docs)\nfrom pandas.util._decorators import Appender, cache_readonly\nimport pandas.core.dtypes.concat as _concat\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.ops import get_op_result_name\n\n_num_index_shared_docs = dict()\n\n\nclass NumericIndex(Index):\n \"\"\"\n Provide numeric type operations\n\n This is an abstract class\n\n \"\"\"\n _is_numeric_dtype = True\n\n def __new__(cls, data=None, dtype=None, copy=False, name=None,\n fastpath=None):\n\n if fastpath is not None:\n warnings.warn(\"The 'fastpath' keyword is deprecated, and will be \"\n \"removed in a future version.\",\n FutureWarning, stacklevel=2)\n if fastpath:\n return cls._simple_new(data, name=name)\n\n # is_scalar, generators handled in coerce_to_ndarray\n data = cls._coerce_to_ndarray(data)\n\n if issubclass(data.dtype.type, compat.string_types):\n cls._string_data_error(data)\n\n if copy or not is_dtype_equal(data.dtype, cls._default_dtype):\n subarr = np.array(data, dtype=cls._default_dtype, copy=copy)\n cls._assert_safe_casting(data, subarr)\n else:\n subarr = data\n\n if name is None and hasattr(data, 'name'):\n name = data.name\n return cls._simple_new(subarr, name=name)\n\n @Appender(_index_shared_docs['_maybe_cast_slice_bound'])\n def _maybe_cast_slice_bound(self, label, side, kind):\n assert kind in ['ix', 'loc', 'getitem', None]\n\n # we will try to coerce to integers\n return self._maybe_cast_indexer(label)\n\n @Appender(_index_shared_docs['_shallow_copy'])\n def _shallow_copy(self, values=None, **kwargs):\n if values is not None and not self._can_hold_na:\n # Ensure we are not returning an Int64Index with float data:\n return 
self._shallow_copy_with_infer(values=values, **kwargs)\n return (super(NumericIndex, self)._shallow_copy(values=values,\n **kwargs))\n\n def _convert_for_op(self, value):\n \"\"\" Convert value to be insertable to ndarray \"\"\"\n\n if is_bool(value) or is_bool_dtype(value):\n # force conversion to object\n # so we don't lose the bools\n raise TypeError\n\n return value\n\n def _convert_tolerance(self, tolerance, target):\n tolerance = np.asarray(tolerance)\n if target.size != tolerance.size and tolerance.size > 1:\n raise ValueError('list-like tolerance size must match '\n 'target index size')\n if not np.issubdtype(tolerance.dtype, np.number):\n if tolerance.ndim > 0:\n raise ValueError(('tolerance argument for %s must contain '\n 'numeric elements if it is list type') %\n (type(self).__name__,))\n else:\n raise ValueError(('tolerance argument for %s must be numeric '\n 'if it is a scalar: %r') %\n (type(self).__name__, tolerance))\n return tolerance\n\n @classmethod\n def _assert_safe_casting(cls, data, subarr):\n \"\"\"\n Subclasses need to override this only if the process of casting data\n from some accepted dtype to the internal dtype(s) bears the risk of\n truncation (e.g. float to int).\n \"\"\"\n pass\n\n def _concat_same_dtype(self, indexes, name):\n return _concat._concat_index_same_dtype(indexes).rename(name)\n\n @property\n def is_all_dates(self):\n \"\"\"\n Checks that all the labels are datetime objects\n \"\"\"\n return False\n\n @Appender(Index.insert.__doc__)\n def insert(self, loc, item):\n # treat NA values as nans:\n if is_scalar(item) and isna(item):\n item = self._na_value\n return super(NumericIndex, self).insert(loc, item)\n\n\n_num_index_shared_docs['class_descr'] = \"\"\"\n Immutable ndarray implementing an ordered, sliceable set. The basic object\n storing axis labels for all pandas objects. %(klass)s is a special case\n of `Index` with purely %(ltype)s labels. %(extra)s\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype (default: %(dtype)s)\n copy : bool\n Make a copy of input ndarray\n name : object\n Name to be stored in the index\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n\n Notes\n -----\n An Index instance can **only** contain hashable objects.\n\n See also\n --------\n Index : The base pandas Index type\n\"\"\"\n\n_int64_descr_args = dict(\n klass='Int64Index',\n ltype='integer',\n dtype='int64',\n extra=''\n)\n\n\nclass IntegerIndex(NumericIndex):\n \"\"\"\n This is an abstract class for Int64Index, UInt64Index.\n \"\"\"\n\n def __contains__(self, key):\n \"\"\"\n Check if key is a float and has a decimal. 
If it has, return False.\n \"\"\"\n hash(key)\n try:\n if is_float(key) and int(key) != key:\n return False\n return key in self._engine\n except (OverflowError, TypeError, ValueError):\n return False\n\n\nclass Int64Index(IntegerIndex):\n __doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args\n\n _typ = 'int64index'\n _can_hold_na = False\n _engine_type = libindex.Int64Engine\n _default_dtype = np.int64\n\n @property\n def inferred_type(self):\n \"\"\"Always 'integer' for ``Int64Index``\"\"\"\n return 'integer'\n\n @property\n def asi8(self):\n # do not cache or you'll create a memory leak\n return self.values.view('i8')\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n # don't coerce ilocs to integers\n if kind != 'iloc':\n key = self._maybe_cast_indexer(key)\n return (super(Int64Index, self)\n ._convert_scalar_indexer(key, kind=kind))\n\n def _wrap_joined_index(self, joined, other):\n name = get_op_result_name(self, other)\n return Int64Index(joined, name=name)\n\n @classmethod\n def _assert_safe_casting(cls, data, subarr):\n \"\"\"\n Ensure incoming data can be represented as ints.\n \"\"\"\n if not issubclass(data.dtype.type, np.signedinteger):\n if not np.array_equal(data, subarr):\n raise TypeError('Unsafe NumPy casting, you must '\n 'explicitly cast')\n\n\nInt64Index._add_numeric_methods()\nInt64Index._add_logical_methods()\n\n_uint64_descr_args = dict(\n klass='UInt64Index',\n ltype='unsigned integer',\n dtype='uint64',\n extra=''\n)\n\n\nclass UInt64Index(IntegerIndex):\n __doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args\n\n _typ = 'uint64index'\n _can_hold_na = False\n _engine_type = libindex.UInt64Engine\n _default_dtype = np.uint64\n\n @property\n def inferred_type(self):\n \"\"\"Always 'integer' for ``UInt64Index``\"\"\"\n return 'integer'\n\n @property\n def asi8(self):\n # do not cache or you'll create a memory leak\n return self.values.view('u8')\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n # don't coerce ilocs to integers\n if kind != 'iloc':\n key = self._maybe_cast_indexer(key)\n return (super(UInt64Index, self)\n ._convert_scalar_indexer(key, kind=kind))\n\n @Appender(_index_shared_docs['_convert_arr_indexer'])\n def _convert_arr_indexer(self, keyarr):\n # Cast the indexer to uint64 if possible so\n # that the values returned from indexing are\n # also uint64.\n keyarr = com.asarray_tuplesafe(keyarr)\n if is_integer_dtype(keyarr):\n return com.asarray_tuplesafe(keyarr, dtype=np.uint64)\n return keyarr\n\n @Appender(_index_shared_docs['_convert_index_indexer'])\n def _convert_index_indexer(self, keyarr):\n # Cast the indexer to uint64 if possible so\n # that the values returned from indexing are\n # also uint64.\n if keyarr.is_integer():\n return keyarr.astype(np.uint64)\n return keyarr\n\n def _wrap_joined_index(self, joined, other):\n name = get_op_result_name(self, other)\n return UInt64Index(joined, name=name)\n\n @classmethod\n def _assert_safe_casting(cls, data, subarr):\n \"\"\"\n Ensure incoming data can be represented as uints.\n \"\"\"\n if not issubclass(data.dtype.type, np.unsignedinteger):\n if not np.array_equal(data, subarr):\n raise TypeError('Unsafe NumPy casting, you must '\n 'explicitly 
cast')\n\n\nUInt64Index._add_numeric_methods()\nUInt64Index._add_logical_methods()\n\n_float64_descr_args = dict(\n klass='Float64Index',\n dtype='float64',\n ltype='float',\n extra=''\n)\n\n\nclass Float64Index(NumericIndex):\n __doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args\n\n _typ = 'float64index'\n _engine_type = libindex.Float64Engine\n _default_dtype = np.float64\n\n @property\n def inferred_type(self):\n \"\"\"Always 'floating' for ``Float64Index``\"\"\"\n return 'floating'\n\n @Appender(_index_shared_docs['astype'])\n def astype(self, dtype, copy=True):\n dtype = pandas_dtype(dtype)\n if needs_i8_conversion(dtype):\n msg = ('Cannot convert Float64Index to dtype {dtype}; integer '\n 'values are required for conversion').format(dtype=dtype)\n raise TypeError(msg)\n elif is_integer_dtype(dtype) and self.hasnans:\n # GH 13149\n raise ValueError('Cannot convert NA to integer')\n return super(Float64Index, self).astype(dtype, copy=copy)\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n if kind == 'iloc':\n return self._validate_indexer('positional', key, kind)\n\n return key\n\n @Appender(_index_shared_docs['_convert_slice_indexer'])\n def _convert_slice_indexer(self, key, kind=None):\n # if we are not a slice, then we are done\n if not isinstance(key, slice):\n return key\n\n if kind == 'iloc':\n return super(Float64Index, self)._convert_slice_indexer(key,\n kind=kind)\n\n # translate to locations\n return self.slice_indexer(key.start, key.stop, key.step, kind=kind)\n\n def _format_native_types(self, na_rep='', float_format=None, decimal='.',\n quoting=None, **kwargs):\n from pandas.io.formats.format import FloatArrayFormatter\n formatter = FloatArrayFormatter(self.values, na_rep=na_rep,\n float_format=float_format,\n decimal=decimal, quoting=quoting,\n fixed_width=False)\n return formatter.get_result_as_array()\n\n def get_value(self, series, key):\n \"\"\" we always want to get an index value, never a value \"\"\"\n if not is_scalar(key):\n raise InvalidIndexError\n\n k = com.values_from_object(key)\n loc = self.get_loc(k)\n new_values = com.values_from_object(series)[loc]\n\n return new_values\n\n def equals(self, other):\n \"\"\"\n Determines if two Index objects contain the same elements.\n \"\"\"\n if self is other:\n return True\n\n if not isinstance(other, Index):\n return False\n\n # need to compare nans locations and make sure that they are the same\n # since nans don't compare equal this is a bit tricky\n try:\n if not isinstance(other, Float64Index):\n other = self._constructor(other)\n if (not is_dtype_equal(self.dtype, other.dtype) or\n self.shape != other.shape):\n return False\n left, right = self._ndarray_values, other._ndarray_values\n return ((left == right) | (self._isnan & other._isnan)).all()\n except (TypeError, ValueError):\n return False\n\n def __contains__(self, other):\n if super(Float64Index, self).__contains__(other):\n return True\n\n try:\n # if other is a sequence this throws a ValueError\n return np.isnan(other) and self.hasnans\n except ValueError:\n try:\n return len(other) <= 1 and ibase._try_get_item(other) in self\n except TypeError:\n pass\n except TypeError:\n pass\n\n return False\n\n @Appender(_index_shared_docs['get_loc'])\n def get_loc(self, key, method=None, tolerance=None):\n try:\n if np.all(np.isnan(key)) or is_bool(key):\n nan_idxs = self._nan_idxs\n try:\n return nan_idxs.item()\n except 
(ValueError, IndexError):\n # should only need to catch ValueError here but on numpy\n # 1.7 .item() can raise IndexError when NaNs are present\n if not len(nan_idxs):\n raise KeyError(key)\n return nan_idxs\n except (TypeError, NotImplementedError):\n pass\n return super(Float64Index, self).get_loc(key, method=method,\n tolerance=tolerance)\n\n @cache_readonly\n def is_unique(self):\n return super(Float64Index, self).is_unique and self._nan_idxs.size < 2\n\n @Appender(Index.isin.__doc__)\n def isin(self, values, level=None):\n if level is not None:\n self._validate_index_level(level)\n return algorithms.isin(np.array(self), values)\n\n\nFloat64Index._add_numeric_methods()\nFloat64Index._add_logical_methods_disabled()\n", "# -*- coding: utf-8 -*-\nimport codecs\nfrom collections import OrderedDict\nimport locale\nimport os\nimport sys\nfrom uuid import uuid4\n\nimport pytest\n\nfrom pandas.compat import PY3, intern\nfrom pandas.util._decorators import deprecate_kwarg, make_signature\nfrom pandas.util._move import BadMove, move_into_mutable_buffer, stolenbuf\nimport pandas.util._test_decorators as td\nfrom pandas.util._validators import (\n validate_args, validate_args_and_kwargs, validate_bool_kwarg,\n validate_kwargs)\n\nimport pandas.core.common as com\nimport pandas.util.testing as tm\n\n\nclass TestDecorators(object):\n\n def setup_method(self, method):\n @deprecate_kwarg('old', 'new')\n def _f1(new=False):\n return new\n\n @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})\n def _f2(new=False):\n return new\n\n @deprecate_kwarg('old', 'new', lambda x: x + 1)\n def _f3(new=0):\n return new\n\n @deprecate_kwarg('old', None)\n def _f4(old=True, unchanged=True):\n return old\n\n self.f1 = _f1\n self.f2 = _f2\n self.f3 = _f3\n self.f4 = _f4\n\n def test_deprecate_kwarg(self):\n x = 78\n with tm.assert_produces_warning(FutureWarning):\n result = self.f1(old=x)\n assert result is x\n with tm.assert_produces_warning(None):\n self.f1(new=x)\n\n def test_dict_deprecate_kwarg(self):\n x = 'yes'\n with tm.assert_produces_warning(FutureWarning):\n result = self.f2(old=x)\n assert result\n\n def test_missing_deprecate_kwarg(self):\n x = 'bogus'\n with tm.assert_produces_warning(FutureWarning):\n result = self.f2(old=x)\n assert result == 'bogus'\n\n def test_callable_deprecate_kwarg(self):\n x = 5\n with tm.assert_produces_warning(FutureWarning):\n result = self.f3(old=x)\n assert result == x + 1\n with pytest.raises(TypeError):\n self.f3(old='hello')\n\n def test_bad_deprecate_kwarg(self):\n with pytest.raises(TypeError):\n @deprecate_kwarg('old', 'new', 0)\n def f4(new=None):\n pass\n\n def test_deprecate_keyword(self):\n x = 9\n with tm.assert_produces_warning(FutureWarning):\n result = self.f4(old=x)\n assert result is x\n with tm.assert_produces_warning(None):\n result = self.f4(unchanged=x)\n assert result is True\n\n\ndef test_rands():\n r = tm.rands(10)\n assert(len(r) == 10)\n\n\ndef test_rands_array():\n arr = tm.rands_array(5, size=10)\n assert(arr.shape == (10,))\n assert(len(arr[0]) == 5)\n\n arr = tm.rands_array(7, size=(10, 10))\n assert(arr.shape == (10, 10))\n assert(len(arr[1, 1]) == 7)\n\n\nclass TestValidateArgs(object):\n fname = 'func'\n\n def test_bad_min_fname_arg_count(self):\n msg = \"'max_fname_arg_count' must be non-negative\"\n with tm.assert_raises_regex(ValueError, msg):\n validate_args(self.fname, (None,), -1, 'foo')\n\n def test_bad_arg_length_max_value_single(self):\n args = (None, None)\n compat_args = ('foo',)\n\n min_fname_arg_count = 0\n max_length = 
len(compat_args) + min_fname_arg_count\n actual_length = len(args) + min_fname_arg_count\n msg = (r\"{fname}\\(\\) takes at most {max_length} \"\n r\"argument \\({actual_length} given\\)\"\n .format(fname=self.fname, max_length=max_length,\n actual_length=actual_length))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args(self.fname, args,\n min_fname_arg_count,\n compat_args)\n\n def test_bad_arg_length_max_value_multiple(self):\n args = (None, None)\n compat_args = dict(foo=None)\n\n min_fname_arg_count = 2\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(args) + min_fname_arg_count\n msg = (r\"{fname}\\(\\) takes at most {max_length} \"\n r\"arguments \\({actual_length} given\\)\"\n .format(fname=self.fname, max_length=max_length,\n actual_length=actual_length))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args(self.fname, args,\n min_fname_arg_count,\n compat_args)\n\n def test_not_all_defaults(self):\n bad_arg = 'foo'\n msg = (\"the '{arg}' parameter is not supported \"\n r\"in the pandas implementation of {func}\\(\\)\".\n format(arg=bad_arg, func=self.fname))\n\n compat_args = OrderedDict()\n compat_args['foo'] = 2\n compat_args['bar'] = -1\n compat_args['baz'] = 3\n\n arg_vals = (1, -1, 3)\n\n for i in range(1, 3):\n with tm.assert_raises_regex(ValueError, msg):\n validate_args(self.fname, arg_vals[:i], 2, compat_args)\n\n def test_validation(self):\n # No exceptions should be thrown\n validate_args(self.fname, (None,), 2, dict(out=None))\n\n compat_args = OrderedDict()\n compat_args['axis'] = 1\n compat_args['out'] = None\n\n validate_args(self.fname, (1, None), 2, compat_args)\n\n\nclass TestValidateKwargs(object):\n fname = 'func'\n\n def test_bad_kwarg(self):\n goodarg = 'f'\n badarg = goodarg + 'o'\n\n compat_args = OrderedDict()\n compat_args[goodarg] = 'foo'\n compat_args[badarg + 'o'] = 'bar'\n kwargs = {goodarg: 'foo', badarg: 'bar'}\n msg = (r\"{fname}\\(\\) got an unexpected \"\n r\"keyword argument '{arg}'\".format(\n fname=self.fname, arg=badarg))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_kwargs(self.fname, kwargs, compat_args)\n\n def test_not_all_none(self):\n bad_arg = 'foo'\n msg = (r\"the '{arg}' parameter is not supported \"\n r\"in the pandas implementation of {func}\\(\\)\".\n format(arg=bad_arg, func=self.fname))\n\n compat_args = OrderedDict()\n compat_args['foo'] = 1\n compat_args['bar'] = 's'\n compat_args['baz'] = None\n\n kwarg_keys = ('foo', 'bar', 'baz')\n kwarg_vals = (2, 's', None)\n\n for i in range(1, 3):\n kwargs = dict(zip(kwarg_keys[:i],\n kwarg_vals[:i]))\n\n with tm.assert_raises_regex(ValueError, msg):\n validate_kwargs(self.fname, kwargs, compat_args)\n\n def test_validation(self):\n # No exceptions should be thrown\n compat_args = OrderedDict()\n compat_args['f'] = None\n compat_args['b'] = 1\n compat_args['ba'] = 's'\n kwargs = dict(f=None, b=1)\n validate_kwargs(self.fname, kwargs, compat_args)\n\n def test_validate_bool_kwarg(self):\n arg_names = ['inplace', 'copy']\n invalid_values = [1, \"True\", [1, 2, 3], 5.0]\n valid_values = [True, False, None]\n\n for name in arg_names:\n for value in invalid_values:\n with tm.assert_raises_regex(ValueError,\n \"For argument \\\"%s\\\" \"\n \"expected type bool, \"\n \"received type %s\" %\n (name, type(value).__name__)):\n validate_bool_kwarg(value, name)\n\n for value in valid_values:\n assert validate_bool_kwarg(value, name) == value\n\n\nclass TestValidateKwargsAndArgs(object):\n fname = 'func'\n\n def 
test_invalid_total_length_max_length_one(self):\n compat_args = ('foo',)\n kwargs = {'foo': 'FOO'}\n args = ('FoO', 'BaZ')\n\n min_fname_arg_count = 0\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(kwargs) + len(args) + min_fname_arg_count\n msg = (r\"{fname}\\(\\) takes at most {max_length} \"\n r\"argument \\({actual_length} given\\)\"\n .format(fname=self.fname, max_length=max_length,\n actual_length=actual_length))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args_and_kwargs(self.fname, args, kwargs,\n min_fname_arg_count,\n compat_args)\n\n def test_invalid_total_length_max_length_multiple(self):\n compat_args = ('foo', 'bar', 'baz')\n kwargs = {'foo': 'FOO', 'bar': 'BAR'}\n args = ('FoO', 'BaZ')\n\n min_fname_arg_count = 2\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(kwargs) + len(args) + min_fname_arg_count\n msg = (r\"{fname}\\(\\) takes at most {max_length} \"\n r\"arguments \\({actual_length} given\\)\"\n .format(fname=self.fname, max_length=max_length,\n actual_length=actual_length))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args_and_kwargs(self.fname, args, kwargs,\n min_fname_arg_count,\n compat_args)\n\n def test_no_args_with_kwargs(self):\n bad_arg = 'bar'\n min_fname_arg_count = 2\n\n compat_args = OrderedDict()\n compat_args['foo'] = -5\n compat_args[bad_arg] = 1\n\n msg = (r\"the '{arg}' parameter is not supported \"\n r\"in the pandas implementation of {func}\\(\\)\".\n format(arg=bad_arg, func=self.fname))\n\n args = ()\n kwargs = {'foo': -5, bad_arg: 2}\n tm.assert_raises_regex(ValueError, msg,\n validate_args_and_kwargs,\n self.fname, args, kwargs,\n min_fname_arg_count, compat_args)\n\n args = (-5, 2)\n kwargs = {}\n tm.assert_raises_regex(ValueError, msg,\n validate_args_and_kwargs,\n self.fname, args, kwargs,\n min_fname_arg_count, compat_args)\n\n def test_duplicate_argument(self):\n min_fname_arg_count = 2\n compat_args = OrderedDict()\n compat_args['foo'] = None\n compat_args['bar'] = None\n compat_args['baz'] = None\n kwargs = {'foo': None, 'bar': None}\n args = (None,) # duplicate value for 'foo'\n\n msg = (r\"{fname}\\(\\) got multiple values for keyword \"\n r\"argument '{arg}'\".format(fname=self.fname, arg='foo'))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args_and_kwargs(self.fname, args, kwargs,\n min_fname_arg_count,\n compat_args)\n\n def test_validation(self):\n # No exceptions should be thrown\n compat_args = OrderedDict()\n compat_args['foo'] = 1\n compat_args['bar'] = None\n compat_args['baz'] = -2\n kwargs = {'baz': -2}\n args = (1, None)\n\n min_fname_arg_count = 2\n validate_args_and_kwargs(self.fname, args, kwargs,\n min_fname_arg_count,\n compat_args)\n\n\nclass TestMove(object):\n\n def test_cannot_create_instance_of_stolenbuffer(self):\n \"\"\"Stolen buffers need to be created through the smart constructor\n ``move_into_mutable_buffer`` which has a bunch of checks in it.\n \"\"\"\n msg = \"cannot create 'pandas.util._move.stolenbuf' instances\"\n with tm.assert_raises_regex(TypeError, msg):\n stolenbuf()\n\n def test_more_than_one_ref(self):\n \"\"\"Test case for when we try to use ``move_into_mutable_buffer`` when\n the object being moved has other references.\n \"\"\"\n b = b'testing'\n\n with pytest.raises(BadMove) as e:\n def handle_success(type_, value, tb):\n assert value.args[0] is b\n return type(e).handle_success(e, type_, value, tb) # super\n\n e.handle_success = handle_success\n move_into_mutable_buffer(b)\n\n def 
test_exactly_one_ref(self):\n \"\"\"Test case for when the object being moved has exactly one reference.\n \"\"\"\n b = b'testing'\n\n # We need to pass an expression on the stack to ensure that there are\n # not extra references hanging around. We cannot rewrite this test as\n # buf = b[:-3]\n # as_stolen_buf = move_into_mutable_buffer(buf)\n # because then we would have more than one reference to buf.\n as_stolen_buf = move_into_mutable_buffer(b[:-3])\n\n # materialize as bytearray to show that it is mutable\n assert bytearray(as_stolen_buf) == b'test'\n\n @pytest.mark.skipif(PY3, reason='bytes objects cannot be interned in py3')\n def test_interned(self):\n salt = uuid4().hex\n\n def make_string():\n # We need to actually create a new string so that it has refcount\n # one. We use a uuid so that we know the string could not already\n # be in the intern table.\n return ''.join(('testing: ', salt))\n\n # This should work, the string has one reference on the stack.\n move_into_mutable_buffer(make_string())\n\n refcount = [None] # nonlocal\n\n def ref_capture(ob):\n # Subtract two because those are the references owned by this\n # frame:\n # 1. The local variables of this stack frame.\n # 2. The python data stack of this stack frame.\n refcount[0] = sys.getrefcount(ob) - 2\n return ob\n\n with pytest.raises(BadMove):\n # If we intern the string it will still have one reference but now\n # it is in the intern table so if other people intern the same\n # string while the mutable buffer holds the first string they will\n # be the same instance.\n move_into_mutable_buffer(ref_capture(intern(make_string()))) # noqa\n\n assert refcount[0] == 1\n\n\ndef test_numpy_errstate_is_default():\n # The defaults since numpy 1.6.0\n expected = {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',\n 'under': 'ignore'}\n import numpy as np\n from pandas.compat import numpy # noqa\n # The errstate should be unchanged after that import.\n assert np.geterr() == expected\n\n\[email protected]_if_windows\nclass TestLocaleUtils(object):\n\n @classmethod\n def setup_class(cls):\n cls.locales = tm.get_locales()\n cls.current_locale = locale.getlocale()\n\n if not cls.locales:\n pytest.skip(\"No locales found\")\n\n @classmethod\n def teardown_class(cls):\n del cls.locales\n del cls.current_locale\n\n def test_can_set_locale_valid_set(self):\n # Setting the default locale should return True\n assert tm.can_set_locale('') is True\n\n def test_can_set_locale_invalid_set(self):\n # Setting an invalid locale should return False\n assert tm.can_set_locale('non-existent_locale') is False\n\n def test_can_set_locale_invalid_get(self, monkeypatch):\n # In some cases, an invalid locale can be set,\n # but a subsequent getlocale() raises a ValueError\n # See GH 22129\n\n def mockgetlocale():\n raise ValueError()\n\n with monkeypatch.context() as m:\n m.setattr(locale, 'getlocale', mockgetlocale)\n assert tm.can_set_locale('') is False\n\n def test_get_locales(self):\n # all systems should have at least a single locale\n # GH9744\n assert len(tm.get_locales()) > 0\n\n def test_get_locales_prefix(self):\n if len(self.locales) == 1:\n pytest.skip(\"Only a single locale found, no point in \"\n \"trying to test filtering locale prefixes\")\n first_locale = self.locales[0]\n assert len(tm.get_locales(prefix=first_locale[:2])) > 0\n\n def test_set_locale(self):\n if len(self.locales) == 1:\n pytest.skip(\"Only a single locale found, no point in \"\n \"trying to test setting another locale\")\n\n if 
com._all_none(*self.current_locale):\n # Not sure why, but on some travis runs with pytest,\n # getlocale() returned (None, None).\n pytest.skip(\"Current locale is not set.\")\n\n locale_override = os.environ.get('LOCALE_OVERRIDE', None)\n\n if locale_override is None:\n lang, enc = 'it_CH', 'UTF-8'\n elif locale_override == 'C':\n lang, enc = 'en_US', 'ascii'\n else:\n lang, enc = locale_override.split('.')\n\n enc = codecs.lookup(enc).name\n new_locale = lang, enc\n\n if not tm.can_set_locale(new_locale):\n with pytest.raises(locale.Error):\n with tm.set_locale(new_locale):\n pass\n else:\n with tm.set_locale(new_locale) as normalized_locale:\n new_lang, new_enc = normalized_locale.split('.')\n new_enc = codecs.lookup(enc).name\n normalized_locale = new_lang, new_enc\n assert normalized_locale == new_locale\n\n current_locale = locale.getlocale()\n assert current_locale == self.current_locale\n\n\ndef test_make_signature():\n # See GH 17608\n # Case where the func does not have default kwargs\n sig = make_signature(validate_kwargs)\n assert sig == (['fname', 'kwargs', 'compat_args'],\n ['fname', 'kwargs', 'compat_args'])\n\n # Case where the func does have default kwargs\n sig = make_signature(deprecate_kwarg)\n assert sig == (['old_arg_name', 'new_arg_name',\n 'mapping=None', 'stacklevel=2'],\n ['old_arg_name', 'new_arg_name', 'mapping', 'stacklevel'])\n\n\ndef test_safe_import(monkeypatch):\n assert not td.safe_import(\"foo\")\n assert not td.safe_import(\"pandas\", min_version=\"99.99.99\")\n\n # Create dummy module to be imported\n import types\n import sys\n mod_name = \"hello123\"\n mod = types.ModuleType(mod_name)\n mod.__version__ = \"1.5\"\n\n assert not td.safe_import(mod_name)\n monkeypatch.setitem(sys.modules, mod_name, mod)\n assert not td.safe_import(mod_name, min_version=\"2.0\")\n assert td.safe_import(mod_name, min_version=\"1.0\")\n", "# -*- coding: utf-8 -*-\n\nimport re\nfrom textwrap import dedent\nfrom datetime import datetime\nfrom distutils.version import LooseVersion\n\nimport pytest\nimport numpy as np\nimport pandas as pd\nfrom pandas import compat, DataFrame, MultiIndex, option_context, Index\nfrom pandas.compat import u, lrange, StringIO\nfrom pandas.util import testing as tm\nimport pandas.io.formats.format as fmt\n\ndiv_style = ''\ntry:\n import IPython\n if LooseVersion(IPython.__version__) < LooseVersion('3.0.0'):\n div_style = ' style=\"max-width:1500px;overflow:auto;\"'\nexcept (ImportError, AttributeError):\n pass\n\n\nclass TestToHTML(object):\n\n def test_to_html_with_col_space(self):\n def check_with_width(df, col_space):\n # check that col_space affects HTML generation\n # and be very brittle about it.\n html = df.to_html(col_space=col_space)\n hdrs = [x for x in html.split(r\"\\n\") if re.search(r\"<th[>\\s]\", x)]\n assert len(hdrs) > 0\n for h in hdrs:\n assert \"min-width\" in h\n assert str(col_space) in h\n\n df = DataFrame(np.random.random(size=(1, 3)))\n\n check_with_width(df, 30)\n check_with_width(df, 50)\n\n def test_to_html_with_empty_string_label(self):\n # GH3547, to_html regards empty string labels as repeated labels\n data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}\n df = DataFrame(data).set_index(['c1', 'c2'])\n res = df.to_html()\n assert \"rowspan\" not in res\n\n def test_to_html_unicode(self):\n df = DataFrame({u('\\u03c3'): np.arange(10.)})\n expected = u'<table border=\"1\" class=\"dataframe\">\\n <thead>\\n <tr style=\"text-align: right;\">\\n <th></th>\\n <th>\\u03c3</th>\\n </tr>\\n </thead>\\n <tbody>\\n 
<tr>\\n <th>0</th>\\n <td>0.0</td>\\n </tr>\\n <tr>\\n <th>1</th>\\n <td>1.0</td>\\n </tr>\\n <tr>\\n <th>2</th>\\n <td>2.0</td>\\n </tr>\\n <tr>\\n <th>3</th>\\n <td>3.0</td>\\n </tr>\\n <tr>\\n <th>4</th>\\n <td>4.0</td>\\n </tr>\\n <tr>\\n <th>5</th>\\n <td>5.0</td>\\n </tr>\\n <tr>\\n <th>6</th>\\n <td>6.0</td>\\n </tr>\\n <tr>\\n <th>7</th>\\n <td>7.0</td>\\n </tr>\\n <tr>\\n <th>8</th>\\n <td>8.0</td>\\n </tr>\\n <tr>\\n <th>9</th>\\n <td>9.0</td>\\n </tr>\\n </tbody>\\n</table>' # noqa\n assert df.to_html() == expected\n df = DataFrame({'A': [u('\\u03c3')]})\n expected = u'<table border=\"1\" class=\"dataframe\">\\n <thead>\\n <tr style=\"text-align: right;\">\\n <th></th>\\n <th>A</th>\\n </tr>\\n </thead>\\n <tbody>\\n <tr>\\n <th>0</th>\\n <td>\\u03c3</td>\\n </tr>\\n </tbody>\\n</table>' # noqa\n assert df.to_html() == expected\n\n def test_to_html_decimal(self):\n # GH 12031\n df = DataFrame({'A': [6.0, 3.1, 2.2]})\n result = df.to_html(decimal=',')\n expected = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>0</th>\\n'\n ' <td>6,0</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>1</th>\\n'\n ' <td>3,1</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>2</th>\\n'\n ' <td>2,2</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert result == expected\n\n def test_to_html_escaped(self):\n a = 'str<ing1 &amp;'\n b = 'stri>ng2 &amp;'\n\n test_dict = {'co<l1': {a: \"<type 'str'>\",\n b: \"<type 'str'>\"},\n 'co>l2': {a: \"<type 'str'>\",\n b: \"<type 'str'>\"}}\n rs = DataFrame(test_dict).to_html()\n xp = \"\"\"<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>co&lt;l1</th>\n <th>co&gt;l2</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>str&lt;ing1 &amp;amp;</th>\n <td>&lt;type 'str'&gt;</td>\n <td>&lt;type 'str'&gt;</td>\n </tr>\n <tr>\n <th>stri&gt;ng2 &amp;amp;</th>\n <td>&lt;type 'str'&gt;</td>\n <td>&lt;type 'str'&gt;</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert xp == rs\n\n def test_to_html_escape_disabled(self):\n a = 'str<ing1 &amp;'\n b = 'stri>ng2 &amp;'\n\n test_dict = {'co<l1': {a: \"<b>bold</b>\",\n b: \"<b>bold</b>\"},\n 'co>l2': {a: \"<b>bold</b>\",\n b: \"<b>bold</b>\"}}\n rs = DataFrame(test_dict).to_html(escape=False)\n xp = \"\"\"<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>co<l1</th>\n <th>co>l2</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>str<ing1 &amp;</th>\n <td><b>bold</b></td>\n <td><b>bold</b></td>\n </tr>\n <tr>\n <th>stri>ng2 &amp;</th>\n <td><b>bold</b></td>\n <td><b>bold</b></td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert xp == rs\n\n def test_to_html_multiindex_index_false(self):\n # issue 8452\n df = DataFrame({\n 'a': range(2),\n 'b': range(3, 5),\n 'c': range(5, 7),\n 'd': range(3, 5)\n })\n df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])\n result = df.to_html(index=False)\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th colspan=\"2\" halign=\"left\">a</th>\n <th colspan=\"2\" halign=\"left\">b</th>\n </tr>\n <tr>\n <th>c</th>\n <th>d</th>\n <th>c</th>\n <th>d</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>0</td>\n <td>3</td>\n <td>5</td>\n <td>3</td>\n </tr>\n <tr>\n <td>1</td>\n <td>4</td>\n <td>6</td>\n <td>4</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n df.index = Index(df.index.values, 
name='idx')\n result = df.to_html(index=False)\n assert result == expected\n\n def test_to_html_multiindex_sparsify_false_multi_sparse(self):\n with option_context('display.multi_sparse', False):\n index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],\n names=['foo', None])\n\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)\n\n result = df.to_html()\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th></th>\n <th>0</th>\n <th>1</th>\n </tr>\n <tr>\n <th>foo</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <th>0</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>0</th>\n <th>1</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th>1</th>\n <th>0</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>1</th>\n <th>1</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],\n columns=index[::2], index=index)\n\n result = df.to_html()\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th>foo</th>\n <th>0</th>\n <th>1</th>\n </tr>\n <tr>\n <th></th>\n <th></th>\n <th>0</th>\n <th>0</th>\n </tr>\n <tr>\n <th>foo</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <th>0</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>0</th>\n <th>1</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th>1</th>\n <th>0</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>1</th>\n <th>1</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n def test_to_html_multiindex_sparsify(self):\n index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],\n names=['foo', None])\n\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)\n\n result = df.to_html()\n expected = \"\"\"<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th></th>\n <th>0</th>\n <th>1</th>\n </tr>\n <tr>\n <th>foo</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"2\" valign=\"top\">0</th>\n <th>0</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>1</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th rowspan=\"2\" valign=\"top\">1</th>\n <th>0</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>1</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], columns=index[::2],\n index=index)\n\n result = df.to_html()\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th>foo</th>\n <th>0</th>\n <th>1</th>\n </tr>\n <tr>\n <th></th>\n <th></th>\n <th>0</th>\n <th>0</th>\n </tr>\n <tr>\n <th>foo</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"2\" valign=\"top\">0</th>\n <th>0</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>1</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th rowspan=\"2\" valign=\"top\">1</th>\n <th>0</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>1</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n def test_to_html_multiindex_odd_even_truncate(self):\n # GH 14882 - Issue on truncation with odd length DataFrame\n mi = MultiIndex.from_product([[100, 200, 300],\n [10, 20, 30],\n [1, 2, 3, 4, 5, 6, 7]],\n names=['a', 'b', 'c'])\n df = DataFrame({'n': 
range(len(mi))}, index=mi)\n result = df.to_html(max_rows=60)\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th></th>\n <th></th>\n <th>n</th>\n </tr>\n <tr>\n <th>a</th>\n <th>b</th>\n <th>c</th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"21\" valign=\"top\">100</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>0</td>\n </tr>\n <tr>\n <th>2</th>\n <td>1</td>\n </tr>\n <tr>\n <th>3</th>\n <td>2</td>\n </tr>\n <tr>\n <th>4</th>\n <td>3</td>\n </tr>\n <tr>\n <th>5</th>\n <td>4</td>\n </tr>\n <tr>\n <th>6</th>\n <td>5</td>\n </tr>\n <tr>\n <th>7</th>\n <td>6</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">20</th>\n <th>1</th>\n <td>7</td>\n </tr>\n <tr>\n <th>2</th>\n <td>8</td>\n </tr>\n <tr>\n <th>3</th>\n <td>9</td>\n </tr>\n <tr>\n <th>4</th>\n <td>10</td>\n </tr>\n <tr>\n <th>5</th>\n <td>11</td>\n </tr>\n <tr>\n <th>6</th>\n <td>12</td>\n </tr>\n <tr>\n <th>7</th>\n <td>13</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>14</td>\n </tr>\n <tr>\n <th>2</th>\n <td>15</td>\n </tr>\n <tr>\n <th>3</th>\n <td>16</td>\n </tr>\n <tr>\n <th>4</th>\n <td>17</td>\n </tr>\n <tr>\n <th>5</th>\n <td>18</td>\n </tr>\n <tr>\n <th>6</th>\n <td>19</td>\n </tr>\n <tr>\n <th>7</th>\n <td>20</td>\n </tr>\n <tr>\n <th rowspan=\"19\" valign=\"top\">200</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>21</td>\n </tr>\n <tr>\n <th>2</th>\n <td>22</td>\n </tr>\n <tr>\n <th>3</th>\n <td>23</td>\n </tr>\n <tr>\n <th>4</th>\n <td>24</td>\n </tr>\n <tr>\n <th>5</th>\n <td>25</td>\n </tr>\n <tr>\n <th>6</th>\n <td>26</td>\n </tr>\n <tr>\n <th>7</th>\n <td>27</td>\n </tr>\n <tr>\n <th rowspan=\"5\" valign=\"top\">20</th>\n <th>1</th>\n <td>28</td>\n </tr>\n <tr>\n <th>2</th>\n <td>29</td>\n </tr>\n <tr>\n <th>...</th>\n <td>...</td>\n </tr>\n <tr>\n <th>6</th>\n <td>33</td>\n </tr>\n <tr>\n <th>7</th>\n <td>34</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>35</td>\n </tr>\n <tr>\n <th>2</th>\n <td>36</td>\n </tr>\n <tr>\n <th>3</th>\n <td>37</td>\n </tr>\n <tr>\n <th>4</th>\n <td>38</td>\n </tr>\n <tr>\n <th>5</th>\n <td>39</td>\n </tr>\n <tr>\n <th>6</th>\n <td>40</td>\n </tr>\n <tr>\n <th>7</th>\n <td>41</td>\n </tr>\n <tr>\n <th rowspan=\"21\" valign=\"top\">300</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>42</td>\n </tr>\n <tr>\n <th>2</th>\n <td>43</td>\n </tr>\n <tr>\n <th>3</th>\n <td>44</td>\n </tr>\n <tr>\n <th>4</th>\n <td>45</td>\n </tr>\n <tr>\n <th>5</th>\n <td>46</td>\n </tr>\n <tr>\n <th>6</th>\n <td>47</td>\n </tr>\n <tr>\n <th>7</th>\n <td>48</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">20</th>\n <th>1</th>\n <td>49</td>\n </tr>\n <tr>\n <th>2</th>\n <td>50</td>\n </tr>\n <tr>\n <th>3</th>\n <td>51</td>\n </tr>\n <tr>\n <th>4</th>\n <td>52</td>\n </tr>\n <tr>\n <th>5</th>\n <td>53</td>\n </tr>\n <tr>\n <th>6</th>\n <td>54</td>\n </tr>\n <tr>\n <th>7</th>\n <td>55</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>56</td>\n </tr>\n <tr>\n <th>2</th>\n <td>57</td>\n </tr>\n <tr>\n <th>3</th>\n <td>58</td>\n </tr>\n <tr>\n <th>4</th>\n <td>59</td>\n </tr>\n <tr>\n <th>5</th>\n <td>60</td>\n </tr>\n <tr>\n <th>6</th>\n <td>61</td>\n </tr>\n <tr>\n <th>7</th>\n <td>62</td>\n </tr>\n </tbody>\n</table>\"\"\"\n assert result == expected\n\n # Test that ... 
appears in a middle level\n result = df.to_html(max_rows=56)\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th></th>\n <th></th>\n <th>n</th>\n </tr>\n <tr>\n <th>a</th>\n <th>b</th>\n <th>c</th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"21\" valign=\"top\">100</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>0</td>\n </tr>\n <tr>\n <th>2</th>\n <td>1</td>\n </tr>\n <tr>\n <th>3</th>\n <td>2</td>\n </tr>\n <tr>\n <th>4</th>\n <td>3</td>\n </tr>\n <tr>\n <th>5</th>\n <td>4</td>\n </tr>\n <tr>\n <th>6</th>\n <td>5</td>\n </tr>\n <tr>\n <th>7</th>\n <td>6</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">20</th>\n <th>1</th>\n <td>7</td>\n </tr>\n <tr>\n <th>2</th>\n <td>8</td>\n </tr>\n <tr>\n <th>3</th>\n <td>9</td>\n </tr>\n <tr>\n <th>4</th>\n <td>10</td>\n </tr>\n <tr>\n <th>5</th>\n <td>11</td>\n </tr>\n <tr>\n <th>6</th>\n <td>12</td>\n </tr>\n <tr>\n <th>7</th>\n <td>13</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>14</td>\n </tr>\n <tr>\n <th>2</th>\n <td>15</td>\n </tr>\n <tr>\n <th>3</th>\n <td>16</td>\n </tr>\n <tr>\n <th>4</th>\n <td>17</td>\n </tr>\n <tr>\n <th>5</th>\n <td>18</td>\n </tr>\n <tr>\n <th>6</th>\n <td>19</td>\n </tr>\n <tr>\n <th>7</th>\n <td>20</td>\n </tr>\n <tr>\n <th rowspan=\"15\" valign=\"top\">200</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>21</td>\n </tr>\n <tr>\n <th>2</th>\n <td>22</td>\n </tr>\n <tr>\n <th>3</th>\n <td>23</td>\n </tr>\n <tr>\n <th>4</th>\n <td>24</td>\n </tr>\n <tr>\n <th>5</th>\n <td>25</td>\n </tr>\n <tr>\n <th>6</th>\n <td>26</td>\n </tr>\n <tr>\n <th>7</th>\n <td>27</td>\n </tr>\n <tr>\n <th>...</th>\n <th>...</th>\n <td>...</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>35</td>\n </tr>\n <tr>\n <th>2</th>\n <td>36</td>\n </tr>\n <tr>\n <th>3</th>\n <td>37</td>\n </tr>\n <tr>\n <th>4</th>\n <td>38</td>\n </tr>\n <tr>\n <th>5</th>\n <td>39</td>\n </tr>\n <tr>\n <th>6</th>\n <td>40</td>\n </tr>\n <tr>\n <th>7</th>\n <td>41</td>\n </tr>\n <tr>\n <th rowspan=\"21\" valign=\"top\">300</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>42</td>\n </tr>\n <tr>\n <th>2</th>\n <td>43</td>\n </tr>\n <tr>\n <th>3</th>\n <td>44</td>\n </tr>\n <tr>\n <th>4</th>\n <td>45</td>\n </tr>\n <tr>\n <th>5</th>\n <td>46</td>\n </tr>\n <tr>\n <th>6</th>\n <td>47</td>\n </tr>\n <tr>\n <th>7</th>\n <td>48</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">20</th>\n <th>1</th>\n <td>49</td>\n </tr>\n <tr>\n <th>2</th>\n <td>50</td>\n </tr>\n <tr>\n <th>3</th>\n <td>51</td>\n </tr>\n <tr>\n <th>4</th>\n <td>52</td>\n </tr>\n <tr>\n <th>5</th>\n <td>53</td>\n </tr>\n <tr>\n <th>6</th>\n <td>54</td>\n </tr>\n <tr>\n <th>7</th>\n <td>55</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>56</td>\n </tr>\n <tr>\n <th>2</th>\n <td>57</td>\n </tr>\n <tr>\n <th>3</th>\n <td>58</td>\n </tr>\n <tr>\n <th>4</th>\n <td>59</td>\n </tr>\n <tr>\n <th>5</th>\n <td>60</td>\n </tr>\n <tr>\n <th>6</th>\n <td>61</td>\n </tr>\n <tr>\n <th>7</th>\n <td>62</td>\n </tr>\n </tbody>\n</table>\"\"\"\n assert result == expected\n\n def test_to_html_index_formatter(self):\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], columns=['foo', None],\n index=lrange(4))\n\n f = lambda x: 'abcd' [x]\n result = df.to_html(formatters={'__index__': f})\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr 
style=\"text-align: right;\">\n <th></th>\n <th>foo</th>\n <th>None</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>a</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>b</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th>c</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>d</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n def test_to_html_datetime64_monthformatter(self):\n months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]\n x = DataFrame({'months': months})\n\n def format_func(x):\n return x.strftime('%Y-%m')\n result = x.to_html(formatters={'months': format_func})\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>months</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>2016-01</td>\n </tr>\n <tr>\n <th>1</th>\n <td>2016-02</td>\n </tr>\n </tbody>\n</table>\"\"\"\n assert result == expected\n\n def test_to_html_datetime64_hourformatter(self):\n\n x = DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],\n format='%H:%M:%S.%f')})\n\n def format_func(x):\n return x.strftime('%H:%M')\n result = x.to_html(formatters={'hod': format_func})\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>hod</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>10:10</td>\n </tr>\n <tr>\n <th>1</th>\n <td>12:12</td>\n </tr>\n </tbody>\n</table>\"\"\"\n assert result == expected\n\n def test_to_html_regression_GH6098(self):\n df = DataFrame({\n u('clé1'): [u('a'), u('a'), u('b'), u('b'), u('a')],\n u('clé2'): [u('1er'), u('2ème'), u('1er'), u('2ème'), u('1er')],\n 'données1': np.random.randn(5),\n 'données2': np.random.randn(5)})\n\n # it works\n df.pivot_table(index=[u('clé1')], columns=[u('clé2')])._repr_html_()\n\n def test_to_html_truncate(self):\n index = pd.DatetimeIndex(start='20010101', freq='D', periods=20)\n df = DataFrame(index=index, columns=range(20))\n result = df.to_html(max_rows=8, max_cols=4)\n expected = '''\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>0</th>\n <th>1</th>\n <th>...</th>\n <th>18</th>\n <th>19</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>2001-01-01</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-02</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-03</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-04</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>...</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n <tr>\n <th>2001-01-17</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-18</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-19</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-20</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n </tbody>\n</table>'''\n assert result == expected\n\n def test_to_html_truncate_multi_index(self):\n arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n df = DataFrame(index=arrays, 
columns=arrays)\n result = df.to_html(max_rows=7, max_cols=7)\n expected = '''\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th></th>\n <th colspan=\"2\" halign=\"left\">bar</th>\n <th>baz</th>\n <th>...</th>\n <th>foo</th>\n <th colspan=\"2\" halign=\"left\">qux</th>\n </tr>\n <tr>\n <th></th>\n <th></th>\n <th>one</th>\n <th>two</th>\n <th>one</th>\n <th>...</th>\n <th>two</th>\n <th>one</th>\n <th>two</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"2\" valign=\"top\">bar</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>baz</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>...</th>\n <th>...</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n <tr>\n <th>foo</th>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th rowspan=\"2\" valign=\"top\">qux</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n </tbody>\n</table>'''\n assert result == expected\n\n @pytest.mark.xfail(reason='GH22887 TypeError', strict=True)\n def test_to_html_truncate_multi_index_sparse_off(self):\n arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n df = DataFrame(index=arrays, columns=arrays)\n result = df.to_html(max_rows=7, max_cols=7, sparsify=False)\n expected = '''\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th></th>\n <th>bar</th>\n <th>bar</th>\n <th>baz</th>\n <th>...</th>\n <th>foo</th>\n <th>qux</th>\n <th>qux</th>\n </tr>\n <tr>\n <th></th>\n <th></th>\n <th>one</th>\n <th>two</th>\n <th>one</th>\n <th>...</th>\n <th>two</th>\n <th>one</th>\n <th>two</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>bar</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>bar</th>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>baz</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>foo</th>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>qux</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>qux</th>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n </tbody>\n</table>'''\n assert result == expected\n\n def test_to_html_border(self):\n df = DataFrame({'A': [1, 2]})\n result = df.to_html()\n assert 'border=\"1\"' in result\n\n def test_to_html_border_option(self):\n df = DataFrame({'A': [1, 2]})\n with pd.option_context('display.html.border', 0):\n result 
= df.to_html()\n assert 'border=\"0\"' in result\n assert 'border=\"0\"' in df._repr_html_()\n\n def test_to_html_border_zero(self):\n df = DataFrame({'A': [1, 2]})\n result = df.to_html(border=0)\n assert 'border=\"0\"' in result\n\n @tm.capture_stdout\n def test_display_option_warning(self):\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n pd.options.html.border\n\n def test_to_html(self):\n # big mixed\n biggie = DataFrame({'A': np.random.randn(200),\n 'B': tm.makeStringIndex(200)},\n index=lrange(200))\n\n biggie.loc[:20, 'A'] = np.nan\n biggie.loc[:20, 'B'] = np.nan\n s = biggie.to_html()\n\n buf = StringIO()\n retval = biggie.to_html(buf=buf)\n assert retval is None\n assert buf.getvalue() == s\n\n assert isinstance(s, compat.string_types)\n\n biggie.to_html(columns=['B', 'A'], col_space=17)\n biggie.to_html(columns=['B', 'A'],\n formatters={'A': lambda x: '{x:.1f}'.format(x=x)})\n\n biggie.to_html(columns=['B', 'A'], float_format=str)\n biggie.to_html(columns=['B', 'A'], col_space=12, float_format=str)\n\n frame = DataFrame(index=np.arange(200))\n frame.to_html()\n\n def test_to_html_filename(self):\n biggie = DataFrame({'A': np.random.randn(200),\n 'B': tm.makeStringIndex(200)},\n index=lrange(200))\n\n biggie.loc[:20, 'A'] = np.nan\n biggie.loc[:20, 'B'] = np.nan\n with tm.ensure_clean('test.html') as path:\n biggie.to_html(path)\n with open(path, 'r') as f:\n s = biggie.to_html()\n s2 = f.read()\n assert s == s2\n\n frame = DataFrame(index=np.arange(200))\n with tm.ensure_clean('test.html') as path:\n frame.to_html(path)\n with open(path, 'r') as f:\n assert frame.to_html() == f.read()\n\n def test_to_html_with_no_bold(self):\n x = DataFrame({'x': np.random.randn(5)})\n ashtml = x.to_html(bold_rows=False)\n assert '<strong' not in ashtml[ashtml.find(\"</thead>\")]\n\n def test_to_html_columns_arg(self):\n frame = DataFrame(tm.getSeriesData())\n result = frame.to_html(columns=['A'])\n assert '<th>B</th>' not in result\n\n def test_to_html_multiindex(self):\n columns = MultiIndex.from_tuples(list(zip(np.arange(2).repeat(2),\n np.mod(lrange(4), 2))),\n names=['CL0', 'CL1'])\n df = DataFrame([list('abcd'), list('efgh')], columns=columns)\n result = df.to_html(justify='left')\n expected = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr>\\n'\n ' <th>CL0</th>\\n'\n ' <th colspan=\"2\" halign=\"left\">0</th>\\n'\n ' <th colspan=\"2\" halign=\"left\">1</th>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>CL1</th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>0</th>\\n'\n ' <td>a</td>\\n'\n ' <td>b</td>\\n'\n ' <td>c</td>\\n'\n ' <td>d</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>1</th>\\n'\n ' <td>e</td>\\n'\n ' <td>f</td>\\n'\n ' <td>g</td>\\n'\n ' <td>h</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n\n assert result == expected\n\n columns = MultiIndex.from_tuples(list(zip(\n range(4), np.mod(\n lrange(4), 2))))\n df = DataFrame([list('abcd'), list('efgh')], columns=columns)\n\n result = df.to_html(justify='right')\n expected = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr>\\n'\n ' <th></th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' <th>2</th>\\n'\n ' <th>3</th>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th></th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>0</th>\\n'\n ' <td>a</td>\\n'\n ' <td>b</td>\\n'\n ' 
<td>c</td>\\n'\n ' <td>d</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>1</th>\\n'\n ' <td>e</td>\\n'\n ' <td>f</td>\\n'\n ' <td>g</td>\\n'\n ' <td>h</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n\n assert result == expected\n\n @pytest.mark.parametrize(\"justify\", fmt._VALID_JUSTIFY_PARAMETERS)\n def test_to_html_justify(self, justify):\n df = DataFrame({'A': [6, 30000, 2],\n 'B': [1, 2, 70000],\n 'C': [223442, 0, 1]},\n columns=['A', 'B', 'C'])\n result = df.to_html(justify=justify)\n expected = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: {justify};\">\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>0</th>\\n'\n ' <td>6</td>\\n'\n ' <td>1</td>\\n'\n ' <td>223442</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>1</th>\\n'\n ' <td>30000</td>\\n'\n ' <td>2</td>\\n'\n ' <td>0</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>2</th>\\n'\n ' <td>2</td>\\n'\n ' <td>70000</td>\\n'\n ' <td>1</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>'.format(justify=justify))\n assert result == expected\n\n @pytest.mark.parametrize(\"justify\", [\"super-right\", \"small-left\",\n \"noinherit\", \"tiny\", \"pandas\"])\n def test_to_html_invalid_justify(self, justify):\n # see gh-17527\n df = DataFrame()\n msg = \"Invalid value for justify parameter\"\n\n with tm.assert_raises_regex(ValueError, msg):\n df.to_html(justify=justify)\n\n def test_to_html_index(self):\n index = ['foo', 'bar', 'baz']\n df = DataFrame({'A': [1, 2, 3],\n 'B': [1.2, 3.4, 5.6],\n 'C': ['one', 'two', np.nan]},\n columns=['A', 'B', 'C'],\n index=index)\n expected_with_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>foo</th>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bar</th>\\n'\n ' <td>2</td>\\n'\n ' <td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>baz</th>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert df.to_html() == expected_with_index\n\n expected_without_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <td>2</td>\\n'\n ' <td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n result = df.to_html(index=False)\n for i in index:\n assert i not in result\n assert result == expected_without_index\n df.index = Index(['foo', 'bar', 'baz'], name='idx')\n expected_with_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>idx</th>\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>foo</th>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bar</th>\\n'\n ' <td>2</td>\\n'\n ' 
<td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>baz</th>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert df.to_html() == expected_with_index\n assert df.to_html(index=False) == expected_without_index\n\n tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]\n df.index = MultiIndex.from_tuples(tuples)\n\n expected_with_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th rowspan=\"2\" valign=\"top\">foo</th>\\n'\n ' <th>car</th>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bike</th>\\n'\n ' <td>2</td>\\n'\n ' <td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bar</th>\\n'\n ' <th>car</th>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert df.to_html() == expected_with_index\n\n result = df.to_html(index=False)\n for i in ['foo', 'bar', 'car', 'bike']:\n assert i not in result\n # must be the same result as normal index\n assert result == expected_without_index\n\n df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])\n expected_with_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>idx1</th>\\n'\n ' <th>idx2</th>\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th rowspan=\"2\" valign=\"top\">foo</th>\\n'\n ' <th>car</th>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bike</th>\\n'\n ' <td>2</td>\\n'\n ' <td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bar</th>\\n'\n ' <th>car</th>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert df.to_html() == expected_with_index\n assert df.to_html(index=False) == expected_without_index\n\n def test_to_html_with_classes(self):\n df = DataFrame()\n result = df.to_html(classes=\"sortable draggable\")\n expected = dedent(\"\"\"\n\n <table border=\"1\" class=\"dataframe sortable draggable\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n </tr>\n </thead>\n <tbody>\n </tbody>\n </table>\n\n \"\"\").strip()\n assert result == expected\n\n result = df.to_html(classes=[\"sortable\", \"draggable\"])\n assert result == expected\n\n def test_to_html_no_index_max_rows(self):\n # GH https://github.com/pandas-dev/pandas/issues/14998\n df = DataFrame({\"A\": [1, 2, 3, 4]})\n result = df.to_html(index=False, max_rows=1)\n expected = dedent(\"\"\"\\\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th>A</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>1</td>\n </tr>\n </tbody>\n </table>\"\"\")\n assert result == expected\n\n def test_to_html_multiindex_max_cols(self):\n # GH 6131\n index = MultiIndex(levels=[['ba', 'bb', 'bc'], ['ca', 'cb', 'cc']],\n labels=[[0, 1, 2], [0, 1, 2]],\n names=['b', 'c'])\n columns = MultiIndex(levels=[['d'], ['aa', 'ab', 'ac']],\n labels=[[0, 0, 0], [0, 1, 2]],\n names=[None, 'a'])\n data = np.array(\n [[1., 
np.nan, np.nan], [np.nan, 2., np.nan], [np.nan, np.nan, 3.]])\n df = DataFrame(data, index, columns)\n result = df.to_html(max_cols=2)\n expected = dedent(\"\"\"\\\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th></th>\n <th colspan=\"3\" halign=\"left\">d</th>\n </tr>\n <tr>\n <th></th>\n <th>a</th>\n <th>aa</th>\n <th>...</th>\n <th>ac</th>\n </tr>\n <tr>\n <th>b</th>\n <th>c</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>ba</th>\n <th>ca</th>\n <td>1.0</td>\n <td>...</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>bb</th>\n <th>cb</th>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>bc</th>\n <th>cc</th>\n <td>NaN</td>\n <td>...</td>\n <td>3.0</td>\n </tr>\n </tbody>\n </table>\"\"\")\n assert result == expected\n\n def test_to_html_notebook_has_style(self):\n df = pd.DataFrame({\"A\": [1, 2, 3]})\n result = df.to_html(notebook=True)\n assert \"tbody tr th:only-of-type\" in result\n assert \"vertical-align: middle;\" in result\n assert \"thead th\" in result\n\n def test_to_html_notebook_has_no_style(self):\n df = pd.DataFrame({\"A\": [1, 2, 3]})\n result = df.to_html()\n assert \"tbody tr th:only-of-type\" not in result\n assert \"vertical-align: middle;\" not in result\n assert \"thead th\" not in result\n\n def test_to_html_with_index_names_false(self):\n # gh-16493\n df = pd.DataFrame({\"A\": [1, 2]}, index=pd.Index(['a', 'b'],\n name='myindexname'))\n result = df.to_html(index_names=False)\n assert 'myindexname' not in result\n\n def test_to_html_with_id(self):\n # gh-8496\n df = pd.DataFrame({\"A\": [1, 2]}, index=pd.Index(['a', 'b'],\n name='myindexname'))\n result = df.to_html(index_names=False, table_id=\"TEST_ID\")\n assert ' id=\"TEST_ID\"' in result\n" ]
[ [ "pandas.core.common.flatten", "pandas.core.computation.common._result_type_many", "numpy.result_type", "pandas.compat.zip", "pandas.compat.iteritems", "numpy.array", "pandas.compat.range" ], [ "numpy.asarray", "numpy.issubdtype", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.common.values_from_object", "pandas.core.common.asarray_tuplesafe", "pandas.core.ops.get_op_result_name", "pandas.io.formats.format.FloatArrayFormatter", "pandas.core.dtypes.common.is_float", "pandas.core.dtypes.common.is_integer_dtype", "pandas.util._decorators.Appender", "pandas.core.dtypes.common.pandas_dtype", "numpy.isnan", "pandas.core.dtypes.concat._concat_index_same_dtype", "numpy.array", "pandas.core.dtypes.common.needs_i8_conversion", "pandas.core.dtypes.common.is_bool", "pandas.core.dtypes.common.is_bool_dtype", "numpy.array_equal", "pandas.core.dtypes.common.is_scalar", "pandas.core.indexes.base._try_get_item", "pandas.core.dtypes.missing.isna" ], [ "pandas.util._move.move_into_mutable_buffer", "pandas.util._validators.validate_args_and_kwargs", "pandas.util.testing.get_locales", "pandas.util._validators.validate_bool_kwarg", "pandas.util._decorators.deprecate_kwarg", "numpy.geterr", "pandas.util.testing.assert_raises_regex", "pandas.util._validators.validate_args", "pandas.util.testing.assert_produces_warning", "pandas.util._move.stolenbuf", "pandas.util.testing.can_set_locale", "pandas.util.testing.set_locale", "pandas.util.testing.rands", "pandas.util._validators.validate_kwargs", "pandas.util._test_decorators.safe_import", "pandas.util._decorators.make_signature", "pandas.util.testing.rands_array", "pandas.core.common._all_none" ], [ "pandas.to_datetime", "pandas.util.testing.ensure_clean", "pandas.util.testing.assert_produces_warning", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.random.randn", "numpy.arange", "pandas.compat.StringIO", "pandas.Index", "pandas.DatetimeIndex", "pandas.compat.u", "pandas.MultiIndex", "pandas.option_context", "pandas.util.testing.makeStringIndex", "pandas.util.testing.getSeriesData", "pandas.MultiIndex.from_product", "numpy.array", "numpy.random.random", "pandas.util.testing.assert_raises_regex", "pandas.MultiIndex.from_arrays", "pandas.compat.lrange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.24", "0.20", "0.21" ], "scipy": [], "tensorflow": [] } ]
billsioros/computational-geometry
[ "398a92e3c08046f85eb3e95828afe62230b816fb" ]
[ "Homework_1/exercise3.py" ]
[ "from matplotlib.patches import Polygon\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nfrom exercise1 import check_for_triangle\nfrom exercise1 import plot_2D_points\n\n\ndef remove_duplicates(lst): \n return [item for item in (set(tuple(i) for i in lst))] \n\n\n# select a point from avaliable points (for ccw)\ndef select_random_point(current_hull_points, point1, point2):\n random_point = current_hull_points[0][0]\n if random_point == point1 or random_point == point2:\n random_points = [p[0] for p in current_hull_points if p[0] != point1 and p[0] != point2]\n random_point = random_points[0]\n return random_point\n\n\n# makes thw final plot with all points and the convex hull\ndef plot_2D_hull(current_hull, all_points):\n points = []\n for line in current_hull:\n points.append(line[0])\n points.append(line[1])\n\n plot_2D_points(points+all_points, polyg=True)\n\n line_of_hull = []\n for k in current_hull:\n line_of_hull.append(k[0])\n line_of_hull.append(k[1])\n hull = np.array(line_of_hull)\n hull_plot = plt.Polygon(hull, fill=False)\n plt.gca().add_patch(hull_plot)\n del line_of_hull[:]\n plt.show()\n\n\n# returns the sign of det\ndef ccw(A, B, C):\n return (B[0] - A[0]) * (C[1] - A[1]) > (B[1] - A[1]) * (C[0] - A[0])\n\n\ndef check_ccw(p, previous_point, end_point, random_point):\n if ccw(previous_point, end_point, random_point):\n if not ccw(previous_point, end_point, p):\n return True\n else:\n return False\n else:\n if ccw(previous_point, end_point, p):\n return True\n else:\n return False\n\n\ndef beneath_beyond(points):\n # Step 1: sort points in descending\n sorted_points = sorted(points, key=lambda x: (x[0], x[1]), reverse=True)\n\n # Step 2: initial hull = triangle\n current_hull_points = []\n current_hull = []\n # if first 3 points are collinear, select (x,min(y)) and (x,max(y))\n if not check_for_triangle(sorted_points[0][0], sorted_points[0][1],\n sorted_points[1][0], sorted_points[1][1],\n sorted_points[2][0], sorted_points[2][1]):\n for p in sorted_points[1:]:\n if p[0] == sorted_points[0][0]:\n last = p\n sorted_points.remove(p)\n sorted_points.append(last)\n sorted_points = sorted(sorted_points, key=lambda x: x[0], reverse=True)\n\n for p in sorted_points[0:2]:\n current_hull_points.append([p, 'blue'])\n current_hull_points.append([sorted_points[2], 'red'])\n \n current_hull.append([sorted_points[0], sorted_points[1], 'blue'])\n current_hull.append([sorted_points[0], sorted_points[2], 'blue'])\n current_hull.append([sorted_points[1], sorted_points[2], 'blue'])\n \n del sorted_points[0:3]\n previous_point = current_hull_points[-1][0]\n\n # Step 3: \n color = [] \n purple_points = []\n for p in sorted_points:\n # Step 3B: find all red lines\n # check every blue line in hull, if it's red now\n for line in current_hull:\n if line[2] == 'blue':\n random_point = select_random_point(current_hull_points, line[0], line[1])\n if check_ccw(p, line[0], line[1], random_point):\n line[2] = 'red'\n else:\n line[2] = 'blue' \n\n # Step 3B: find two purple points\n # re-coloring points\n for point1 in current_hull_points:\n del color[:]\n for point2 in current_hull:\n if point2[0] == point1[0] or point2[1] == point1[0]:\n color.append(point2[2]) \n if len(color) > 0:\n if color[0] != 'purple' and color[1] != 'purple':\n if color[0] != color[1]: # red + blue = purple\n point1[1] = 'purple' \n \n del purple_points[:]\n for point in current_hull_points:\n if point[1] == 'purple':\n purple_points.append(point[0])\n\n # Step 3C: remove all red lines\n for line in 
current_hull:\n if line[2] == 'red':\n line[2] = 'delete_line'\n current_hull = [elem for elem in current_hull if elem[2] != 'delete_line']\n\n # Step 3C: put two lines from p to purple1 and purple2 point\n current_hull.append([p, purple_points[0], 'blue']) \n current_hull.append([p, purple_points[1], 'blue']) \n\n # initialize for next step\n current_hull_points.append([p,'red'])\n for point in current_hull_points:\n if point[1] == 'purple':\n point[1] = 'blue'\n plot_2D_hull(current_hull, points)\n\n\n\n\nif __name__ == \"__main__\":\n # read points from user(input choice 1)\n # number_of_points = input('Give the number of points: ')\n # if int(number_of_points) < 3:\n # print('Error: Program needs 3 points at least.')\n # exit()\n # points = list(tuple(map(int,input(\"Give a point: \").split())) for r in range(int(number_of_points)))\n \n # random poinsts(input choice 2)\n for i in range(10):\n points = [(random.randrange(-100, 100), random.randrange(-100, 100)) for i in range(20)]\n points = remove_duplicates(points)\n\n # call beneath_beyond algorithm\n beneath_beyond(points)\n" ]
[ [ "matplotlib.pyplot.Polygon", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.gca" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TheoLvs/westworld
[ "7fb435f3a028ff3d3156bf2a023b44ee06aa9f8b" ]
[ "westworld/_deprecated/chicken_game.py" ]
[ "\nimport sys\nsys.path.append(\"C:/git/reinforcement-learning\")\n\n\nfrom hyperion.agents import *\nfrom hyperion.environment import *\n\nimport random\nimport numpy as np\nimport uuid\nimport attr\n\n\nSTATUSES = [\"EGG\",\"CHICKEN\",\"COW\",\"FARMER\",\"SUPERMAN\"]\nSIZE = 100\n\n\[email protected](slots = True)\nclass Player(Agent):\n\n # # Agent id\n # id = attr.ib()\n # id.default\n # def _init_id(self):\n # return str(uuid.uuid1())\n\n # Status\n status = attr.ib(default = 0,init=False)\n\n # Position\n x = attr.ib(init = False)\n @x.default\n def _init_x(self):\n return random.randint(0,SIZE)\n\n\n def step(self,env):\n\n # Movement\n new_x = self.x + random.choice([-1,1])\n new_x = np.clip(new_x,0,SIZE-1)\n self.x = new_x\n\n # Others\n others = env.inverse_loc(self.id)\n for other in others:\n if other.x == self.x:\n if other.status == self.status:\n other.status = 0\n self.status += 1\n\n def interacts_with(self,other):\n return self.x == other.x,1\n\n\nclass ChickenGame(Environment):\n\n def render(self):\n env = [\" \"]*SIZE\n for agent in self.agents:\n env[agent.x] = str(agent.status)\n return \"|\"+\"\".join(env)+\"|\"\n\n\n\n def interactions(self):\n pass\n\n\n\n" ]
[ [ "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tpulkit/txt2vid
[ "679b1672fb3221c6b5fe576a158974556047c201" ]
[ "Wav2Lip/util/wav2lip_inference_funcs.py" ]
[ "import numpy as np\nimport os\nimport cv2\nfrom models import Wav2Lip\nimport face_detection\nimport torch\n\ndef get_smoothened_boxes(boxes, T):\n for i in range(len(boxes)):\n if i + T > len(boxes):\n window = boxes[len(boxes) - T:]\n else:\n window = boxes[i: i + T]\n boxes[i] = np.mean(window, axis=0)\n return boxes\n\n\ndef face_detect(images, device, face_det_batch_size, pads, nosmooth):\n detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,\n flip_input=False, device=device)\n\n batch_size = face_det_batch_size\n\n while 1:\n predictions = []\n try:\n for i in range(0, len(images), batch_size):\n predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))\n except RuntimeError:\n if batch_size == 1:\n raise RuntimeError(\n 'Image too big to run face detection on GPU. Please use the --resize_factor argument')\n batch_size //= 2\n print('Recovering from OOM error; New batch size: {}'.format(batch_size))\n continue\n break\n\n results = []\n pady1, pady2, padx1, padx2 = pads\n for rect, image in zip(predictions, images):\n if rect is None:\n cv2.imwrite('temp/faulty_frame.jpg', image) # check this frame where the face was not detected.\n raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')\n\n y1 = max(0, rect[1] - pady1)\n y2 = min(image.shape[0], rect[3] + pady2)\n x1 = max(0, rect[0] - padx1)\n x2 = min(image.shape[1], rect[2] + padx2)\n\n results.append([x1, y1, x2, y2])\n\n boxes = np.array(results)\n if not nosmooth: boxes = get_smoothened_boxes(boxes, T=5)\n results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]\n\n del detector\n return results\n\n\ndef face_detect_wrapper(frames, device, face_det_batch_size, pads, nosmooth, box, static):\n if box[0] == -1:\n if not static:\n face_det_results = face_detect(frames,\n device, face_det_batch_size, pads, nosmooth) # BGR2RGB for CNN face detection\n else:\n face_det_results = face_detect([frames[0]],\n device, face_det_batch_size, pads, nosmooth)\n else:\n print('Using the specified bounding box instead of face detection...')\n y1, y2, x1, x2 = box\n face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]\n return face_det_results\n\n\ndef datagen(frames, face_det_results, mels, start_frame_idx, static, img_size, wav2lip_batch_size):\n # start frame idx is the current frame idx in the output video\n # we start from this point\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n\n start_frame_idx = start_frame_idx % len(frames) # loop back\n num_frames = len(mels)\n # take frames from start_frame_idx to start_frame_idx+num_frames\n # wrapping around if necessary\n if not static:\n if len(frames) == 1:\n frames_current = frames\n face_det_results_current = face_det_results\n if start_frame_idx + num_frames > len(frames):\n frames_current = frames[start_frame_idx:] + frames[:start_frame_idx + num_frames - len(frames)]\n face_det_results_current = face_det_results[start_frame_idx:] + face_det_results[\n :start_frame_idx + num_frames - len(frames)]\n else:\n frames_current = frames[start_frame_idx:start_frame_idx + num_frames]\n face_det_results_current = face_det_results[start_frame_idx:start_frame_idx + num_frames]\n\n else:\n frames_current = frames\n face_det_results_current = face_det_results\n\n for i, m in enumerate(mels):\n idx = 0 if static else i % len(frames_current)\n frame_to_save = frames_current[idx].copy()\n face, coords = 
face_det_results_current[idx].copy()\n\n face = cv2.resize(face, (img_size, img_size))\n\n img_batch.append(face)\n mel_batch.append(m)\n frame_batch.append(frame_to_save)\n coords_batch.append(coords)\n\n if len(img_batch) >= wav2lip_batch_size:\n img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)\n\n img_masked = img_batch.copy()\n img_masked[:, img_size // 2:] = 0\n\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n\n yield img_batch, mel_batch, frame_batch, coords_batch\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n\n if len(img_batch) > 0:\n img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)\n\n img_masked = img_batch.copy()\n img_masked[:, img_size // 2:] = 0\n\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n\n yield img_batch, mel_batch, frame_batch, coords_batch\n\n\ndef _load(checkpoint_path, device):\n if device == 'cuda':\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)\n return checkpoint\n\n\ndef load_model(path, device):\n model = Wav2Lip()\n print(\"Load checkpoint from: {}\".format(path))\n checkpoint = _load(path, device)\n s = checkpoint[\"state_dict\"]\n new_s = {}\n for k, v in s.items():\n new_s[k.replace('module.', '')] = v\n model.load_state_dict(new_s)\n\n model = model.to(device)\n return model.eval()\n\n\ndef preprocess_video(face, fps, resize_factor, rotate, crop):\n if not os.path.isfile(face):\n raise ValueError('--face argument must be a valid path to video/image file')\n\n elif face.split('.')[1] in ['jpg', 'png', 'jpeg']:\n full_frames = [cv2.imread(face)]\n fps = fps\n\n else:\n video_stream = cv2.VideoCapture(face)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n\n print('Reading video frames...')\n\n full_frames = []\n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break\n if resize_factor > 1:\n frame = cv2.resize(frame, (frame.shape[1] // resize_factor, frame.shape[0] // resize_factor))\n\n if rotate:\n frame = cv2.rotate(frame, cv2.cv2.ROTATE_90_CLOCKWISE)\n\n y1, y2, x1, x2 = crop\n if x2 == -1: x2 = frame.shape[1]\n if y2 == -1: y2 = frame.shape[0]\n\n frame = frame[y1:y2, x1:x2]\n\n full_frames.append(frame)\n\n print(\"Number of frames available for inference: \" + str(len(full_frames)))\n\n return full_frames" ]
[ [ "torch.load", "numpy.asarray", "numpy.concatenate", "numpy.mean", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rebecca-palmer/statsmodels
[ "27dd8ba0be0211fdc91097463ce4edd28bce1ef4" ]
[ "statsmodels/sandbox/tsa/fftarma.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 14 19:53:25 2009\n\nAuthor: josef-pktd\n\ngenerate arma sample using fft with all the lfilter it looks slow\nto get the ma representation first\n\napply arma filter (in ar representation) to time series to get white noise\nbut seems slow to be useful for fast estimation for nobs=10000\n\nchange/check: instead of using marep, use fft-transform of ar and ma\n separately, use ratio check theory is correct and example works\n DONE : feels much faster than lfilter\n -> use for estimation of ARMA\n -> use pade (scipy.misc) approximation to get starting polynomial\n from autocorrelation (is autocorrelation of AR(p) related to marep?)\n check if pade is fast, not for larger arrays ?\n maybe pade does not do the right thing for this, not tried yet\n scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)\n raises LinAlgError: singular matrix\n also does not have roots inside unit circle ??\n -> even without initialization, it might be fast for estimation\n -> how do I enforce stationarity and invertibility,\n need helper function\n\nget function drop imag if close to zero from numpy/scipy source, where?\n\n\"\"\"\n\nimport numpy as np\nimport numpy.fft as fft\n#import scipy.fftpack as fft\nfrom scipy import signal\n#from try_var_convolve import maxabs\nfrom statsmodels.tsa.arima_process import ArmaProcess\n\n\n#trying to convert old experiments to a class\n\n\nclass ArmaFft(ArmaProcess):\n '''fft tools for arma processes\n\n This class contains several methods that are providing the same or similar\n returns to try out and test different implementations.\n\n Notes\n -----\n TODO:\n check whether we do not want to fix maxlags, and create new instance if\n maxlag changes. usage for different lengths of timeseries ?\n or fix frequency and length for fft\n\n check default frequencies w, terminology norw n_or_w\n\n some ffts are currently done without padding with zeros\n\n returns for spectral density methods needs checking, is it always the power\n spectrum hw*hw.conj()\n\n normalization of the power spectrum, spectral density: not checked yet, for\n example no variance of underlying process is used\n\n '''\n\n def __init__(self, ar, ma, n):\n #duplicates now that are subclassing ArmaProcess\n super(ArmaFft, self).__init__(ar, ma)\n\n self.ar = np.asarray(ar)\n self.ma = np.asarray(ma)\n self.nobs = n\n #could make the polynomials into cached attributes\n self.arpoly = np.polynomial.Polynomial(ar)\n self.mapoly = np.polynomial.Polynomial(ma)\n self.nar = len(ar) #1d only currently\n self.nma = len(ma)\n\n def padarr(self, arr, maxlag, atend=True):\n '''pad 1d array with zeros at end to have length maxlag\n function that is a method, no self used\n\n Parameters\n ----------\n arr : array_like, 1d\n array that will be padded with zeros\n maxlag : int\n length of array after padding\n atend : bool\n If True (default), then the zeros are added to the end, otherwise\n to the front of the array\n\n Returns\n -------\n arrp : ndarray\n zero-padded array\n\n Notes\n -----\n This is mainly written to extend coefficient arrays for the lag-polynomials.\n It returns a copy.\n\n '''\n if atend:\n return np.r_[arr, np.zeros(maxlag-len(arr))]\n else:\n return np.r_[np.zeros(maxlag-len(arr)), arr]\n\n\n def pad(self, maxlag):\n '''construct AR and MA polynomials that are zero-padded to a common length\n\n Parameters\n ----------\n maxlag : int\n new length of lag-polynomials\n\n Returns\n -------\n ar : ndarray\n extended AR polynomial coefficients\n ma : ndarray\n 
extended AR polynomial coefficients\n\n '''\n arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]\n mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]\n return arpad, mapad\n\n def fftar(self, n=None):\n '''Fourier transform of AR polynomial, zero-padded at end to n\n\n Parameters\n ----------\n n : int\n length of array after zero-padding\n\n Returns\n -------\n fftar : ndarray\n fft of zero-padded ar polynomial\n '''\n if n is None:\n n = len(self.ar)\n return fft.fft(self.padarr(self.ar, n))\n\n def fftma(self, n):\n '''Fourier transform of MA polynomial, zero-padded at end to n\n\n Parameters\n ----------\n n : int\n length of array after zero-padding\n\n Returns\n -------\n fftar : ndarray\n fft of zero-padded ar polynomial\n '''\n if n is None:\n n = len(self.ar)\n return fft.fft(self.padarr(self.ma, n))\n\n def fftarma(self, n=None):\n '''Fourier transform of ARMA polynomial, zero-padded at end to n\n\n The Fourier transform of the ARMA process is calculated as the ratio\n of the fft of the MA polynomial divided by the fft of the AR polynomial.\n\n Parameters\n ----------\n n : int\n length of array after zero-padding\n\n Returns\n -------\n fftarma : ndarray\n fft of zero-padded arma polynomial\n '''\n if n is None:\n n = self.nobs\n return (self.fftma(n) / self.fftar(n))\n\n def spd(self, npos):\n '''raw spectral density, returns Fourier transform\n\n n is number of points in positive spectrum, the actual number of points\n is twice as large. different from other spd methods with fft\n '''\n n = npos\n w = fft.fftfreq(2*n) * 2 * np.pi\n hw = self.fftarma(2*n) #not sure, need to check normalization\n #return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #does not show in plot\n return (hw*hw.conj()).real * 0.5 / np.pi, w\n\n def spdshift(self, n):\n '''power spectral density using fftshift\n\n currently returns two-sided according to fft frequencies, use first half\n '''\n #size = s1+s2-1\n mapadded = self.padarr(self.ma, n)\n arpadded = self.padarr(self.ar, n)\n hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))\n #return np.abs(spd)[n//2-1:]\n w = fft.fftfreq(n) * 2 * np.pi\n wslice = slice(n//2-1, None, None)\n #return (hw*hw.conj()).real[wslice], w[wslice]\n return (hw*hw.conj()).real, w\n\n def spddirect(self, n):\n '''power spectral density using padding to length n done by fft\n\n currently returns two-sided according to fft frequencies, use first half\n '''\n #size = s1+s2-1\n #abs looks wrong\n hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)\n w = fft.fftfreq(n) * 2 * np.pi\n wslice = slice(None, n//2, None)\n #return (np.abs(hw)**2)[wslice], w[wslice]\n return (np.abs(hw)**2) * 0.5/np.pi, w\n\n def _spddirect2(self, n):\n '''this looks bad, maybe with an fftshift\n '''\n #size = s1+s2-1\n hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)\n / fft.fft(np.r_[self.ar[::-1],self.ar], n))\n return (hw*hw.conj()) #.real[n//2-1:]\n\n def spdroots(self, w):\n '''spectral density for frequency using polynomial roots\n\n builds two arrays (number of roots, number of frequencies)\n '''\n return self._spdroots(self.arroots, self.maroots, w)\n\n def _spdroots(self, arroots, maroots, w):\n '''spectral density for frequency using polynomial roots\n\n builds two arrays (number of roots, number of frequencies)\n\n Parameters\n ----------\n arroots : ndarray\n roots of ar (denominator) lag-polynomial\n maroots : ndarray\n roots of ma (numerator) lag-polynomial\n w : array_like\n frequencies for which spd is calculated\n\n Notes\n -----\n this should go into a function\n '''\n w = 
np.atleast_2d(w).T\n cosw = np.cos(w)\n #Greene 5th edt. p626, section 20.2.7.a.\n maroots = 1./maroots\n arroots = 1./arroots\n num = 1 + maroots**2 - 2* maroots * cosw\n den = 1 + arroots**2 - 2* arroots * cosw\n #print 'num.shape, den.shape', num.shape, den.shape\n hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog\n return np.squeeze(hw), w.squeeze()\n\n def spdpoly(self, w, nma=50):\n '''spectral density from MA polynomial representation for ARMA process\n\n References\n ----------\n Cochrane, section 8.3.3\n '''\n mpoly = np.polynomial.Polynomial(self.arma2ma(nma))\n hw = mpoly(np.exp(1j * w))\n spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)\n return spd, w\n\n def filter(self, x):\n '''\n filter a timeseries with the ARMA filter\n\n padding with zero is missing, in example I needed the padding to get\n initial conditions identical to direct filter\n\n Initial filtered observations differ from filter2 and signal.lfilter, but\n at end they are the same.\n\n See Also\n --------\n tsa.filters.fftconvolve\n\n '''\n n = x.shape[0]\n if n == self.fftarma:\n fftarma = self.fftarma\n else:\n fftarma = self.fftma(n) / self.fftar(n)\n tmpfft = fftarma * fft.fft(x)\n return fft.ifft(tmpfft)\n\n def filter2(self, x, pad=0):\n '''filter a time series using fftconvolve3 with ARMA filter\n\n padding of x currently works only if x is 1d\n in example it produces same observations at beginning as lfilter even\n without padding.\n\n TODO: this returns 1 additional observation at the end\n '''\n from statsmodels.tsa.filters import fftconvolve3\n if not pad:\n pass\n elif pad == 'auto':\n #just guessing how much padding\n x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)\n else:\n x = self.padarr(x, x.shape[0] + int(pad), atend=False)\n\n return fftconvolve3(x, self.ma, self.ar)\n\n\n def acf2spdfreq(self, acovf, nfreq=100, w=None):\n '''\n not really a method\n just for comparison, not efficient for large n or long acf\n\n this is also similarly use in tsa.stattools.periodogram with window\n '''\n if w is None:\n w = np.linspace(0, np.pi, nfreq)[:, None]\n nac = len(acovf)\n hw = 0.5 / np.pi * (acovf[0] +\n 2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))\n return hw\n\n def invpowerspd(self, n):\n '''autocovariance from spectral density\n\n scaling is correct, but n needs to be large for numerical accuracy\n maybe padding with zero in fft would be faster\n without slicing it returns 2-sided autocovariance with fftshift\n\n >>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]\n array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,\n 0.045 , 0.0225 , 0.01125 , 0.005625])\n >>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)\n array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,\n 0.045 , 0.0225 , 0.01125 , 0.005625])\n '''\n hw = self.fftarma(n)\n return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]\n\n def spdmapoly(self, w, twosided=False):\n '''ma only, need division for ar, use LagPolynomial\n '''\n if w is None:\n w = np.linspace(0, np.pi, nfreq)\n return 0.5 / np.pi * self.mapoly(np.exp(w*1j))\n\n\n def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):\n \"\"\"Plot results\"\"\"\n rvs = self.generate_sample(nsample=100, burnin=500)\n acf = self.acf(nacf)[:nacf] #TODO: check return length\n pacf = self.pacf(nacf)\n w = np.linspace(0, np.pi, nfreq)\n spdr, wr = self.spdroots(w)\n\n if fig is None:\n import matplotlib.pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(2,2,1)\n ax.plot(rvs)\n ax.set_title('Random Sample \\nar=%s, ma=%s' % (self.ar, 
self.ma))\n\n ax = fig.add_subplot(2,2,2)\n ax.plot(acf)\n ax.set_title('Autocorrelation \\nar=%s, ma=%rs' % (self.ar, self.ma))\n\n ax = fig.add_subplot(2,2,3)\n ax.plot(wr, spdr)\n ax.set_title('Power Spectrum \\nar=%s, ma=%s' % (self.ar, self.ma))\n\n ax = fig.add_subplot(2,2,4)\n ax.plot(pacf)\n ax.set_title('Partial Autocorrelation \\nar=%s, ma=%s' % (self.ar, self.ma))\n\n return fig\n\n\n\n\n\n\n\ndef spdar1(ar, w):\n if np.ndim(ar) == 0:\n rho = ar\n else:\n rho = -ar[1]\n return 0.5 / np.pi /(1 + rho*rho - 2 * rho * np.cos(w))\n\nif __name__ == '__main__':\n def maxabs(x,y):\n return np.max(np.abs(x-y))\n nobs = 200 #10000\n ar = [1, 0.0]\n ma = [1, 0.0]\n ar2 = np.zeros(nobs)\n ar2[:2] = [1, -0.9]\n\n\n\n uni = np.zeros(nobs)\n uni[0]=1.\n #arrep = signal.lfilter(ma, ar, ar2)\n #marep = signal.lfilter([1],arrep, uni)\n # same faster:\n arcomb = np.convolve(ar, ar2, mode='same')\n marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]\n print(marep[:10])\n mafr = fft.fft(marep)\n\n rvs = np.random.normal(size=nobs)\n datafr = fft.fft(rvs)\n y = fft.ifft(mafr*datafr)\n print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0))\n\n arrep = signal.lfilter([1],marep, uni)\n print(arrep[:20]) # roundtrip to ar\n arfr = fft.fft(arrep)\n yfr = fft.fft(y)\n x = fft.ifft(arfr*yfr).real #imag part is e-15\n # the next two are equal, roundtrip works\n print(x[:5])\n print(rvs[:5])\n print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0))\n\n\n # ARMA filter using fft with ratio of fft of ma/ar lag polynomial\n # seems much faster than using lfilter\n\n #padding, note arcomb is already full length\n arcombp = np.zeros(nobs)\n arcombp[:len(arcomb)] = arcomb\n map_ = np.zeros(nobs) #rename: map was shadowing builtin\n map_[:len(ma)] = ma\n ar0fr = fft.fft(arcombp)\n ma0fr = fft.fft(map_)\n y2 = fft.ifft(ma0fr/ar0fr*datafr)\n #the next two are (almost) equal in real part, almost zero but different in imag\n print(y2[:10])\n print(y[:10])\n print(maxabs(y, y2)) # from chfdiscrete\n #1.1282071239631782e-014\n\n ar = [1, -0.4]\n ma = [1, 0.2]\n\n arma1 = ArmaFft([1, -0.5,0,0,0,00, -0.7, 0.3], [1, 0.8], nobs)\n\n nfreq = nobs\n w = np.linspace(0, np.pi, nfreq)\n w2 = np.linspace(0, 2*np.pi, nfreq)\n\n import matplotlib.pyplot as plt\n plt.close('all')\n\n plt.figure()\n spd1, w1 = arma1.spd(2**10)\n print(spd1.shape)\n _ = plt.plot(spd1)\n plt.title('spd fft complex')\n\n plt.figure()\n spd2, w2 = arma1.spdshift(2**10)\n print(spd2.shape)\n _ = plt.plot(w2, spd2)\n plt.title('spd fft shift')\n\n plt.figure()\n spd3, w3 = arma1.spddirect(2**10)\n print(spd3.shape)\n _ = plt.plot(w3, spd3)\n plt.title('spd fft direct')\n\n plt.figure()\n spd3b = arma1._spddirect2(2**10)\n print(spd3b.shape)\n _ = plt.plot(spd3b)\n plt.title('spd fft direct mirrored')\n\n plt.figure()\n spdr, wr = arma1.spdroots(w)\n print(spdr.shape)\n plt.plot(w, spdr)\n plt.title('spd from roots')\n\n plt.figure()\n spdar1_ = spdar1(arma1.ar, w)\n print(spdar1_.shape)\n _ = plt.plot(w, spdar1_)\n plt.title('spd ar1')\n\n\n plt.figure()\n wper, spdper = arma1.periodogram(nfreq)\n print(spdper.shape)\n _ = plt.plot(w, spdper)\n plt.title('periodogram')\n\n startup = 1000\n rvs = arma1.generate_sample(startup+10000)[startup:]\n import matplotlib.mlab as mlb\n plt.figure()\n sdm, wm = mlb.psd(x)\n print('sdm.shape', sdm.shape)\n sdm = sdm.ravel()\n plt.plot(wm, sdm)\n plt.title('matplotlib')\n\n from nitime.algorithms import LD_AR_est\n #yule_AR_est(s, order, Nfreqs)\n wnt, spdnt = LD_AR_est(rvs, 10, 512)\n plt.figure()\n 
print('spdnt.shape', spdnt.shape)\n _ = plt.plot(spdnt.ravel())\n print(spdnt[:10])\n plt.title('nitime')\n\n fig = plt.figure()\n arma1.plot4(fig)\n\n #plt.show()\n" ]
[ [ "numpy.linspace", "numpy.asarray", "numpy.squeeze", "numpy.fft.fftshift", "matplotlib.pyplot.plot", "numpy.exp", "numpy.arange", "matplotlib.pyplot.close", "scipy.signal.lfilter", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.polynomial.Polynomial", "numpy.ndim", "numpy.fft.ifft", "numpy.atleast_2d", "numpy.fft.fftfreq", "numpy.corrcoef", "numpy.convolve", "matplotlib.mlab.psd", "numpy.abs", "numpy.fft.fft", "numpy.cos", "numpy.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
tian1327/AutoLDA
[ "be202b70b6d0a02b75ff05016dcd7084c32a9ccf" ]
[ "Hyperband/Embeddings/GLOVE.py" ]
[ "import os\nimport numpy as np\nimport pickle\n# import tqdm\n\ndef load_GLOVE():\n model = 'glove_pretrained_840b_300d.pkl'\n print(\"loading GLOVE pretrained model ......\")\n with open('./Embeddings/GLOVE_pretrained/'+model,'rb') as pk:\n glove_emb = pickle.load(pk)\n print('GLOVE loaded.\\n')\n\n return glove_emb\n\ndef genEmbeddings_GLOVE(keyword):\n # print('gen GLOVE')\n word_embedding = [0 for i in range(300)]\n if keyword in glove_emb:\n word_embedding = glove_emb[keyword]\n else:\n print('--'*10, keyword, 'not found in GLOVE!')\n\n return word_embedding\n\nglove_emb = load_GLOVE()\n\nif __name__ == \"__main__\":\n path_to_glove_file = \"./GLOVE_pretrained/glove.840B.300d.txt\"\n embeddings_dict = {}\n with open(path_to_glove_file) as f:\n for line in f:\n value = line.split(' ')\n word = value[0]\n coefs = np.array(value[1:], dtype = 'float32')\n embeddings_dict[word] = coefs\n\n print('save GLOVE embeddings_dict to pkl ......')\n with open('./GLOVE_pretrained/glove_pretrained_840b_300d.pkl','wb') as f:\n pickle.dump(embeddings_dict, f)\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wyyfkim/MRTA
[ "fab515569d3434cae01733c702fc0e1afc73b552" ]
[ "catkin_ws/src/mrta/src/DataGenerator.py" ]
[ "import os, sys\nimport argparse\nimport pickle\nfrom numpy import random\nfrom Task import Task\nfrom PrecedenceGraph import PrecedenceGraph, Node\nfrom Robot import Robot\nfrom Logger import Logger, LogLevel\n\nclass DataSet:\n\n def __init__(self, p_graphs, robots, beta, bid_alpha, cost_alpha):\n self.p_graphs = p_graphs\n self.robots = robots\n self.beta = beta\n self.bid_alpha = bid_alpha\n self.cost_alpha = cost_alpha \n self.schedules = []\n\nclass DataGenerator:\n\n def __init__(self, map_size_x, map_size_y, logger):\n self._map_size = (map_size_x, map_size_y)\n self._logger = logger\n self.task_types = [1, 2]\n\n def generate_tasks(self, num_of_tasks, task_locations=None):\n if task_locations is not None:\n if len(task_locations) != num_of_tasks:\n self._logger.error(\"generate_tasks: The number of task locations is not same as the number of tasks.\")\n\n tasks = []\n duration = random.randint(20, 40)\n\n for i in range(num_of_tasks):\n task_id = i + 1\n est = random.randint(25, 400)\n lft = est + random.randint(100, 1200)\n task_type = random.choice(self.task_types, 1, p=[0.5, 0.5])[0]\n\n if task_locations is not None:\n pos_x = task_locations[i][0]\n pos_y = task_locations[i][1]\n else:\n pos_x, pos_y = self.generate_locations(1)[0]\n\n tasks.append(Task(est, lft, duration, task_id, pos_x, pos_y, task_type))\n return tasks\n\n def generate_locations(self, num_of_locations):\n locations = []\n for i in range(num_of_locations):\n pos_x = random.randint(0, self._map_size[0])\n pos_y = random.randint(0, self._map_size[1])\n locations.append((pos_x, pos_y))\n return locations\n\n def generate_pgraph(self, tasks, max_num_of_edges):\n p_graph = PrecedenceGraph(tasks)\n min_num_of_edges = len(tasks) / 2\n num_of_edges = min_num_of_edges\n\n if max_num_of_edges > min_num_of_edges:\n num_of_edges = random.randint(min_num_of_edges, max_num_of_edges)\n\n i = 0\n while i < num_of_edges:\n from_task = random.choice(tasks)\n to_task = random.choice(tasks)\n\n if from_task.lft < to_task.lft:\n\n if p_graph.are_connected(from_task, to_task):\n p_graph.remove_edge(from_task, to_task)\n else:\n if p_graph.add_edge(from_task, to_task):\n i += 1\n \n p_graph.build_graph()\n return p_graph\n\n def generate_pgraphs(self, tasks, num_of_pgraphs, max_num_of_edges):\n p_graphs = [] \n\n for i in range(num_of_pgraphs):\n p_graph = self.generate_pgraph(tasks, max_num_of_edges)\n p_graphs.append(p_graph)\n\n return p_graphs \n\n def generate_robots(self, num_of_robots, robot_speed):\n locations = self.generate_locations(num_of_robots) \n robots = []\n task_types = [1,2]\n\n for i in range(num_of_robots):\n robot_id = i + 1\n capability = set()\n ran = random.uniform()\n\n #first robot capable of doing all tasks\n if i == 0 or ran > 0.66:\n capability = set(task_types)\n elif ran > 0.33:\n capability.add(task_types[0])\n else:\n capability.add(task_types[1])\n\n robot = Robot(robot_id, locations[i][0], locations[i][1], capability, robot_speed, self._logger) \n robots.append(robot)\n\n return robots\n\nif __name__ == \"__main__\":\n\n '''if len(sys.argv) < 2:\n print(\"ERROR starting datageneration\")\n exit(1)'''\n\n data_dir = \"../data/\"\n ##dsfile_name = 'dataset' + sys.argv[1] + '.pickle'\n dsfile_name = '../data/dataset1.pickle'\n\n parser = argparse.ArgumentParser(description=\"MRTA Data Generator\")\n\n parser.add_argument('--x',\n help='X Dimention of Map',\n dest='map_x',\n type=int,\n default=100,\n action='store')\n\n parser.add_argument('--y',\n help='Y Dimention of Map',\n dest='map_y',\n 
type=int,\n default=100,\n action='store')\n\n args = parser.parse_args()\n\n logger = Logger(LogLevel.OFF[0])\n map_x = args.map_x\n map_y = args.map_y\n \n num_of_pgraphs = 50\n ##robot_count_arr = [2, 4, 8]\n ##task_count_arr = [5, 10, 20, 30]\n robot_count_arr = [1]\n task_count_arr = [5]\n\n\n dg = DataGenerator(map_x, map_y, logger)\n robots = { }\n for robot_count in robot_count_arr:\n robots[robot_count] = dg.generate_robots(robot_count, 1)\n\n p_graphs = { }\n for task_count in task_count_arr:\n p_graphs[task_count] = {}\n tasks = dg.generate_tasks(task_count)\n print(tasks)\n max_possible_edges = (task_count * (task_count - 1))/2\n max_num_of_edges = min(3 * task_count, max_possible_edges)\n p_graphs[task_count] = dg.generate_pgraphs(tasks, num_of_pgraphs, max_num_of_edges)\n\n ds = DataSet(p_graphs, robots, 0.25, 0.75, 0.75)\n\n pickle.dump(robots, open('./robots.pickle', 'w'))\n pickle.dump(p_graphs, open('./pgraphs.pickle', 'w'))\n pickle.dump(ds, open(dsfile_name, 'w'))\n\n\n\n \n" ]
[ [ "numpy.random.uniform", "numpy.random.choice", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
noranhe/vnpy_optionmaster
[ "180c85f92004d1092bc45032dc31585539de9768" ]
[ "vnpy_optionmaster/ui/manager.py" ]
[ "from typing import Dict, List, Tuple, Optional\nfrom copy import copy\nfrom functools import partial\n\nfrom scipy import interpolate\n\nfrom vnpy.event import Event, EventEngine\nfrom vnpy.trader.engine import MainEngine\nfrom vnpy.trader.ui import QtWidgets, QtCore, QtGui\nfrom vnpy.trader.event import EVENT_TICK, EVENT_TIMER, EVENT_TRADE\nfrom vnpy.trader.object import TickData, TradeData, LogData\nfrom vnpy.trader.utility import save_json, load_json\n\nfrom ..engine import OptionEngine, OptionAlgoEngine\nfrom ..base import (\n EVENT_OPTION_ALGO_PRICING,\n EVENT_OPTION_ALGO_STATUS,\n EVENT_OPTION_ALGO_LOG,\n PortfolioData,\n ChainData,\n OptionData,\n InstrumentData\n)\nfrom .monitor import (\n MonitorCell, IndexCell, BidCell, AskCell, PosCell,\n COLOR_WHITE, COLOR_BLACK\n)\nfrom ..algo import ElectronicEyeAlgo\n\n\nclass AlgoSpinBox(QtWidgets.QSpinBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.setMaximum(999999)\n self.setMinimum(-999999)\n self.setAlignment(QtCore.Qt.AlignCenter)\n\n def get_value(self) -> int:\n \"\"\"\"\"\"\n return self.value()\n\n def set_value(self, value: int) -> None:\n \"\"\"\"\"\"\n self.setValue(value)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.setEnabled(not active)\n\n\nclass AlgoPositiveSpinBox(AlgoSpinBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.setMinimum(0)\n\n\nclass AlgoDoubleSpinBox(QtWidgets.QDoubleSpinBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.setDecimals(1)\n self.setMaximum(9999.9)\n self.setMinimum(0)\n self.setAlignment(QtCore.Qt.AlignCenter)\n\n def get_value(self) -> float:\n \"\"\"\"\"\"\n return self.value()\n\n def set_value(self, value: float) -> None:\n \"\"\"\"\"\"\n self.setValue(value)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.setEnabled(not active)\n\n\nclass AlgoDirectionCombo(QtWidgets.QComboBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.addItems([\n \"双向\",\n \"做多\",\n \"做空\"\n ])\n\n def get_value(self) -> Dict[str, bool]:\n \"\"\"\"\"\"\n if self.currentText() == \"双向\":\n value: dict = {\n \"long_allowed\": True,\n \"short_allowed\": True\n }\n elif self.currentText() == \"做多\":\n value: dict = {\n \"long_allowed\": True,\n \"short_allowed\": False\n }\n else:\n value: dict = {\n \"long_allowed\": False,\n \"short_allowed\": True\n }\n\n return value\n\n def set_value(self, value: dict) -> None:\n \"\"\"\"\"\"\n if value[\"long_allowed\"] and value[\"short_allowed\"]:\n self.setCurrentIndex(0)\n elif value[\"long_allowed\"]:\n self.setCurrentIndex(1)\n else:\n self.setCurrentIndex(2)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.setEnabled(not active)\n\n\nclass AlgoPricingButton(QtWidgets.QPushButton):\n \"\"\"\"\"\"\n\n def __init__(self, vt_symbol: str, manager: \"ElectronicEyeManager\") -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.vt_symbol: str = vt_symbol\n self.manager: ElectronicEyeManager = manager\n\n self.active: bool = False\n self.setText(\"N\")\n self.clicked.connect(self.on_clicked)\n\n def on_clicked(self) -> None:\n \"\"\"\"\"\"\n if self.active:\n self.manager.stop_algo_pricing(self.vt_symbol)\n else:\n self.manager.start_algo_pricing(self.vt_symbol)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.active = active\n\n if active:\n self.setText(\"Y\")\n else:\n 
self.setText(\"N\")\n\n\nclass AlgoTradingButton(QtWidgets.QPushButton):\n \"\"\"\"\"\"\n\n def __init__(self, vt_symbol: str, manager: \"ElectronicEyeManager\") -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.vt_symbol: str = vt_symbol\n self.manager: ElectronicEyeManager = manager\n\n self.active: bool = False\n self.setText(\"N\")\n self.clicked.connect(self.on_clicked)\n\n def on_clicked(self) -> None:\n \"\"\"\"\"\"\n if self.active:\n self.manager.stop_algo_trading(self.vt_symbol)\n else:\n self.manager.start_algo_trading(self.vt_symbol)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.active = active\n\n if active:\n self.setText(\"Y\")\n else:\n self.setText(\"N\")\n\n\nclass ElectronicEyeMonitor(QtWidgets.QTableWidget):\n \"\"\"\"\"\"\n\n signal_tick: QtCore.Signal = QtCore.Signal(Event)\n signal_pricing: QtCore.Signal = QtCore.Signal(Event)\n signal_status: QtCore.Signal = QtCore.Signal(Event)\n signal_trade: QtCore.Signal = QtCore.Signal(Event)\n\n headers: List[Dict] = [\n {\"name\": \"bid_volume\", \"display\": \"买量\", \"cell\": BidCell},\n {\"name\": \"bid_price\", \"display\": \"买价\", \"cell\": BidCell},\n {\"name\": \"ask_price\", \"display\": \"卖价\", \"cell\": AskCell},\n {\"name\": \"ask_volume\", \"display\": \"卖量\", \"cell\": AskCell},\n {\"name\": \"algo_bid_price\", \"display\": \"目标\\n买价\", \"cell\": BidCell},\n {\"name\": \"algo_ask_price\", \"display\": \"目标\\n卖价\", \"cell\": AskCell},\n {\"name\": \"algo_spread\", \"display\": \"价差\", \"cell\": MonitorCell},\n {\"name\": \"ref_price\", \"display\": \"理论价\", \"cell\": MonitorCell},\n {\"name\": \"pricing_impv\", \"display\": \"定价\\n隐波\", \"cell\": MonitorCell},\n {\"name\": \"net_pos\", \"display\": \"净持仓\", \"cell\": PosCell},\n\n {\"name\": \"price_spread\", \"display\": \"价格\\n价差\", \"cell\": AlgoDoubleSpinBox},\n {\"name\": \"volatility_spread\", \"display\": \"隐波\\n价差\", \"cell\": AlgoDoubleSpinBox},\n {\"name\": \"max_pos\", \"display\": \"持仓\\n范围\", \"cell\": AlgoPositiveSpinBox},\n {\"name\": \"target_pos\", \"display\": \"目标\\n持仓\", \"cell\": AlgoSpinBox},\n {\"name\": \"max_order_size\", \"display\": \"最大\\n委托\", \"cell\": AlgoPositiveSpinBox},\n {\"name\": \"direction\", \"display\": \"方向\", \"cell\": AlgoDirectionCombo},\n {\"name\": \"pricing_active\", \"display\": \"定价\", \"cell\": AlgoPricingButton},\n {\"name\": \"trading_active\", \"display\": \"交易\", \"cell\": AlgoTradingButton},\n ]\n\n def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.option_engine: OptionEngine = option_engine\n self.event_engine: EventEngine = option_engine.event_engine\n self.main_engine: MainEngine = option_engine.main_engine\n self.algo_engine: OptionAlgoEngine = option_engine.algo_engine\n self.portfolio_name: str = portfolio_name\n self.setting_filename: str = f\"{portfolio_name}_electronic_eye.json\"\n\n self.cells: Dict[str, Dict] = {}\n\n self.init_ui()\n self.register_event()\n self.load_setting()\n\n def init_ui(self) -> None:\n \"\"\"\"\"\"\n self.setWindowTitle(\"电子眼\")\n self.verticalHeader().setVisible(False)\n self.setEditTriggers(self.NoEditTriggers)\n\n # Set table row and column numbers\n portfolio: PortfolioData = self.option_engine.get_portfolio(self.portfolio_name)\n\n row_count: int = 0\n for chain in portfolio.chains.values():\n row_count += (1 + len(chain.indexes))\n self.setRowCount(row_count)\n\n column_count: int = len(self.headers) * 2 + 1\n self.setColumnCount(column_count)\n\n call_labels: list = 
[d[\"display\"] for d in self.headers]\n put_labels: list = copy(call_labels)\n put_labels.reverse()\n labels: list = call_labels + [\"行权价\"] + put_labels\n self.setHorizontalHeaderLabels(labels)\n\n # Init cells\n strike_column: int = len(self.headers)\n current_row: int = 0\n\n chain_symbols: list = list(portfolio.chains.keys())\n chain_symbols.sort()\n\n for chain_symbol in chain_symbols:\n chain: ChainData = portfolio.get_chain(chain_symbol)\n\n self.setItem(\n current_row,\n strike_column,\n IndexCell(chain.chain_symbol.split(\".\")[0])\n )\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n\n current_row += 1\n\n # Call cells\n call_cells: dict = {}\n\n for column, d in enumerate(self.headers):\n cell_type = d[\"cell\"]\n\n if issubclass(cell_type, QtWidgets.QPushButton):\n cell = cell_type(call.vt_symbol, self)\n else:\n cell = cell_type()\n\n call_cells[d[\"name\"]] = cell\n\n if isinstance(cell, QtWidgets.QTableWidgetItem):\n self.setItem(current_row, column, cell)\n else:\n self.setCellWidget(current_row, column, cell)\n\n self.cells[call.vt_symbol] = call_cells\n\n # Put cells\n put_cells: dict = {}\n put_headers: list = copy(self.headers)\n put_headers.reverse()\n\n for column, d in enumerate(put_headers):\n column += (strike_column + 1)\n\n cell_type = d[\"cell\"]\n\n if issubclass(cell_type, QtWidgets.QPushButton):\n cell = cell_type(put.vt_symbol, self)\n else:\n cell = cell_type()\n\n put_cells[d[\"name\"]] = cell\n\n if isinstance(cell, QtWidgets.QTableWidgetItem):\n self.setItem(current_row, column, cell)\n else:\n self.setCellWidget(current_row, column, cell)\n\n self.cells[put.vt_symbol] = put_cells\n\n # Strike cell\n index_cell: IndexCell = IndexCell(str(call.chain_index))\n self.setItem(current_row, strike_column, index_cell)\n\n # Move to next row\n current_row += 1\n\n self.resizeColumnsToContents()\n\n # Update all net pos and tick cells\n for vt_symbol in self.cells.keys():\n self.update_net_pos(vt_symbol)\n\n tick: Optional[TickData] = self.main_engine.get_tick(vt_symbol)\n if tick:\n self.update_tick(tick)\n\n def load_setting(self) -> None:\n \"\"\"\"\"\"\n fields: list = [\n \"price_spread\",\n \"volatility_spread\",\n \"max_pos\",\n \"target_pos\",\n \"max_order_size\",\n \"direction\"\n ]\n\n setting: dict = load_json(self.setting_filename)\n\n for vt_symbol, cells in self.cells.items():\n buf: Optional[dict] = setting.get(vt_symbol, None)\n if buf:\n for field in fields:\n cells[field].set_value(buf[field])\n\n def save_setting(self) -> None:\n \"\"\"\"\"\"\n fields: list = [\n \"price_spread\",\n \"volatility_spread\",\n \"max_pos\",\n \"target_pos\",\n \"max_order_size\",\n \"direction\"\n ]\n\n setting: dict = {}\n for vt_symbol, cells in self.cells.items():\n buf: dict = {}\n for field in fields:\n buf[field] = cells[field].get_value()\n setting[vt_symbol] = buf\n\n save_json(self.setting_filename, setting)\n\n def register_event(self) -> None:\n \"\"\"\"\"\"\n self.signal_pricing.connect(self.process_pricing_event)\n self.signal_status.connect(self.process_status_event)\n self.signal_tick.connect(self.process_tick_event)\n self.signal_trade.connect(self.process_trade_event)\n\n self.event_engine.register(\n EVENT_OPTION_ALGO_PRICING,\n self.signal_pricing.emit\n )\n self.event_engine.register(\n EVENT_OPTION_ALGO_STATUS,\n self.signal_status.emit\n )\n self.event_engine.register(\n EVENT_TICK,\n self.signal_tick.emit\n )\n self.event_engine.register(\n EVENT_TRADE,\n self.signal_trade.emit\n 
)\n\n def process_tick_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n tick: TickData = event.data\n self.update_tick(tick)\n\n def update_tick(self, tick: TickData) -> None:\n \"\"\"\"\"\"\n cells: Optional[dict] = self.cells.get(tick.vt_symbol, None)\n if not cells:\n return\n\n cells[\"bid_price\"].setText(str(tick.bid_price_1))\n cells[\"ask_price\"].setText(str(tick.ask_price_1))\n cells[\"bid_volume\"].setText(str(tick.bid_volume_1))\n cells[\"ask_volume\"].setText(str(tick.ask_volume_1))\n\n def process_status_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n algo: ElectronicEyeAlgo = event.data\n cells: dict = self.cells[algo.vt_symbol]\n\n cells[\"price_spread\"].update_status(algo.pricing_active)\n cells[\"volatility_spread\"].update_status(algo.pricing_active)\n cells[\"pricing_active\"].update_status(algo.pricing_active)\n\n cells[\"max_pos\"].update_status(algo.trading_active)\n cells[\"target_pos\"].update_status(algo.trading_active)\n cells[\"max_order_size\"].update_status(algo.trading_active)\n cells[\"direction\"].update_status(algo.trading_active)\n cells[\"trading_active\"].update_status(algo.trading_active)\n\n def process_pricing_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n algo: ElectronicEyeAlgo = event.data\n cells: dict = self.cells[algo.vt_symbol]\n\n if algo.ref_price:\n cells[\"algo_bid_price\"].setText(str(algo.algo_bid_price))\n cells[\"algo_ask_price\"].setText(str(algo.algo_ask_price))\n cells[\"algo_spread\"].setText(str(algo.algo_spread))\n cells[\"ref_price\"].setText(str(algo.ref_price))\n cells[\"pricing_impv\"].setText(f\"{algo.pricing_impv * 100:.2f}\")\n else:\n cells[\"algo_bid_price\"].setText(\"\")\n cells[\"algo_ask_price\"].setText(\"\")\n cells[\"algo_spread\"].setText(\"\")\n cells[\"ref_price\"].setText(\"\")\n cells[\"pricing_impv\"].setText(\"\")\n\n def process_trade_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n trade: TradeData = event.data\n self.update_net_pos(trade.vt_symbol)\n\n def update_net_pos(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n cells: Optional[dict] = self.cells.get(vt_symbol, None)\n if not cells:\n return\n\n option: InstrumentData = self.option_engine.get_instrument(vt_symbol)\n cells[\"net_pos\"].setText(str(option.net_pos))\n\n def start_algo_pricing(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n cells: dict = self.cells[vt_symbol]\n\n params: dict = {}\n params[\"price_spread\"] = cells[\"price_spread\"].get_value()\n params[\"volatility_spread\"] = cells[\"volatility_spread\"].get_value()\n\n self.algo_engine.start_algo_pricing(vt_symbol, params)\n\n def stop_algo_pricing(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n self.algo_engine.stop_algo_pricing(vt_symbol)\n\n def start_algo_trading(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n cells: dict = self.cells[vt_symbol]\n\n params = cells[\"direction\"].get_value()\n for name in [\n \"max_pos\",\n \"target_pos\",\n \"max_order_size\"\n ]:\n params[name] = cells[name].get_value()\n\n self.algo_engine.start_algo_trading(vt_symbol, params)\n\n def stop_algo_trading(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n self.algo_engine.stop_algo_trading(vt_symbol)\n\n\nclass ElectronicEyeManager(QtWidgets.QWidget):\n \"\"\"\"\"\"\n\n signal_log = QtCore.Signal(Event)\n\n def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.option_engine: OptionEngine = option_engine\n self.event_Engine: EventEngine = option_engine.event_engine\n self.algo_engine: OptionAlgoEngine = 
option_engine.algo_engine\n self.portfolio_name: str = portfolio_name\n\n self.init_ui()\n self.register_event()\n\n def init_ui(self) -> None:\n \"\"\"\"\"\"\n self.setWindowTitle(\"期权电子眼\")\n\n self.algo_monitor: ElectronicEyeMonitor = ElectronicEyeMonitor(self.option_engine, self.portfolio_name)\n\n self.log_monitor: QtWidgets.QTextEdit = QtWidgets.QTextEdit()\n self.log_monitor.setReadOnly(True)\n self.log_monitor.setMaximumWidth(400)\n\n stop_pricing_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"停止定价\")\n stop_pricing_button.clicked.connect(self.stop_pricing_for_all)\n\n stop_trading_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"停止交易\")\n stop_trading_button.clicked.connect(self.stop_trading_for_all)\n\n self.price_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()\n self.volatility_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()\n self.direction_combo: AlgoDirectionCombo = AlgoDirectionCombo()\n self.max_order_size_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()\n self.target_pos_spin: AlgoSpinBox = AlgoSpinBox()\n self.max_pos_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()\n\n price_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n price_spread_button.clicked.connect(self.set_price_spread_for_all)\n\n volatility_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n volatility_spread_button.clicked.connect(self.set_volatility_spread_for_all)\n\n direction_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n direction_button.clicked.connect(self.set_direction_for_all)\n\n max_order_size_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n max_order_size_button.clicked.connect(self.set_max_order_size_for_all)\n\n target_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n target_pos_button.clicked.connect(self.set_target_pos_for_all)\n\n max_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n max_pos_button.clicked.connect(self.set_max_pos_for_all)\n\n QLabel = QtWidgets.QLabel\n grid: QtWidgets.QGridLayout = QtWidgets.QGridLayout()\n grid.addWidget(QLabel(\"价格价差\"), 0, 0)\n grid.addWidget(self.price_spread_spin, 0, 1)\n grid.addWidget(price_spread_button, 0, 2)\n grid.addWidget(QLabel(\"隐波价差\"), 1, 0)\n grid.addWidget(self.volatility_spread_spin, 1, 1)\n grid.addWidget(volatility_spread_button, 1, 2)\n grid.addWidget(QLabel(\"持仓范围\"), 2, 0)\n grid.addWidget(self.max_pos_spin, 2, 1)\n grid.addWidget(max_pos_button, 2, 2)\n grid.addWidget(QLabel(\"目标持仓\"), 3, 0)\n grid.addWidget(self.target_pos_spin, 3, 1)\n grid.addWidget(target_pos_button, 3, 2)\n grid.addWidget(QLabel(\"最大委托\"), 4, 0)\n grid.addWidget(self.max_order_size_spin, 4, 1)\n grid.addWidget(max_order_size_button, 4, 2)\n grid.addWidget(QLabel(\"方向\"), 5, 0)\n grid.addWidget(self.direction_combo, 5, 1)\n grid.addWidget(direction_button, 5, 2)\n\n hbox1: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()\n hbox1.addWidget(stop_pricing_button)\n hbox1.addWidget(stop_trading_button)\n\n vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()\n vbox.addLayout(hbox1)\n vbox.addLayout(grid)\n vbox.addWidget(self.log_monitor)\n\n hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()\n hbox.addWidget(self.algo_monitor)\n hbox.addLayout(vbox)\n\n self.setLayout(hbox)\n\n def register_event(self) -> None:\n \"\"\"\"\"\"\n self.signal_log.connect(self.process_log_event)\n\n self.event_Engine.register(EVENT_OPTION_ALGO_LOG, self.signal_log.emit)\n\n def process_log_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n log: 
LogData = event.data\n timestr: str = log.time.strftime(\"%H:%M:%S\")\n msg: str = f\"{timestr} {log.msg}\"\n self.log_monitor.append(msg)\n\n def show(self) -> None:\n \"\"\"\"\"\"\n self.algo_engine.init_engine(self.portfolio_name)\n self.algo_monitor.resizeColumnsToContents()\n super().showMaximized()\n\n def set_price_spread_for_all(self) -> None:\n \"\"\"\"\"\"\n price_spread: float = self.price_spread_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"price_spread\"].isEnabled():\n cells[\"price_spread\"].setValue(price_spread)\n\n def set_volatility_spread_for_all(self) -> None:\n \"\"\"\"\"\"\n volatility_spread: float = self.volatility_spread_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"volatility_spread\"].isEnabled():\n cells[\"volatility_spread\"].setValue(volatility_spread)\n\n def set_direction_for_all(self) -> None:\n \"\"\"\"\"\"\n ix: int = self.direction_combo.currentIndex()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"direction\"].isEnabled():\n cells[\"direction\"].setCurrentIndex(ix)\n\n def set_max_order_size_for_all(self) -> None:\n \"\"\"\"\"\"\n size: int = self.max_order_size_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"max_order_size\"].isEnabled():\n cells[\"max_order_size\"].setValue(size)\n\n def set_target_pos_for_all(self) -> None:\n \"\"\"\"\"\"\n pos: int = self.target_pos_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"target_pos\"].isEnabled():\n cells[\"target_pos\"].setValue(pos)\n\n def set_max_pos_for_all(self) -> None:\n \"\"\"\"\"\"\n pos: int = self.max_pos_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"max_pos\"].isEnabled():\n cells[\"max_pos\"].setValue(pos)\n\n def stop_pricing_for_all(self) -> None:\n \"\"\"\"\"\"\n for vt_symbol in self.algo_monitor.cells.keys():\n self.algo_monitor.stop_algo_pricing(vt_symbol)\n\n def stop_trading_for_all(self) -> None:\n \"\"\"\"\"\"\n for vt_symbol in self.algo_monitor.cells.keys():\n self.algo_monitor.stop_algo_trading(vt_symbol)\n\n def closeEvent(self, event: QtGui.QCloseEvent) -> None:\n \"\"\"\"\"\"\n self.algo_monitor.save_setting()\n event.accept()\n\n\nclass VolatilityDoubleSpinBox(QtWidgets.QDoubleSpinBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.setDecimals(1)\n self.setSuffix(\"%\")\n self.setMaximum(200.0)\n self.setMinimum(0)\n\n def get_value(self) -> float:\n \"\"\"\"\"\"\n return self.value()\n\n\nclass PricingVolatilityManager(QtWidgets.QWidget):\n \"\"\"\"\"\"\n\n signal_timer: QtCore.Signal = QtCore.Signal(Event)\n\n def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.option_engine: OptionEngine = option_engine\n self.event_engine: EventEngine = option_engine.event_engine\n self.portfolio: PortfolioData = option_engine.get_portfolio(portfolio_name)\n\n self.cells: Dict[Tuple, Dict] = {}\n self.chain_symbols: List[str] = []\n self.chain_atm_index: Dict[str, str] = {}\n\n self.init_ui()\n self.register_event()\n\n def init_ui(self) -> None:\n \"\"\"\"\"\"\n self.setWindowTitle(\"波动率管理\")\n\n tab: QtWidgets.QTabWidget = QtWidgets.QTabWidget()\n vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()\n vbox.addWidget(tab)\n self.setLayout(vbox)\n\n self.chain_symbols: list = list(self.portfolio.chains.keys())\n self.chain_symbols.sort()\n\n for chain_symbol in self.chain_symbols:\n 
chain: ChainData = self.portfolio.get_chain(chain_symbol)\n\n table: QtWidgets.QTableWidget = QtWidgets.QTableWidget()\n table.setEditTriggers(table.NoEditTriggers)\n table.verticalHeader().setVisible(False)\n table.setRowCount(len(chain.indexes))\n table.horizontalHeader().setSectionResizeMode(\n QtWidgets.QHeaderView.Stretch\n )\n\n labels: list = [\n \"行权价\",\n \"OTM隐波\",\n \"CALL隐波\",\n \"PUT隐波\",\n \"定价隐波\",\n \"执行拟合\"\n ]\n table.setColumnCount(len(labels))\n table.setHorizontalHeaderLabels(labels)\n\n for row, index in enumerate(chain.indexes):\n index_cell: IndexCell = IndexCell(index)\n otm_impv_cell: MonitorCell = MonitorCell(\"\")\n call_impv_cell: MonitorCell = MonitorCell(\"\")\n put_impv_cell: MonitorCell = MonitorCell(\"\")\n\n set_func = partial(\n self.set_pricing_impv,\n chain_symbol=chain_symbol,\n index=index\n )\n pricing_impv_spin: VolatilityDoubleSpinBox = VolatilityDoubleSpinBox()\n pricing_impv_spin.setAlignment(QtCore.Qt.AlignCenter)\n pricing_impv_spin.valueChanged.connect(set_func)\n\n check: QtWidgets.QCheckBox = QtWidgets.QCheckBox()\n\n check_hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()\n check_hbox.setAlignment(QtCore.Qt.AlignCenter)\n check_hbox.addWidget(check)\n\n check_widget: QtWidgets.QWidget = QtWidgets.QWidget()\n check_widget.setLayout(check_hbox)\n\n table.setItem(row, 0, index_cell)\n table.setItem(row, 1, otm_impv_cell)\n table.setItem(row, 2, call_impv_cell)\n table.setItem(row, 3, put_impv_cell)\n table.setCellWidget(row, 4, pricing_impv_spin)\n table.setCellWidget(row, 5, check_widget)\n\n cells: dict = {\n \"otm_impv\": otm_impv_cell,\n \"call_impv\": call_impv_cell,\n \"put_impv\": put_impv_cell,\n \"pricing_impv\": pricing_impv_spin,\n \"check\": check\n }\n\n self.cells[(chain_symbol, index)] = cells\n\n reset_func = partial(self.reset_pricing_impv, chain_symbol=chain_symbol)\n button_reset: QtWidgets.QPushButton = QtWidgets.QPushButton(\"重置\")\n button_reset.clicked.connect(reset_func)\n\n fit_func = partial(self.fit_pricing_impv, chain_symbol=chain_symbol)\n button_fit: QtWidgets.QPushButton = QtWidgets.QPushButton(\"拟合\")\n button_fit.clicked.connect(fit_func)\n\n increase_func = partial(self.increase_pricing_impv, chain_symbol=chain_symbol)\n button_increase: QtWidgets.QPushButton = QtWidgets.QPushButton(\"+0.1%\")\n button_increase.clicked.connect(increase_func)\n\n decrease_func = partial(self.decrease_pricing_impv, chain_symbol=chain_symbol)\n button_decrease: QtWidgets.QPushButton = QtWidgets.QPushButton(\"-0.1%\")\n button_decrease.clicked.connect(decrease_func)\n\n hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()\n hbox.addWidget(button_reset)\n hbox.addWidget(button_fit)\n hbox.addWidget(button_increase)\n hbox.addWidget(button_decrease)\n\n vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()\n vbox.addLayout(hbox)\n vbox.addWidget(table)\n\n chain_widget: QtWidgets.QWidget = QtWidgets.QWidget()\n chain_widget.setLayout(vbox)\n tab.addTab(chain_widget, chain_symbol)\n\n self.update_pricing_impv(chain_symbol)\n\n self.default_foreground = otm_impv_cell.foreground()\n self.default_background = otm_impv_cell.background()\n\n table.resizeRowsToContents()\n\n def register_event(self) -> None:\n \"\"\"\"\"\"\n self.signal_timer.connect(self.process_timer_event)\n\n self.event_engine.register(EVENT_TIMER, self.signal_timer.emit)\n\n def process_timer_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n for chain_symbol in self.chain_symbols:\n self.update_chain_impv(chain_symbol)\n\n def reset_pricing_impv(self, 
chain_symbol: str) -> None:\n \"\"\"\n Set pricing impv to the otm mid impv of each strike price.\n \"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n atm_index: str = chain.atm_index\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n\n if index >= atm_index:\n otm: OptionData = call\n else:\n otm: OptionData = put\n\n call.pricing_impv = otm.mid_impv\n put.pricing_impv = otm.mid_impv\n\n self.update_pricing_impv(chain_symbol)\n\n def fit_pricing_impv(self, chain_symbol: str) -> None:\n \"\"\"\n Fit pricing impv with cubic spline algo.\n \"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n atm_index: str = chain.atm_index\n\n strike_prices: list = []\n pricing_impvs: list = []\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n cells: dict = self.cells[(chain_symbol, index)]\n\n if not cells[\"check\"].isChecked():\n if index >= atm_index:\n otm: OptionData = call\n else:\n otm: OptionData = put\n\n strike_prices.append(otm.strike_price)\n pricing_impvs.append(otm.pricing_impv)\n\n cs: interpolate.CubicSpline = interpolate.CubicSpline(strike_prices, pricing_impvs)\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n\n new_impv: float = float(cs(call.strike_price))\n call.pricing_impv = new_impv\n put.pricing_impv = new_impv\n\n self.update_pricing_impv(chain_symbol)\n\n def increase_pricing_impv(self, chain_symbol: str) -> None:\n \"\"\"\n Increase pricing impv of all options within a chain by 0.1%.\n \"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n\n for option in chain.options.values():\n option.pricing_impv += 0.001\n\n self.update_pricing_impv(chain_symbol)\n\n def decrease_pricing_impv(self, chain_symbol: str) -> None:\n \"\"\"\n Decrease pricing impv of all options within a chain by 0.1%.\n \"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n\n for option in chain.options.values():\n option.pricing_impv -= 0.001\n\n self.update_pricing_impv(chain_symbol)\n\n def set_pricing_impv(self, value: float, chain_symbol: str, index: str) -> None:\n \"\"\"\"\"\"\n new_impv: float = value / 100\n\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n\n call: OptionData = chain.calls[index]\n call.pricing_impv = new_impv\n\n put: OptionData = chain.puts[index]\n put.pricing_impv = new_impv\n\n def update_pricing_impv(self, chain_symbol: str) -> None:\n \"\"\"\"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n atm_index: str = chain.atm_index\n\n for index in chain.indexes:\n if index >= atm_index:\n otm: OptionData = chain.calls[index]\n else:\n otm: OptionData = chain.puts[index]\n\n value: int = round(otm.pricing_impv * 100, 1)\n\n key: tuple = (chain_symbol, index)\n cells: Optional[dict] = self.cells.get(key, None)\n if cells:\n cells[\"pricing_impv\"].setValue(value)\n\n def update_chain_impv(self, chain_symbol: str) -> None:\n \"\"\"\"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n atm_index: str = chain.atm_index\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n if index >= atm_index:\n otm: OptionData = call\n else:\n otm: OptionData = put\n\n cells: dict = self.cells[(chain_symbol, index)]\n cells[\"otm_impv\"].setText(f\"{otm.mid_impv:.1%}\")\n cells[\"call_impv\"].setText(f\"{call.mid_impv:.1%}\")\n 
cells[\"put_impv\"].setText(f\"{put.mid_impv:.1%}\")\n\n current_atm_index: str = self.chain_atm_index.get(chain_symbol, \"\")\n if current_atm_index == atm_index:\n return\n self.chain_atm_index[chain_symbol] = atm_index\n\n if current_atm_index:\n old_cells: dict = self.cells[(chain_symbol, current_atm_index)]\n\n for field in [\"otm_impv\", \"call_impv\", \"put_impv\"]:\n old_cells[field].setForeground(COLOR_WHITE)\n old_cells[field].setBackground(self.default_background)\n\n if atm_index:\n new_cells: dict = self.cells[(chain_symbol, atm_index)]\n\n for field in [\"otm_impv\", \"call_impv\", \"put_impv\"]:\n new_cells[field].setForeground(COLOR_BLACK)\n new_cells[field].setBackground(COLOR_WHITE)\n" ]
[ [ "scipy.interpolate.CubicSpline" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "1.3", "1.8" ], "tensorflow": [] } ]
JeffreyJosanne/nematus_tf
[ "582be1eeba2920bfa8cc064fa642c429f5eddd6d" ]
[ "nematus/data_iterator.py" ]
[ "import numpy\n\nimport gzip\n\nimport shuffle\nfrom util import load_dict\n\ndef fopen(filename, mode='r'):\n if filename.endswith('.gz'):\n return gzip.open(filename, mode)\n return open(filename, mode)\n\nclass FileWrapper(object):\n def __init__(self, fname):\n self.pos = 0\n self.lines = fopen(fname).readlines()\n self.lines = numpy.array(self.lines, dtype=numpy.object)\n def __iter__(self):\n return self\n def next(self):\n if self.pos >= len(self.lines):\n raise StopIteration\n l = self.lines[self.pos]\n self.pos += 1\n return l\n def reset(self):\n self.pos = 0\n def seek(self, pos):\n assert pos == 0\n self.pos = 0\n def readline(self):\n return self.next()\n def shuffle_lines(self, perm):\n self.lines = self.lines[perm]\n self.pos = 0\n def __len__(self):\n return len(self.lines)\n\nclass TextIterator:\n \"\"\"Simple Bitext iterator.\"\"\"\n def __init__(self, source, target,\n source_dicts, target_dict,\n batch_size=128,\n maxlen=100,\n n_words_source=-1,\n n_words_target=-1,\n skip_empty=False,\n shuffle_each_epoch=False,\n sort_by_length=True,\n use_factor=False,\n maxibatch_size=20,\n keep_data_in_memory=False):\n if keep_data_in_memory:\n self.source, self.target = FileWrapper(source), FileWrapper(target)\n if shuffle_each_epoch:\n r = numpy.random.permutation(len(self.source))\n self.source.shuffle_lines(r)\n self.target.shuffle_lines(r)\n elif shuffle_each_epoch:\n self.source_orig = source\n self.target_orig = target\n self.source, self.target = shuffle.main([self.source_orig, self.target_orig], temporary=True)\n else:\n self.source = fopen(source, 'r')\n self.target = fopen(target, 'r')\n self.source_dicts = []\n for source_dict in source_dicts:\n self.source_dicts.append(load_dict(source_dict))\n self.target_dict = load_dict(target_dict)\n\n self.keep_data_in_memory = keep_data_in_memory\n self.batch_size = batch_size\n self.maxlen = maxlen\n self.skip_empty = skip_empty\n self.use_factor = use_factor\n\n self.n_words_source = n_words_source\n self.n_words_target = n_words_target\n\n if self.n_words_source > 0:\n for d in self.source_dicts:\n for key, idx in d.items():\n if idx >= self.n_words_source:\n del d[key]\n\n if self.n_words_target > 0:\n for key, idx in self.target_dict.items():\n if idx >= self.n_words_target:\n del self.target_dict[key]\n\n self.shuffle = shuffle_each_epoch\n self.sort_by_length = sort_by_length\n\n self.source_buffer = []\n self.target_buffer = []\n self.k = batch_size * maxibatch_size\n \n\n self.end_of_data = False\n\n def __iter__(self):\n return self\n\n def reset(self):\n if self.shuffle:\n if self.keep_data_in_memory:\n r = numpy.random.permutation(len(self.source))\n self.source.shuffle_lines(r)\n self.target.shuffle_lines(r)\n else:\n self.source, self.target = shuffle.main([self.source_orig, self.target_orig], temporary=True)\n else:\n self.source.seek(0)\n self.target.seek(0)\n\n def next(self):\n if self.end_of_data:\n self.end_of_data = False\n self.reset()\n raise StopIteration\n\n source = []\n target = []\n\n # fill buffer, if it's empty\n assert len(self.source_buffer) == len(self.target_buffer), 'Buffer size mismatch!'\n\n if len(self.source_buffer) == 0:\n for ss in self.source:\n ss = ss.split()\n tt = self.target.readline().split()\n \n if self.skip_empty and (len(ss) == 0 or len(tt) == 0):\n continue\n if len(ss) > self.maxlen or len(tt) > self.maxlen:\n continue\n\n self.source_buffer.append(ss)\n self.target_buffer.append(tt)\n if len(self.source_buffer) == self.k:\n break\n\n if len(self.source_buffer) == 0 or 
len(self.target_buffer) == 0:\n self.end_of_data = False\n self.reset()\n raise StopIteration\n\n # sort by target buffer\n if self.sort_by_length:\n tlen = numpy.array([len(t) for t in self.target_buffer])\n tidx = tlen.argsort()\n\n _sbuf = [self.source_buffer[i] for i in tidx]\n _tbuf = [self.target_buffer[i] for i in tidx]\n\n self.source_buffer = _sbuf\n self.target_buffer = _tbuf\n\n else:\n self.source_buffer.reverse()\n self.target_buffer.reverse()\n\n\n try:\n # actual work here\n while True:\n\n # read from source file and map to word index\n try:\n ss = self.source_buffer.pop()\n except IndexError:\n break\n tmp = []\n for w in ss:\n if self.use_factor:\n w = [self.source_dicts[i][f] if f in self.source_dicts[i] else 1 for (i,f) in enumerate(w.split('|'))]\n else:\n w = [self.source_dicts[0][w] if w in self.source_dicts[0] else 1]\n tmp.append(w)\n ss = tmp\n\n # read from source file and map to word index\n tt = self.target_buffer.pop()\n tt = [self.target_dict[w] if w in self.target_dict else 1\n for w in tt]\n if self.n_words_target > 0:\n tt = [w if w < self.n_words_target else 1 for w in tt]\n\n source.append(ss)\n target.append(tt)\n\n if len(source) >= self.batch_size or \\\n len(target) >= self.batch_size:\n break\n except IOError:\n self.end_of_data = True\n\n return source, target\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joeranbosma/ModelsGenesis
[ "5b18ea88d662e5250523434d02cfdcb6b527e634" ]
[ "pytorch/utils.py" ]
[ "from __future__ import print_function\nimport math\nimport os\nimport random\nimport copy\nimport scipy\nimport imageio\nimport string\nimport numpy as np\nfrom skimage.transform import resize\ntry: # SciPy >= 0.19\n from scipy.special import comb\nexcept ImportError:\n from scipy.misc import comb\n\ndef bernstein_poly(i, n, t):\n \"\"\"\n The Bernstein polynomial of n, i as a function of t\n \"\"\"\n\n return comb(n, i) * ( t**(n-i) ) * (1 - t)**i\n\ndef bezier_curve(points, nTimes=1000):\n \"\"\"\n Given a set of control points, return the\n bezier curve defined by the control points.\n\n Control points should be a list of lists, or list of tuples\n such as [ [1,1], \n [2,3], \n [4,5], ..[Xn, Yn] ]\n nTimes is the number of time steps, defaults to 1000\n\n See http://processingjs.nihongoresources.com/bezierinfo/\n \"\"\"\n\n nPoints = len(points)\n xPoints = np.array([p[0] for p in points])\n yPoints = np.array([p[1] for p in points])\n\n t = np.linspace(0.0, 1.0, nTimes)\n\n polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ])\n \n xvals = np.dot(xPoints, polynomial_array)\n yvals = np.dot(yPoints, polynomial_array)\n\n return xvals, yvals\n\ndef data_augmentation(x, y, prob=0.5):\n # augmentation by flipping\n cnt = 3\n while random.random() < prob and cnt > 0:\n degree = random.choice([0, 1, 2])\n x = np.flip(x, axis=degree)\n y = np.flip(y, axis=degree)\n cnt = cnt - 1\n\n return x, y\n\ndef nonlinear_transformation(x, prob=0.5):\n if random.random() >= prob:\n return x\n points = [[0, 0], [random.random(), random.random()], [random.random(), random.random()], [1, 1]]\n xpoints = [p[0] for p in points]\n ypoints = [p[1] for p in points]\n xvals, yvals = bezier_curve(points, nTimes=100000)\n if random.random() < 0.5:\n # Half change to get flip\n xvals = np.sort(xvals)\n else:\n xvals, yvals = np.sort(xvals), np.sort(yvals)\n nonlinear_x = np.interp(x, xvals, yvals)\n return nonlinear_x\n\ndef local_pixel_shuffling(x, prob=0.5):\n if random.random() >= prob:\n return x\n image_temp = copy.deepcopy(x)\n orig_image = copy.deepcopy(x)\n _, img_rows, img_cols, img_deps = x.shape\n num_block = 10000\n for _ in range(num_block):\n block_noise_size_x = random.randint(1, img_rows//10)\n block_noise_size_y = random.randint(1, img_cols//10)\n block_noise_size_z = random.randint(1, img_deps//10)\n noise_x = random.randint(0, img_rows-block_noise_size_x)\n noise_y = random.randint(0, img_cols-block_noise_size_y)\n noise_z = random.randint(0, img_deps-block_noise_size_z)\n window = orig_image[0, noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z,\n ]\n window = window.flatten()\n np.random.shuffle(window)\n window = window.reshape((block_noise_size_x, \n block_noise_size_y, \n block_noise_size_z))\n image_temp[0, noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z] = window\n local_shuffling_x = image_temp\n\n return local_shuffling_x\n\ndef image_in_painting(x):\n _, img_rows, img_cols, img_deps = x.shape\n cnt = 5\n while cnt > 0 and random.random() < 0.95:\n block_noise_size_x = random.randint(img_rows//6, img_rows//3)\n block_noise_size_y = random.randint(img_cols//6, img_cols//3)\n block_noise_size_z = random.randint(img_deps//6, img_deps//3)\n noise_x = random.randint(3, img_rows-block_noise_size_x-3)\n noise_y = random.randint(3, img_cols-block_noise_size_y-3)\n noise_z = random.randint(3, img_deps-block_noise_size_z-3)\n x[:, 
\n noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z] = np.random.rand(block_noise_size_x, \n block_noise_size_y, \n block_noise_size_z, ) * 1.0\n cnt -= 1\n return x\n\ndef image_out_painting(x):\n _, img_rows, img_cols, img_deps = x.shape\n image_temp = copy.deepcopy(x)\n x = np.random.rand(x.shape[0], x.shape[1], x.shape[2], x.shape[3], ) * 1.0\n block_noise_size_x = img_rows - random.randint(3*img_rows//7, 4*img_rows//7)\n block_noise_size_y = img_cols - random.randint(3*img_cols//7, 4*img_cols//7)\n block_noise_size_z = img_deps - random.randint(3*img_deps//7, 4*img_deps//7)\n noise_x = random.randint(3, img_rows-block_noise_size_x-3)\n noise_y = random.randint(3, img_cols-block_noise_size_y-3)\n noise_z = random.randint(3, img_deps-block_noise_size_z-3)\n x[:, \n noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z] = image_temp[:, noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z]\n cnt = 4\n while cnt > 0 and random.random() < 0.95:\n block_noise_size_x = img_rows - random.randint(3*img_rows//7, 4*img_rows//7)\n block_noise_size_y = img_cols - random.randint(3*img_cols//7, 4*img_cols//7)\n block_noise_size_z = img_deps - random.randint(3*img_deps//7, 4*img_deps//7)\n noise_x = random.randint(3, img_rows-block_noise_size_x-3)\n noise_y = random.randint(3, img_cols-block_noise_size_y-3)\n noise_z = random.randint(3, img_deps-block_noise_size_z-3)\n x[:, \n noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z] = image_temp[:, noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z]\n cnt -= 1\n return x\n \n\n\ndef generate_pair(img, batch_size, config, status=\"test\"):\n img_rows, img_cols, img_deps = img.shape[2], img.shape[3], img.shape[4]\n while True:\n index = [i for i in range(img.shape[0])]\n random.shuffle(index)\n y = img[index[:batch_size]]\n x = copy.deepcopy(y)\n for n in range(batch_size):\n \n # Autoencoder\n x[n] = copy.deepcopy(y[n])\n \n # Flip\n x[n], y[n] = data_augmentation(x[n], y[n], config.flip_rate)\n\n # Local Shuffle Pixel\n x[n] = local_pixel_shuffling(x[n], prob=config.local_rate)\n \n # Apply non-Linear transformation with an assigned probability\n x[n] = nonlinear_transformation(x[n], config.nonlinear_rate)\n \n # Inpainting & Outpainting\n if random.random() < config.paint_rate:\n if random.random() < config.inpaint_rate:\n # Inpainting\n x[n] = image_in_painting(x[n])\n else:\n # Outpainting\n x[n] = image_out_painting(x[n])\n\n # Save sample images module\n if config.save_samples is not None and status == \"train\" and random.random() < 0.01:\n n_sample = random.choice( [i for i in range(config.batch_size)] )\n sample_1 = np.concatenate((x[n_sample,0,:,:,2*img_deps//6], y[n_sample,0,:,:,2*img_deps//6]), axis=1)\n sample_2 = np.concatenate((x[n_sample,0,:,:,3*img_deps//6], y[n_sample,0,:,:,3*img_deps//6]), axis=1)\n sample_3 = np.concatenate((x[n_sample,0,:,:,4*img_deps//6], y[n_sample,0,:,:,4*img_deps//6]), axis=1)\n sample_4 = np.concatenate((x[n_sample,0,:,:,5*img_deps//6], y[n_sample,0,:,:,5*img_deps//6]), axis=1)\n final_sample = np.concatenate((sample_1, sample_2, sample_3, sample_4), axis=0)\n final_sample = final_sample * 255.0\n final_sample = final_sample.astype(np.uint8)\n file_name = ''.join([random.choice(string.ascii_letters + 
string.digits) for n in range(10)])+'.'+config.save_samples\n imageio.imwrite(os.path.join(config.sample_path, config.exp_name, file_name), final_sample)\n\n yield (x, y)\n" ]
[ [ "numpy.dot", "numpy.linspace", "numpy.sort", "numpy.random.shuffle", "numpy.concatenate", "numpy.interp", "numpy.random.rand", "numpy.array", "numpy.flip", "scipy.misc.comb" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.19", "0.18", "1.2", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] } ]
mattiaguerri/transformers
[ "ebc36108dc1c20985905c79f7d6a00f57f3cd3ae" ]
[ "src/transformers/modeling_mobilebert.py" ]
[ "# MIT License\n#\n# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport logging\nimport math\nimport os\nimport warnings\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom transformers.modeling_bert import BertIntermediate\n\nfrom .activations import gelu, gelu_new, swish\nfrom .configuration_mobilebert import MobileBertConfig\nfrom .file_utils import add_start_docstrings, add_start_docstrings_to_callable\nfrom .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer\n\n\nlogger = logging.getLogger(__name__)\nMOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\"mobilebert-uncased\"]\n\n\ndef load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.replace(\"ffn_layer\", \"ffn\")\n name = name.replace(\"FakeLayerNorm\", \"LayerNorm\")\n name = name.replace(\"extra_output_weights\", \"dense/kernel\")\n name = name.replace(\"bert\", \"mobilebert\")\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\ndef mish(x):\n return x * torch.tanh(nn.functional.softplus(x))\n\n\nclass NoNorm(nn.Module):\n def __init__(self, feat_size, eps=None):\n super().__init__()\n self.bias = nn.Parameter(torch.zeros(feat_size))\n self.weight = nn.Parameter(torch.ones(feat_size))\n\n def forward(self, input_tensor):\n return input_tensor * self.weight + self.bias\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish, \"gelu_new\": gelu_new, \"mish\": mish}\nNORM2FN = {\"layer_norm\": torch.nn.LayerNorm, \"no_norm\": NoNorm}\n\n\nclass MobileBertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.trigram_input = config.trigram_input\n self.embedding_size = config.embedding_size\n self.hidden_size = config.hidden_size\n\n self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n embed_dim_multiplier = 3 if self.trigram_input else 1\n 
embedded_input_size = self.embedding_size * embed_dim_multiplier\n self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)\n\n self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n seq_length = input_shape[1]\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.trigram_input:\n # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited\n # Devices (https://arxiv.org/abs/2004.02984)\n #\n # The embedding table in BERT models accounts for a substantial proportion of model size. To compress\n # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.\n # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512\n # dimensional output.\n inputs_embeds = torch.cat(\n [\n F.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0),\n inputs_embeds,\n F.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0),\n ],\n dim=2,\n )\n if self.trigram_input or self.embedding_size != self.hidden_size:\n inputs_embeds = self.embedding_transformation(inputs_embeds)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass MobileBertSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.true_hidden_size, self.all_head_size)\n self.key = nn.Linear(config.true_hidden_size, self.all_head_size)\n self.value = nn.Linear(\n config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size\n )\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n query_tensor,\n key_tensor,\n value_tensor,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n ):\n mixed_query_layer = self.query(query_tensor)\n mixed_key_layer = self.key(key_tensor)\n mixed_value_layer = self.value(value_tensor)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" 
and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n\nclass MobileBertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_bottleneck = config.use_bottleneck\n self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)\n if not self.use_bottleneck:\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, residual_tensor):\n layer_outputs = self.dense(hidden_states)\n if not self.use_bottleneck:\n layer_outputs = self.dropout(layer_outputs)\n layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)\n return layer_outputs\n\n\nclass MobileBertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = MobileBertSelfAttention(config)\n self.output = MobileBertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n ):\n self_outputs = self.self(\n query_tensor,\n key_tensor,\n value_tensor,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n attention_output = self.output(self_outputs[0], layer_input)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass MobileBertIntermediate(BertIntermediate):\n def __init__(self, config):\n 
super().__init__(config)\n self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)\n\n\nclass OutputBottleneck(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, residual_tensor):\n layer_outputs = self.dense(hidden_states)\n layer_outputs = self.dropout(layer_outputs)\n layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)\n return layer_outputs\n\n\nclass MobileBertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_bottleneck = config.use_bottleneck\n self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)\n if not self.use_bottleneck:\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n else:\n self.bottleneck = OutputBottleneck(config)\n\n def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2):\n layer_output = self.dense(intermediate_states)\n if not self.use_bottleneck:\n layer_output = self.dropout(layer_output)\n layer_output = self.LayerNorm(layer_output + residual_tensor_1)\n else:\n layer_output = self.LayerNorm(layer_output + residual_tensor_1)\n layer_output = self.bottleneck(layer_output, residual_tensor_2)\n return layer_output\n\n\nclass BottleneckLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n layer_input = self.dense(hidden_states)\n layer_input = self.LayerNorm(layer_input)\n return layer_input\n\n\nclass Bottleneck(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.key_query_shared_bottleneck = config.key_query_shared_bottleneck\n self.use_bottleneck_attention = config.use_bottleneck_attention\n self.input = BottleneckLayer(config)\n if self.key_query_shared_bottleneck:\n self.attention = BottleneckLayer(config)\n\n def forward(self, hidden_states):\n # This method can return three different tuples of values. These different values make use of bottlenecks,\n # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory\n # usage. These linear layer have weights that are learned during training.\n #\n # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the\n # key, query, value, and \"layer input\" to be used by the attention layer.\n # This bottleneck is used to project the hidden. 
This last layer input will be used as a residual tensor\n # in the attention self output, after the attention scores have been computed.\n #\n # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return\n # four values, three of which have been passed through a bottleneck: the query and key, passed through the same\n # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.\n #\n # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,\n # and the residual layer will be this value passed through a bottleneck.\n\n bottlenecked_hidden_states = self.input(hidden_states)\n if self.use_bottleneck_attention:\n return (bottlenecked_hidden_states,) * 4\n elif self.key_query_shared_bottleneck:\n shared_attention_input = self.attention(hidden_states)\n return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)\n else:\n return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)\n\n\nclass FFNOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, residual_tensor):\n layer_outputs = self.dense(hidden_states)\n layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)\n return layer_outputs\n\n\nclass FFNLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.intermediate = MobileBertIntermediate(config)\n self.output = FFNOutput(config)\n\n def forward(self, hidden_states):\n intermediate_output = self.intermediate(hidden_states)\n layer_outputs = self.output(intermediate_output, hidden_states)\n return layer_outputs\n\n\nclass MobileBertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_bottleneck = config.use_bottleneck\n self.num_feedforward_networks = config.num_feedforward_networks\n\n self.attention = MobileBertAttention(config)\n self.intermediate = MobileBertIntermediate(config)\n self.output = MobileBertOutput(config)\n if self.use_bottleneck:\n self.bottleneck = Bottleneck(config)\n if config.num_feedforward_networks > 1:\n self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n ):\n if self.use_bottleneck:\n query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)\n else:\n query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4\n\n self_attention_outputs = self.attention(\n query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n )\n attention_output = self_attention_outputs[0]\n s = (attention_output,)\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n if self.num_feedforward_networks != 1:\n for i, ffn_module in enumerate(self.ffn):\n attention_output = ffn_module(attention_output)\n s += (attention_output,)\n\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output, hidden_states)\n outputs = (\n (layer_output,)\n + outputs\n + (\n torch.tensor(1000),\n 
query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n attention_output,\n intermediate_output,\n )\n + s\n )\n return outputs\n\n\nclass MobileBertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n ):\n all_hidden_states = ()\n all_attentions = ()\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # last-layer hidden state, (all hidden states), (all attentions)\n\n\nclass MobileBertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.do_activate = config.classifier_activation\n if self.do_activate:\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n if not self.do_activate:\n return first_token_tensor\n else:\n pooled_output = self.dense(first_token_tensor)\n pooled_output = F.tanh(pooled_output)\n return pooled_output\n\n\nclass MobileBertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = NORM2FN[\"layer_norm\"](config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass MobileBertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = MobileBertPredictionHeadTransform(config)\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)\n self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))\n hidden_states += self.bias\n return hidden_states\n\n\nclass MobileBertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions 
= MobileBertLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass MobileBertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = MobileBertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass MobileBertPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = MobileBertConfig\n pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST\n load_tf_weights = load_tf_weights_in_mobilebert\n base_model_prefix = \"mobilebert\"\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, (nn.LayerNorm, NoNorm)):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nMOBILEBERT_START_DOCSTRING = r\"\"\"\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~transformers.MobileBertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nMOBILEBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.MobileBertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.encode_plus` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? 
<../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n if the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask\n is used in the cross-attention if the model is configured as a decoder.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertModel(MobileBertPreTrainedModel):\n \"\"\"\n https://arxiv.org/pdf/2004.02984.pdf\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self.embeddings = MobileBertEmbeddings(config)\n self.encoder = MobileBertEncoder(config)\n self.pooler = MobileBertPooler(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_hidden_states=None,\n output_attentions=None,\n ):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (:obj:`torch.FloatTensor`: of shape 
:obj:`(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during pre-training.\n\n This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertModel, MobileBertTokenizer\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained(model_name_or_path)\n model = MobileBertModel.from_pretrained(model_name_or_path)\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, self.device\n )\n\n # If a 2D ou 3D attention mask is provided for the cross-attention\n # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask 
= None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n outputs = (sequence_output, pooled_output,) + encoder_outputs[\n 1:\n ] # add hidden_states and attentions if they are here\n return outputs # sequence_output, pooled_output, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with two heads on top as done during the pre-training: a `masked language modeling` head and\n a `next sentence prediction (classification)` head. \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForPreTraining(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.mobilebert = MobileBertModel(config)\n self.cls = MobileBertPreTrainingHeads(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def tie_weights(self):\n \"\"\"\n Tie the weights between the input embeddings and the output embeddings.\n If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning\n the weights instead.\n \"\"\"\n output_embeddings = self.get_output_embeddings()\n input_embeddings = self.get_input_embeddings()\n\n resized_dense = nn.Linear(\n input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False\n )\n kept_data = self.cls.predictions.dense.weight.data[\n ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1])\n ]\n resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data\n self.cls.predictions.dense = resized_dense\n self.cls.predictions.dense.to(self.device)\n\n if output_embeddings is not None:\n self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):\n Labels for computing the next sequence prediction 
(classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.\n prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False\n continuation before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n from transformers import MobileBertTokenizer, MobileBertForPreTraining\n import torch\n tokenizer = MobileBertTokenizer.from_pretrained(model_name_or_path)\n model = MobileBertForPreTraining.from_pretrained(model_name_or_path)\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n prediction_scores, seq_relationship_scores = outputs[:2]\n\n \"\"\"\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n outputs = (prediction_scores, seq_relationship_score,) + outputs[\n 2:\n ] # add hidden states and attention if they are here\n\n if labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"MobileBert Model with a `language modeling` head on top. 
\"\"\", MOBILEBERT_START_DOCSTRING)\nclass MobileBertForMaskedLM(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.mobilebert = MobileBertModel(config)\n self.cls = MobileBertOnlyMLMHead(config)\n self.config = config\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def tie_weights(self):\n \"\"\"\n Tie the weights between the input embeddings and the output embeddings.\n If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning\n the weights instead.\n \"\"\"\n output_embeddings = self.get_output_embeddings()\n input_embeddings = self.get_input_embeddings()\n\n resized_dense = nn.Linear(\n input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False\n )\n kept_data = self.cls.predictions.dense.weight.data[\n ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1])\n ]\n resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data\n self.cls.predictions.dense = resized_dense\n self.cls.predictions.dense.to(self.device)\n\n if output_embeddings is not None:\n self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n **kwargs\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n masked_lm_loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Masked language modeling loss.\n prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForMaskedLM\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')\n model = MobileBertForMaskedLM.from_pretrained('mobilebert-uncased')\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=input_ids)\n\n loss, prediction_scores = outputs[:2]\n\n \"\"\"\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n outputs = (masked_lm_loss,) + outputs\n\n return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)\n\n\nclass MobileBertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with a `next sentence prediction (classification)` head on top. \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.mobilebert = MobileBertModel(config)\n self.cls = MobileBertOnlyNSPHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see ``input_ids`` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided):\n Next sequence prediction (classification) loss.\n seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForNextSentencePrediction\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')\n model = MobileBertForNextSentencePrediction.from_pretrained('mobilebert-uncased')\n\n prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n next_sentence = \"The sky is blue due to the shorter wavelength of blue light.\"\n encoding = tokenizer.encode_plus(prompt, next_sentence, return_tensors='pt')\n\n loss, logits = model(**encoding, next_sentence_label=torch.LongTensor([1]))\n assert logits[0, 0] < logits[0, 1] # next sentence was random\n \"\"\"\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n pooled_output = outputs[1]\n\n seq_relationship_score = self.cls(pooled_output)\n\n outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss()\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n outputs = (next_sentence_loss,) + outputs\n\n return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForSequenceClassification(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.mobilebert = MobileBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import BertTokenizer, BertForSequenceClassification\n import torch\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForSequenceClassification.from_pretrained('bert-base-uncased')\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n\n loss, logits = outputs[:2]\n \"\"\"\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n pooled_output = outputs[1]\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = 
loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n return outputs # (loss), logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForQuestionAnswering(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.mobilebert = MobileBertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-start scores (before SoftMax).\n end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-end scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForQuestionAnswering\n import torch\n\n tokenizer = BertTokenizer.from_pretrained(model_name_or_path)\n model = MobileBertForQuestionAnswering.from_pretrained(model_name_or_path)\n\n question, text = \"Who was Jim Henson?\", \"Jim Henson was a 
nice puppet\"\n encoding = tokenizer.encode_plus(question, text)\n input_ids, token_type_ids = encoding[\"input_ids\"], encoding[\"token_type_ids\"]\n start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))\n\n all_tokens = tokenizer.convert_ids_to_tokens(input_ids)\n answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])\n\n assert answer == \"a nice puppet\"\n\n \"\"\"\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n outputs = (start_logits, end_logits,) + outputs[2:]\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForMultipleChoice(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.mobilebert = MobileBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, num_choices, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):\n Classification loss.\n classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):\n `num_choices` is the second dimension of the input tensors. 
(see `input_ids` above).\n\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForMultipleChoice\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')\n model = MobileBertForMultipleChoice.from_pretrained('mobilebert-uncased')\n\n prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n choice0 = \"It is eaten with a fork and a knife.\"\n choice1 = \"It is eaten while held in the hand.\"\n labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1\n\n encoding = tokenizer.batch_encode_plus([[prompt, choice0], [prompt, choice1]], return_tensors='pt', pad_to_max_length=True)\n outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1\n\n # the linear classifier still needs to be trained\n loss, logits = outputs[:2]\n \"\"\"\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n outputs = (loss,) + outputs\n\n return outputs # (loss), reshaped_logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MoibleBert Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
\"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForTokenClassification(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.mobilebert = MobileBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :\n Classification loss.\n scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForTokenClassification\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')\n model = MobileBertForTokenClassification.from_pretrained('mobilebert-uncased')\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n\n loss, scores = outputs[:2]\n\n \"\"\"\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n 
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), scores, (hidden_states), (attentions)\n" ]
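The token-classification head at the end of the file above keeps only unpadded positions in the loss by rewriting masked labels to the loss function's ignore_index. A minimal standalone sketch of that pattern (batch size, sequence length, and label count are illustrative assumptions, not values from this entry):

import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, num_labels = 2, 5, 3
logits = torch.randn(batch_size, seq_len, num_labels)
labels = torch.randint(0, num_labels, (batch_size, seq_len))
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 0]])

loss_fct = CrossEntropyLoss()  # ignore_index defaults to -100
# keep real tokens; send padded positions to ignore_index so they add no loss
active_loss = attention_mask.view(-1) == 1
active_labels = torch.where(
    active_loss,
    labels.view(-1),
    torch.tensor(loss_fct.ignore_index).type_as(labels),
)
loss = loss_fct(logits.view(-1, num_labels), active_labels)
print(loss)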
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.nn.MSELoss", "torch.zeros", "torch.from_numpy", "torch.nn.Embedding", "torch.arange", "tensorflow.train.load_variable", "torch.nn.Linear", "torch.matmul", "numpy.transpose", "torch.tensor", "tensorflow.train.list_variables", "torch.nn.functional.tanh", "torch.nn.functional.softplus", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
sergeyshilin/kaggle-statoil-iceberg-classifier-challenge
[ "fa5c7e721297d9e1478593951b4d9cf16a0cd66d" ]
[ "ensembling/make_submit.py" ]
[ "import sys\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import log_loss\n\npower = float(sys.argv[1])\n\ndef transform(preds):\n return preds ** power / (preds ** power + (1.0 - preds) ** power)\n\nwith open('submit_id', 'r') as submit_id:\n last_submit_id = int(submit_id.read())\n\nlast_submit_id = str(last_submit_id).zfill(3)\n\nensemble = pd.read_csv('ensembles/ensemble_{}.csv'.format(last_submit_id))\nensemble_cv = pd.read_csv('ensembles_cv/ensemble_cv_{}.csv'.format(last_submit_id))\n\ny_cv = ensemble_cv.is_iceberg\nx_cv = ensemble_cv.drop('is_iceberg', axis=1).values.mean(axis=1)\n\nprint ('cv log_loss before: {}'.format(log_loss(y_cv, x_cv)))\n\nx_cv_calib = transform(x_cv)\nprint ('cv log_loss calibration: {}'.format(log_loss(y_cv, x_cv_calib)))\n\nx_cv_clip = np.clip(x_cv, 0.001, 0.999)\nprint ('cv log_loss clip: {}'.format(log_loss(y_cv, x_cv_clip)))\n\nx_cv_calib_clip = np.clip(transform(x_cv), 0.001, 0.999)\nprint ('cv log_loss calib+clip: {}'.format(log_loss(y_cv, x_cv_calib_clip)))\n\nsubmit = pd.read_csv('../data/sample_submission.csv')\nsubmit.is_iceberg = np.clip(transform(ensemble.values.mean(axis=1)), 0.001, 0.999)\nsubmit.to_csv('submits/submission_{}_calib_clip_1_4.csv'.format(last_submit_id), index=False)\n\n" ]
[ [ "sklearn.metrics.log_loss", "pandas.read_csv", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
FoVNull/MFDSL
[ "8c6fc99260c1c02f4f45cfb14a111028d2a96ded" ]
[ "validation/new/bare_model.py" ]
[ "from typing import Dict, Any\n\nimport tensorflow as tf\nfrom tensorflow.keras.utils import plot_model\nfrom kashgari_local.abc_feature_model import ABCClassificationModel\nfrom kashgari.layers import L\n\n\nclass Bare_Model(ABCClassificationModel):\n def __init__(self, embedding, **params):\n super().__init__(embedding)\n self.feature_D = params[\"feature_D\"]\n\n @classmethod\n def default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:\n \"\"\"\n Get hyper parameters of model\n Returns:\n hyper parameters dict\n\n activation_function list:\n {softmax, elu, selu, softplus, softsign, swish,\n relu, gelu, tanh, sigmoid, exponential,\n hard_sigmoid, linear, serialize, deserialize, get}\n \"\"\"\n return {\n 'layer_bilstm1': {\n 'units': 128,\n 'return_sequences': True\n },\n 'layer_time_distributed': {},\n 'conv_layer1': {\n 'filters': 128,\n 'kernel_size': 4,\n 'padding': 'valid',\n 'activation': 'relu'\n },\n 'layer_output1': {\n 'activation': 'softmax'\n },\n }\n\n def build_model_arc(self):\n \"\"\"\n build model architectural\n\n BiLSTM + Convolution + Attention\n \"\"\"\n features = tf.keras.Input(shape=(None, self.feature_D), name=\"features\")\n\n l1_reg = tf.keras.regularizers.l1(0.01)\n l2_reg = tf.keras.regularizers.L2(0.01)\n\n output_dim = self.label_processor.vocab_size\n config = self.hyper_parameters\n embed_model = self.embedding.embed_model\n # Define layers for BiLSTM\n layer_stack = [\n L.Bidirectional(L.LSTM(**config['layer_bilstm1'])),\n L.Dropout(rate=0.2),\n ]\n\n # tensor flow in Layers {tensor:=layer(tensor)}\n tensor = embed_model.output\n for layer in layer_stack:\n tensor = layer(tensor)\n\n # extend features\n features_tensor = L.Dense(64, kernel_regularizer=l1_reg)(features)\n # tensor = L.Concatenate(axis=-1)([features_tensor, tensor])\n query = L.Concatenate(axis=-1)([tensor, features_tensor])\n key = L.Concatenate(axis=-1)([features_tensor, tensor])\n\n query_value_attention_seq = L.Attention()([query, key])\n # query_value_attention_seq = L.MultiHeadAttention(\n # num_heads=4, key_dim=2, dropout=0.5\n # )(tensor, tensor)\n\n query_encoding = L.GlobalMaxPool1D()(query)\n query_value_attention = L.GlobalMaxPool1D()(query_value_attention_seq)\n\n input_tensor = L.Concatenate(axis=1)([query_encoding, query_value_attention])\n\n # output tensor\n input_tensor = L.Dropout(rate=0.1)(input_tensor)\n output_tensor = L.Dense(\n output_dim, activation='sigmoid', name=\"output0\",\n kernel_regularizer=l2_reg\n )(input_tensor)\n self.tf_model = tf.keras.Model(inputs=[embed_model.inputs, features], outputs=output_tensor)\n\n # plot_model(self.tf_model, to_file=\"D:/PycProject/TripleC/reference/model.png\")\n" ]
[ [ "tensorflow.keras.regularizers.L2", "tensorflow.keras.Input", "tensorflow.keras.Model", "tensorflow.keras.regularizers.l1" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sandeepsn1997/civilapp
[ "749027e904924575f60883c5d44688101f7e9864" ]
[ "resources/draw1.py" ]
[ "\n\nimport matplotlib.pyplot as plt\n\ndef draw_line_x(ox, oy, x, y, length):\n for i in range(length+1):\n ox.append(x+i)\n oy.append(y)\n return ox, oy\n\ndef draw_line_y(ox, oy, x, y, length):\n for i in range(length+1):\n ox.append(x)\n oy.append(y+i)\n return ox, oy\n\ndef draw_sqr(ox, oy, x, y, length):\n draw_line_x(ox, oy, x, y, length)\n draw_line_x(ox, oy, x, y+length, length)\n draw_line_y(ox, oy, x, y, length)\n draw_line_y(ox, oy, x + length, y, length)\n return ox, oy\n\ndef draw_rect(ox, oy, x, y, length, breadth):\n draw_line_x(ox, oy, x, y, length)\n draw_line_x(ox, oy, x, y+breadth, length)\n draw_line_y(ox, oy, x, y, breadth)\n draw_line_y(ox, oy, x + length, y, breadth)\n\n return ox, oy\n\n\ndef draw_layout():\n ox, oy = [], []\n\n # Outter Box\n ox, oy = draw_rect(ox, oy, -60, 0, 470,300)\n\n #Sites Row1\n ox, oy = draw_rect(ox, oy, 40, 240,25, 30)\n ox, oy = draw_rect(ox, oy, 85, 240, 25, 30)\n ox, oy = draw_rect(ox, oy, 130, 240, 25, 30)\n\n\n ox, oy = draw_rect(ox, oy, 265, 240, 25, 30)\n ox, oy = draw_rect(ox, oy, 310, 240, 25, 30)\n # outer boundry for row1\n ox, oy = draw_rect(ox, oy, 30, 225, 45, 55)\n ox, oy = draw_rect(ox, oy, 75, 225, 45, 55)\n ox, oy = draw_rect(ox, oy, 120, 225, 45, 55)\n\n\n ox, oy = draw_rect(ox, oy, 255, 225, 45, 55)\n ox, oy = draw_rect(ox, oy, 300, 225, 45, 55)\n\n # Sites Row2\n ox, oy = draw_rect(ox, oy, 40, 150, 25, 30)\n ox, oy = draw_rect(ox, oy, 85, 150, 25, 30)\n ox, oy = draw_rect(ox, oy, 130, 150, 25, 30)\n\n\n ox, oy = draw_rect(ox, oy, 310, 150, 25, 30)\n ox, oy = draw_rect(ox, oy, 355, 150, 25, 30)\n # outer boundry for row2\n ox, oy = draw_rect(ox, oy, 30, 140, 45, 55)\n ox, oy = draw_rect(ox, oy, 75, 140, 45, 55)\n ox, oy = draw_rect(ox, oy, 120, 140, 45, 55)\n\n\n\n ox, oy = draw_rect(ox, oy, 300, 140, 45, 55)\n ox, oy = draw_rect(ox, oy, 345, 140, 45, 55)\n # Sites Row3\n ox, oy = draw_rect(ox, oy, 40,100, 25, 30)\n ox, oy = draw_rect(ox, oy, 85, 100, 25, 30)\n ox, oy = draw_rect(ox, oy, 130, 100, 25, 30)\n\n\n\n ox, oy = draw_rect(ox, oy, 310, 100, 25, 30)\n ox, oy = draw_rect(ox, oy,355 , 100, 25, 30)\n\n # outer boundry for row3\n\n ox, oy = draw_rect(ox, oy, 30, 85, 45, 55)\n ox, oy = draw_rect(ox, oy, 75, 85, 45, 55)\n ox, oy = draw_rect(ox, oy, 120, 85, 45, 55)\n\n\n\n ox, oy = draw_rect(ox, oy, 300, 85, 45, 55)\n ox, oy = draw_rect(ox, oy, 345, 85, 45, 55)\n # Sites Row4\n ox, oy = draw_rect(ox, oy, 40, 10,25, 30)\n ox, oy = draw_rect(ox, oy, 85, 10, 25, 30)\n ox, oy = draw_rect(ox, oy, 130, 10, 25, 30)\n ox, oy = draw_rect(ox, oy, 310, 10, 25, 30)\n ox, oy = draw_rect(ox, oy, 355, 10, 25, 30)\n\n # outer boundry for row4\n ox, oy = draw_rect(ox, oy, 30, 0, 45, 55)\n ox, oy = draw_rect(ox, oy, 75, 0, 45, 55)\n ox, oy = draw_rect(ox, oy, 120,0, 45, 55)\n\n\n\n ox, oy = draw_rect(ox, oy, 300, 0, 45, 55)\n ox, oy = draw_rect(ox, oy, 345, 0, 45, 55)\n\n return ox, oy\n\ndef draw_empty_space():\n ox, oy = [], []\n ox, oy = draw_sqr(ox, oy, -50, 265, 25)#1\n ox, oy = draw_rect(ox, oy, -50,65,25,135)#2\n ox, oy = draw_sqr(ox, oy,190,240,35)#4\n ox, oy = draw_sqr(ox, oy, 225, 150, 20)#6\n ox, oy = draw_rect(ox, oy, 190,150, 25,35)#5\n ox, oy = draw_rect(ox, oy, -50, 5,40,50 )\n\n ox, oy = draw_rect(ox, oy, 360, 240, 45,55)#7\n ox, oy = draw_rect(ox, oy, 190,90,30,45)#8\n ox, oy = draw_sqr(ox, oy, 240,5, 25)#10\n ox, oy = draw_rect(ox, oy,230,105,40,30)#9\n ox, oy = draw_sqr(ox, oy,190 , 5, 40)#11\n\n return ox, oy\n\nplt.figure(figsize=(10, 8))\nox, oy = draw_layout()\nplt.plot(ox, oy, \"sk\")\n\n\nox, oy = 
draw_empty_space()\nplt.plot(ox, oy, \"sg\")\nplt.axis(\"equal\")\nplt.grid(True)\n\n\nplt.annotate(\"1\",xy=(-40,275))\nplt.annotate(\"2\",xy=(-40,135))\nplt.annotate(\"3\",xy=(-35,30))\nplt.annotate(\"4\",xy=(205,255))\nplt.annotate(\"5\",xy=(195,165))\nplt.annotate(\"6\",xy=(230,155))\nplt.annotate(\"7\",xy=(375,265))\nplt.annotate(\"8\",xy=(200,112))\nplt.annotate(\"9\",xy=(245,115))\nplt.annotate(\"10\",xy=(245,15))\nplt.annotate(\"11\",xy=(200,25))\nplt.xlabel('X-Coordinates')\nplt.ylabel('Y-Coordinates')\nplt.title('Construction Site Layout Plan',fontsize=15,color=\"red\")\nplt.figtext(0.905,0.8,\"1=Security shed\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.77,\"2=Parking\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.74,\"3=Site office\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.71,\"4=Canteen\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.68,\"5=Labour Shed\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.65,\"6=Toilet\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.62,\"7=Ware House\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.59,\"8=Power House\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.56,\"9=Water tank\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.53,\"10=Q/C Lab\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.50,\"11=Batching Plant\",fontsize=10,color=\"blue\")\n\n\nplt.show()\n\n#plt.axis('scaled')\n#plt.axis(\"square\")\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.annotate", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figtext", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dego1985/wave_simulation
[ "05f5119aab158e0958170d90066c2b87b998e658" ]
[ "sim_004_complex_001/module/plot.py" ]
[ "import numpy as np\nfrom glumpy import app, gl, glm, gloo\nimport torch\n\nimport module.gpu_work as gw\n\n\nclass mesh():\n def __init__(self, motion):\n # plane\n self.motion = motion\n self.N = N = motion.N[:2]\n self.dx = dx = motion.dx\n\n # vertices\n X = [dx * (np.arange(N[i]) - N[i] * 0.5) for i in range(2)]\n x, y = X\n x, y = np.meshgrid(x, y)\n z = motion.update_numpy()\n\n vertices = np.transpose([x, y, z], (1, 2, 0)).reshape(-1, 3)\n\n # colors\n colors = np.random.randn(len(vertices), 4).astype(np.float32)\n\n # outline\n idx = []\n for i in np.arange(N[1]-1):\n for j in np.arange(N[0]-1):\n offset = i * N[0] + j\n idx.append([offset, offset+1, offset+1+N[0], offset+N[0]] +\n [offset, offset+N[0], offset+1, offset+1+N[0]])\n outline = np.array(idx).reshape(-1)\n\n # outline\n idx = np.arange(N[0]*N[1])\n point_idx = np.array(idx).reshape(-1)\n\n ############################################################\n # glumpy Vertex Buffer\n dtype = [(\"position\", np.float32, 3),\n (\"color\", np.float32, 4)]\n VertexBuffer = np.zeros(len(vertices), dtype)\n VertexBuffer[\"position\"] = vertices\n VertexBuffer[\"color\"] = colors\n VertexBuffer = VertexBuffer.view(gloo.VertexBuffer)\n\n # glumpy Index Buffer\n outline = outline.astype(np.uint32).view(gloo.IndexBuffer)\n\n # glumpy Index Buffer\n point_idx = point_idx.astype(np.uint32).view(gloo.IndexBuffer)\n\n ############################################################\n # self\n self.VertexBuffer = VertexBuffer\n self.outline = outline\n self.point_idx = point_idx\n\n ############################################################\n # torch\n v = torch.from_numpy(np.transpose(vertices, (1, 0)).reshape(1, 3, N[0], N[1]).astype(np.float32)).cuda()\n c = torch.from_numpy(np.transpose(colors, (1, 0)).reshape(1, 4, N[0], N[1]).astype(np.float32)).cuda()\n self.v = v\n self.c = c\n\n def update(self, dt=0):\n motion = self.motion\n v = self.v\n c = self.c\n\n z = motion.update(dt)\n\n zc = 0.5 * z\n c[0, 0] = 0 + 2*zc\n c[0, 1] = 0.5 - zc\n c[0, 2] = 1.0 + 2*zc\n c[0, 3] = 1\n\n v[0, 2] = z*0.3\n\n\nclass plot3d():\n def __init__(self, obj):\n self.obj = obj\n self.phi, self.theta = 0, 0\n\n # init\n self.init_window()\n self.bind_obj(obj)\n self.update_VertexBuffer()\n app.run()\n\n def init_window(self):\n window = app.Window(width=1920, height=1080,\n color=(0.30, 0.30, 0.35, 1.00))\n\n @window.event\n def on_init():\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glPolygonOffset(1, 1)\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glLineWidth(0.55)\n\n @window.event\n def on_draw(dt):\n window.clear()\n self.on_draw(dt)\n\n @window.event\n def on_resize(width, height):\n program = self.program\n program['projection'] = glm.perspective(\n 45.0, width / float(height), 0.1, 100.0)\n\n self.window = window\n\n def bind_obj(self, obj):\n # make obj\n vertex = \"\"\"\n uniform vec4 ucolor;\n uniform mat4 model;\n uniform mat4 view;\n uniform mat4 projection;\n attribute vec3 position;\n attribute vec4 color;\n varying vec4 v_color;\n void main()\n {\n v_color = ucolor * color;\n gl_Position = projection * view * model * vec4(position,1.0);\n }\n \"\"\"\n\n fragment = \"\"\"\n varying vec4 v_color;\n void main()\n {\n gl_FragColor = v_color;\n }\n \"\"\"\n\n VertexBuffer = obj.VertexBuffer\n outline = obj.outline\n point_idx = obj.point_idx\n program = gloo.Program(vertex, fragment)\n\n program.bind(VertexBuffer)\n program['model'] = np.eye(4, dtype=np.float32)\n program['view'] = glm.translation(0, 0, -5)\n\n VertexBuffer.activate()\n 
VertexBuffer.deactivate()\n\n self.RegisteredBuffer = gw.make_RegisteredBuffer(VertexBuffer)\n self.program = program\n self.outline = outline\n self.point_idx = point_idx\n \n def update_VertexBuffer(self, dt=0):\n # torch\n self.obj.update(dt)\n v = self.obj.v\n c = self.obj.c\n V_ = torch.cat((v, c), dim=1)\n V_ = V_.contiguous(memory_format=torch.channels_last)\n\n # copy\n gw.copy_torch2RegisteredBuffer(self.RegisteredBuffer, V_[0])\n\n def on_draw(self, dt):\n program = self.program\n window = self.window\n\n # set title\n window.set_title(str(\n window.fps).encode(\"ascii\"))\n\n self.update_VertexBuffer(dt)\n\n # # Point\n # gl.glDisable(gl.GL_BLEND)\n # gl.glEnable(gl.GL_DEPTH_TEST)\n # gl.glPointSize(5)\n # program['ucolor'] = 1, 1, 1, 1\n # program.draw(gl.GL_POINTS, self.point_idx)\n\n # Fill\n gl.glDisable(gl.GL_BLEND)\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glEnable(gl.GL_POLYGON_OFFSET_FILL)\n program['ucolor'] = 1, 1, 1, 1\n program.draw(gl.GL_QUADS, self.outline)\n\n # Outlined program\n # gl.glDisable(gl.GL_POLYGON_OFFSET_FILL)\n # gl.glEnable(gl.GL_BLEND)\n # gl.glDepthMask(gl.GL_FALSE)\n # program['ucolor'] = 0, 0, 0, 1\n # program.draw(gl.GL_LINES, self.outline)\n # gl.glDepthMask(gl.GL_TRUE)\n\n # Make program rotate\n self.theta += 0*dt # degrees\n self.phi += 2*dt # degrees\n model = np.eye(4, dtype=np.float32)\n glm.rotate(model, -90, 1, 0, 0)\n glm.rotate(model, self.theta, 0, 0, 1)\n glm.rotate(model, self.phi, 0, 1, 0)\n glm.rotate(model, 45, 1, 0, 0)\n program['model'] = model\n\n" ]
[ [ "torch.cat", "numpy.arange", "numpy.eye", "numpy.transpose", "numpy.array", "numpy.meshgrid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WeiChengTseng/Basic_Peptides_Model
[ "0b2bb8f157ec4c9752382eca8ffcbaca94fcaa45" ]
[ "model.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport math\nimport os\n\nclass Model():\n def __init__(self, num_label, word_dim=10, batch_size=32):\n self.num_label = num_label\n self.word_dim = word_dim\n self.batch_size = batch_size\n\n return\n\n def build(self, x, y, reg, keep_prob):\n \"\"\"\n Build the model.\n\n Input:\n - x: the input data, that is, the peptide sequences.\n - y: the ground truth of the peptides.\n - reg: the weight of the regression.\n Output:\n - loss: the loss of the model.\n - logits: the result of the logit regression.\n - predict: the prediction of the peptides.\n \"\"\"\n logits, params = self.sub_model(x, keep_prob)\n loss = self.loss(y, logits, params, reg)\n predict = self.predict(logits)\n\n return loss, logits, predict\n\n def sub_model(self, x, keep_prob):\n \"\"\"\n Define the architecture of the model.\n\n Input:\n - x: the input data, that is, the peptide sequences.\n - keep_prob: the keep probability of dropout.\n\n Output:\n - logits: the result of the logit regression.\n - params: some weights and filters used in the model.\n \"\"\"\n\n params = []\n with tf.name_scope('filters'):\n Filter1 = tf.Variable(tf.truncated_normal([6, self.word_dim, 128], stddev = 0.1), name = 'Filter_1')\n Filter2 = tf.Variable(tf.truncated_normal([6, 128, 128], stddev = 0.1), name = 'Filter_2')\n Filter3 = tf.Variable(tf.truncated_normal([5, 128, 256], stddev = 0.1), name = 'Filter_3')\n Filter4 = tf.Variable(tf.truncated_normal([5, 256, 256], stddev = 0.1), name = 'Filter_4')\n Filter5 = tf.Variable(tf.truncated_normal([5, 256, 512], stddev = 0.1), name = 'Filter_5')\n Filter6 = tf.Variable(tf.truncated_normal([5, 512, 512], stddev = 0.1), name = 'Filter_6')\n self.variable_summaries(Filter1)\n self.variable_summaries(Filter2)\n self.variable_summaries(Filter3)\n self.variable_summaries(Filter4)\n self.variable_summaries(Filter5)\n self.variable_summaries(Filter6)\n\n with tf.name_scope('weights'):\n W7 = tf.Variable(tf.truncated_normal([7168, 1024], stddev = 0.1), name = 'W7')\n W8 = tf.Variable(tf.truncated_normal([1024, self.num_label], stddev = 0.1), name = 'W8')\n self.variable_summaries(W7)\n self.variable_summaries(W8)\n\n with tf.name_scope('bias'):\n b1 = tf.Variable(tf.zeros([128]), name = 'b1')\n b2 = tf.Variable(tf.zeros([128]), name = 'b2')\n b3 = tf.Variable(tf.zeros([256]), name = 'b3')\n b4 = tf.Variable(tf.zeros([256]), name = 'b4')\n b5 = tf.Variable(tf.zeros([512]), name = 'b5')\n b6 = tf.Variable(tf.zeros([512]), name = 'b6')\n b7 = tf.Variable(tf.zeros([1024]), name = 'b7')\n b8 = tf.Variable(tf.zeros([self.num_label]), name = 'b8')\n self.variable_summaries(b1)\n self.variable_summaries(b2)\n self.variable_summaries(b3)\n self.variable_summaries(b4)\n self.variable_summaries(b5)\n self.variable_summaries(b6)\n self.variable_summaries(b7)\n self.variable_summaries(b8)\n alpha = 0.2\n\n with tf.name_scope('Conv_1'):\n L1 = tf.nn.conv1d(x, Filter1, stride = 1, padding = 'VALID', data_format='NHWC') + b1\n with tf.name_scope('leaky_relu_1'):\n L1_act = tf.nn.leaky_relu(L1, alpha)\n L1_bn = tf.layers.batch_normalization(L1_act, scale = False, name = 'bn_1')\n\n with tf.name_scope('Conv_2'):\n L2 = tf.nn.conv1d(L1_bn, Filter2, stride = 1, padding = 'VALID') + b2\n with tf.name_scope('leaky_relu_2'):\n L2_act = tf.nn.leaky_relu(L2, alpha)\n L2_pooled = tf.layers.max_pooling1d(L2_act, pool_size = 2, strides = 2, name = 'max_pool_2')\n L2_bn = tf.layers.batch_normalization(L2_pooled, scale = False, name = 'bn_2')\n\n with tf.name_scope('Conv_3'): \n L3 
= tf.nn.conv1d(L2_bn, Filter3, stride = 1, padding = 'VALID') + b3\n with tf.name_scope('leaky_relu_3'):\n L3_act = tf.nn.leaky_relu(L3, alpha)\n L3_pooled = tf.layers.max_pooling1d(L3_act, pool_size = 2, strides = 2, name = 'max_pool_3')\n L3_bn = tf.layers.batch_normalization(L3_pooled, scale = False, name = 'bn_3')\n\n with tf.name_scope('Conv_4'): \n L4 = tf.nn.conv1d(L3_bn, Filter4, stride = 1, padding = 'VALID') + b4\n with tf.name_scope('leaky_relu_4'):\n L4_act = tf.nn.leaky_relu(L4, alpha)\n L4_pooled = tf.layers.max_pooling1d(L4_act, pool_size = 2, strides = 2, name = 'max_pool_4')\n L4_bn = tf.layers.batch_normalization(L4_pooled, scale = False, name = 'bn_4')\n\n with tf.name_scope('Conv_5'): \n L5 = tf.nn.conv1d(L4_bn, Filter5, stride = 1, padding = 'VALID') + b5\n with tf.name_scope('leaky_relu_5'):\n L5_act = tf.nn.leaky_relu(L5, alpha)\n L5_pooled = tf.layers.max_pooling1d(L5_act, pool_size = 2, strides = 2, name = 'max_pool_5')\n L5_bn = tf.layers.batch_normalization(L5_pooled, scale = False, name = 'bn_5')\n\n with tf.name_scope('Conv_6'): \n L6 = tf.nn.conv1d(L5_bn, Filter6, stride = 1, padding = 'VALID') + b6\n with tf.name_scope('leaky_relu_6'):\n L6_act = tf.nn.leaky_relu(L6, alpha)\n L6_pooled = tf.layers.max_pooling1d(L6_act, pool_size = 2, strides = 2, name = 'max_pool_6')\n L6_bn = tf.layers.batch_normalization(L6_pooled, scale = False, name = 'bn_6')\n \n reshaped_data = tf.reshape(L6_bn, shape = (self.batch_size, -1), name = 'reshape')\n\n with tf.name_scope('full_connected_7'):\n L7 = tf.matmul(reshaped_data, W7) + b7\n with tf.name_scope('leaky_relu_7'):\n L7_act = tf.nn.leaky_relu(L7, alpha)\n\n L7_dropout = tf.nn.dropout(L7_act, keep_prob=keep_prob, name = 'dropout')\n L7_bn = tf.layers.batch_normalization(L7_dropout, scale = True, name = 'bm_7')\n \n with tf.name_scope('full_connected_8'):\n L8 = tf.matmul(L7_bn, W8) + b8\n\n logits = L8\n params += [Filter1, Filter2, Filter3, Filter4, Filter5, Filter6]\n params += [W7, W8]\n return logits, params\n\n def predict(self, logits):\n \"\"\"\n Predict the labels according to the model.\n\n Input:\n - logits: the result of the logit regression.\n\n Output:\n - x: the result of the prediction\n \"\"\"\n x = tf.nn.sigmoid(logits)\n \n return x\n\n def loss(self, labels, logits, params, reg):\n \"\"\"\n Define the loss of the model.\n\n Input:\n - label: the ground truth of the prediction.\n - logits: the result of the logit regression.\n - params: some weights and filters used in the model.\n - reg: the weight of the L2 loss\n\n Output:\n - loss: the loss of the model.\n \"\"\"\n\n L2_loss_list = list(map(tf.nn.l2_loss, params))\n L2_loss = tf.add_n(L2_loss_list)\n loss = tf.losses.sigmoid_cross_entropy(labels, logits) + L2_loss * reg\n tf.summary.scalar('loss', loss)\n return loss\n \n def variable_summaries(self, var):\n \"\"\"\n Define the tensorboard scalar and histogram summary.\n\n Input:\n - var: the variable we want to summarize in tensorboard.\n \"\"\"\n with tf.name_scope(\"summaries\"):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))\n tf.summary.scalar('stddev',stddev)\n tf.summary.scalar('max',tf.reduce_max(var))\n tf.summary.scalar('min',tf.reduce_min(var))\n tf.summary.histogram('histogram',var)\n return\n\n" ]
[ [ "tensorflow.zeros", "tensorflow.nn.conv1d", "tensorflow.summary.scalar", "tensorflow.add_n", "tensorflow.layers.max_pooling1d", "tensorflow.layers.batch_normalization", "tensorflow.name_scope", "tensorflow.square", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.nn.sigmoid", "tensorflow.truncated_normal", "tensorflow.summary.histogram", "tensorflow.nn.leaky_relu", "tensorflow.reduce_max", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.losses.sigmoid_cross_entropy", "tensorflow.reduce_min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
DHsLc/test
[ "4b2fb49fd7578afe7e289936f347af581b5bdab1", "f286c78b619b81ca95ba9f738cc0de4e14440e44", "5828e285209ff8c3d1bef2e4bd7c55ca611080d5" ]
[ "tensorflow/python/eager/tape_test.py", "tensorflow/contrib/learn/python/learn/estimators/estimator.py", "tensorflow/python/debug/lib/session_debug_testlib.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Basic tests for autograd-based gradients.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import custom_gradient\nfrom tensorflow.python.eager import tensor\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\n# Importing nn_grad for the registration functions.\nfrom tensorflow.python.ops import nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import nn_ops\n\n\n@custom_gradient.custom_gradient\ndef two_outputs(a, b):\n mm = math_ops.matmul(a, b)\n r = math_ops.reduce_sum(mm)\n\n def grad(dmm, dr):\n return [\n math_ops.matmul(dmm, b, transpose_b=True) +\n math_ops.matmul(array_ops.ones_like(b * dr), b, transpose_b=True),\n math_ops.matmul(a, dmm, transpose_b=True) +\n math_ops.matmul(a, array_ops.ones_like(a) * dr, transpose_b=True)\n ]\n\n return [mm, r], grad\n\n\nclass TapeTest(test.TestCase):\n\n def testMultiOutput(self):\n\n def fn(x, y):\n c = x + y\n # Multiple outputs from split.\n d, f = array_ops.split(c, 2)\n return d + f\n\n a = tensor.Tensor([[1., 0.], [0., 1.]])\n b = tensor.Tensor([[1., 2.], [3., 4.]])\n da, db = backprop.gradients_function(fn, [0, 1])(a, b)\n with context.graph_mode(), self.test_session():\n tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)\n tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)\n tf_c = tf_a + tf_b\n tf_d, tf_f = array_ops.split(tf_c, 2, axis=1)\n tf_e = tf_d + tf_f\n tf_da, tf_db = gradients_impl.gradients(tf_e, [tf_a, tf_b])\n\n self.assertAllEqual(da.numpy(), tf_da.eval())\n self.assertAllEqual(db.numpy(), tf_db.eval())\n\n def testBasicFunctional(self):\n\n def forward(a, b):\n mm = math_ops.matmul(a, b)\n return math_ops.reduce_sum(mm)\n\n aa = tensor.Tensor([[1., 0.], [0., 1.]])\n bb = tensor.Tensor([[1., 2.], [3., 4.]])\n da, = backprop.gradients_function(forward, ['a'])(aa, bb)\n self.assertAllEqual(da.numpy(),\n math_ops.matmul(\n array_ops.ones_like(aa),\n array_ops.transpose(bb)).numpy())\n\n def testBasicFunctionalPositionalArg(self):\n\n def forward(a, b):\n mm = math_ops.matmul(a, b)\n return math_ops.reduce_sum(mm)\n\n aa = tensor.Tensor([[1., 0.], [0., 1.]])\n bb = tensor.Tensor([[1., 2.], [3., 4.]])\n da, = backprop.gradients_function(forward, [0])(aa, bb)\n self.assertAllEqual(da.numpy(),\n math_ops.matmul(\n array_ops.ones_like(aa),\n array_ops.transpose(bb)).numpy())\n\n def testBasicFunctionalWithValue(self):\n\n def forward(a, b):\n mm = 
math_ops.matmul(a, b)\n return math_ops.reduce_sum(mm)\n\n aa = tensor.Tensor([[1., 0.], [0., 1.]])\n bb = tensor.Tensor([[1., 2.], [3., 4.]])\n val, (da,) = backprop.val_and_grad_function(forward, ['a'])(aa, bb)\n self.assertAllEqual(da.numpy(),\n math_ops.matmul(\n array_ops.ones_like(aa),\n array_ops.transpose(bb)).numpy())\n self.assertAllEqual(val.numpy(), forward(aa, bb).numpy())\n\n def testTwoOutputs(self):\n\n def fn(x, y):\n mm, r = two_outputs(x, y)\n return r + math_ops.reduce_sum(mm)\n\n a = tensor.Tensor([[1., 0.], [0., 1.]])\n b = tensor.Tensor([[1., 2.], [3., 4.]])\n da, db = backprop.gradients_function(fn, [0, 1])(a, b)\n with context.graph_mode(), self.test_session():\n tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)\n tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)\n tf_mm = math_ops.matmul(tf_a, tf_b)\n tf_rr = 2 * math_ops.reduce_sum(tf_mm)\n tf_da, tf_db = gradients_impl.gradients(tf_rr, [tf_a, tf_b])\n\n self.assertAllEqual(da.numpy(), tf_da.eval())\n self.assertAllEqual(db.numpy(), tf_db.eval())\n\n def testGcTwoOutputs(self):\n\n def fn(x, y):\n return nn_ops.sparse_softmax_cross_entropy_with_logits(logits=x,\n labels=y)[0]\n\n labels = tensor.Tensor([0])\n logits = tensor.Tensor([[0.0]])\n grad, = backprop.gradients_function(fn, [0])(logits, labels)\n self.assertAllEqual(grad.numpy(), [[0.0]])\n\n def testTfTensor(self):\n\n def fn(x):\n return x\n\n t = constant_op.constant(1.0)\n g, = backprop.gradients_function(fn, [0])(t)\n self.assertEqual(g.numpy(), 1.0)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Base Estimator class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport copy\nimport os\nimport tempfile\n\nimport numpy as np\nimport six\n\nfrom google.protobuf import message\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib import metrics as metrics_lib\nfrom tensorflow.contrib.framework import deprecated\nfrom tensorflow.contrib.framework import deprecated_args\nfrom tensorflow.contrib.framework import list_variables\nfrom tensorflow.contrib.framework import load_variable\nfrom tensorflow.contrib.learn.python.learn import evaluable\nfrom tensorflow.contrib.learn.python.learn import metric_spec\nfrom tensorflow.contrib.learn.python.learn import monitors as monitor_lib\nfrom tensorflow.contrib.learn.python.learn import trainable\nfrom tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn\nfrom tensorflow.contrib.learn.python.learn.estimators import constants\nfrom tensorflow.contrib.learn.python.learn.estimators import metric_key\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib\nfrom tensorflow.contrib.learn.python.learn.estimators 
import run_config\nfrom tensorflow.contrib.learn.python.learn.estimators import tensor_signature\nfrom tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError\nfrom tensorflow.contrib.learn.python.learn.learn_io import data_feeder\nfrom tensorflow.contrib.learn.python.learn.utils import export\nfrom tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils\nfrom tensorflow.contrib.meta_graph_transform import meta_graph_transform\nfrom tensorflow.contrib.training.python.training import evaluation\nfrom tensorflow.core.framework import summary_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session as tf_session\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import resources\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import builder as saved_model_builder\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.summary import summary as core_summary\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import device_setter\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import saver\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\n\n\nAS_ITERABLE_DATE = '2016-09-15'\nAS_ITERABLE_INSTRUCTIONS = (\n 'The default behavior of predict() is changing. The default value for\\n'\n 'as_iterable will change to True, and then the flag will be removed\\n'\n 'altogether. The behavior of this flag is described below.')\nSCIKIT_DECOUPLE_DATE = '2016-12-01'\nSCIKIT_DECOUPLE_INSTRUCTIONS = (\n 'Estimator is decoupled from Scikit Learn interface by moving into\\n'\n 'separate class SKCompat. Arguments x, y and batch_size are only\\n'\n 'available in the SKCompat class, Estimator will only accept input_fn.\\n'\n 'Example conversion:\\n'\n ' est = Estimator(...) -> est = SKCompat(Estimator(...))')\n\n\ndef _verify_input_args(x, y, input_fn, feed_fn, batch_size):\n \"\"\"Verifies validity of co-existence of input arguments.\"\"\"\n if input_fn is None:\n if x is None:\n raise ValueError('Either x or input_fn must be provided.')\n\n if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):\n raise ValueError('Inputs cannot be tensors. Please provide input_fn.')\n\n if feed_fn is not None:\n raise ValueError('Can not provide both feed_fn and x or y.')\n else:\n if (x is not None) or (y is not None):\n raise ValueError('Can not provide both input_fn and x or y.')\n if batch_size is not None:\n raise ValueError('Can not provide both input_fn and batch_size.')\n\n\ndef _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):\n \"\"\"Make inputs into input and feed functions.\n\n Args:\n x: Numpy, Pandas or Dask matrix or iterable.\n y: Numpy, Pandas or Dask matrix or iterable.\n input_fn: Pre-defined input function for training data.\n feed_fn: Pre-defined data feeder function.\n batch_size: Size to split data into parts. 
Must be >= 1.\n shuffle: Whether to shuffle the inputs.\n epochs: Number of epochs to run.\n\n Returns:\n Data input and feeder function based on training data.\n\n Raises:\n ValueError: Only one of `(x & y)` or `input_fn` must be provided.\n \"\"\"\n _verify_input_args(x, y, input_fn, feed_fn, batch_size)\n if input_fn is not None:\n return input_fn, feed_fn\n df = data_feeder.setup_train_data_feeder(\n x,\n y,\n n_classes=None,\n batch_size=batch_size,\n shuffle=shuffle,\n epochs=epochs)\n return df.input_builder, df.get_feed_dict_fn()\n\n\ndef infer_real_valued_columns_from_input_fn(input_fn):\n \"\"\"Creates `FeatureColumn` objects for inputs defined by `input_fn`.\n\n This interprets all inputs as dense, fixed-length float values. This creates\n a local graph in which it calls `input_fn` to build the tensors, then discards\n it.\n\n Args:\n input_fn: Input function returning a tuple of:\n features - Dictionary of string feature name to `Tensor` or `Tensor`.\n labels - `Tensor` of label values.\n\n Returns:\n List of `FeatureColumn` objects.\n \"\"\"\n with ops.Graph().as_default():\n features, _ = input_fn()\n return layers.infer_real_valued_columns(features)\n\n\ndef infer_real_valued_columns_from_input(x):\n \"\"\"Creates `FeatureColumn` objects for inputs defined by input `x`.\n\n This interprets all inputs as dense, fixed-length float values.\n\n Args:\n x: Real-valued matrix of shape [n_samples, n_features...]. Can be\n iterator that returns arrays of features.\n\n Returns:\n List of `FeatureColumn` objects.\n \"\"\"\n input_fn, _ = _get_input_fn(\n x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)\n return infer_real_valued_columns_from_input_fn(input_fn)\n\n\ndef _model_fn_args(fn):\n \"\"\"Get argument names for function-like object.\n\n Args:\n fn: Function, or function-like object (e.g., result of `functools.partial`).\n\n Returns:\n `tuple` of string argument names.\n\n Raises:\n ValueError: if partial function has positionally bound arguments\n \"\"\"\n _, fn = tf_decorator.unwrap(fn)\n if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):\n # Handle functools.partial and similar objects.\n return tuple([\n arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]\n if arg not in set(fn.keywords.keys())\n ])\n # Handle function.\n return tuple(tf_inspect.getargspec(fn).args)\n\n\ndef _get_replica_device_setter(config):\n \"\"\"Creates a replica device setter if required.\n\n Args:\n config: A RunConfig instance.\n\n Returns:\n A replica device setter, or None.\n \"\"\"\n ps_ops = [\n 'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',\n 'MutableHashTableV2', 'MutableHashTableOfTensors',\n 'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',\n 'MutableDenseHashTableV2'\n ]\n\n if config.task_type:\n worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)\n else:\n worker_device = '/job:worker'\n\n if config.num_ps_replicas > 0:\n return device_setter.replica_device_setter(\n ps_tasks=config.num_ps_replicas, worker_device=worker_device,\n merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)\n else:\n return None\n\n\ndef _make_metrics_ops(metrics, features, labels, predictions):\n \"\"\"Add metrics based on `features`, `labels`, and `predictions`.\n\n `metrics` contains a specification for how to run metrics. 
It is a dict\n mapping friendly names to either `MetricSpec` objects, or directly to a metric\n function (assuming that `predictions` and `labels` are single tensors), or to\n `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and\n `labels` to `metric` (assuming `labels` is a single tensor).\n\n Users are encouraged to use `MetricSpec` objects, which are more flexible and\n cleaner. They also lead to clearer errors.\n\n Args:\n metrics: A dict mapping names to metrics specification, for example\n `MetricSpec` objects.\n features: A dict of tensors returned from an input_fn as features/inputs.\n labels: A single tensor or a dict of tensors returned from an input_fn as\n labels.\n predictions: A single tensor or a dict of tensors output from a model as\n predictions.\n\n Returns:\n A dict mapping the friendly given in `metrics` to the result of calling the\n given metric function.\n\n Raises:\n ValueError: If metrics specifications do not work with the type of\n `features`, `labels`, or `predictions` provided. Mostly, a dict is given\n but no pred_name specified.\n \"\"\"\n metrics = metrics or {}\n\n # If labels is a dict with a single key, unpack into a single tensor.\n labels_tensor_or_dict = labels\n if isinstance(labels, dict) and len(labels) == 1:\n labels_tensor_or_dict = labels[list(labels.keys())[0]]\n\n result = {}\n # Iterate in lexicographic order, so the graph is identical among runs.\n for name, metric in sorted(six.iteritems(metrics)):\n if isinstance(metric, metric_spec.MetricSpec):\n result[name] = metric.create_metric_ops(features, labels, predictions)\n continue\n\n # TODO(b/31229024): Remove the rest of this loop\n logging.warning('Please specify metrics using MetricSpec. Using bare '\n 'functions or (key, fn) tuples is deprecated and support '\n 'for it will be removed on Oct 1, 2016.')\n\n if isinstance(name, tuple):\n # Multi-head metrics.\n if len(name) != 2:\n raise ValueError('Invalid metric for {}. It returned a tuple with '\n 'len {}, expected 2.'.format(name, len(name)))\n if not isinstance(predictions, dict):\n raise ValueError(\n 'Metrics passed provide (name, prediction), '\n 'but predictions are not dict. '\n 'Metrics: %s, Predictions: %s.' % (metrics, predictions))\n # Here are two options: labels are single Tensor or a dict.\n if isinstance(labels, dict) and name[1] in labels:\n # If labels are dict and the prediction name is in it, apply metric.\n result[name[0]] = metric(predictions[name[1]], labels[name[1]])\n else:\n # Otherwise pass the labels to the metric.\n result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)\n else:\n # Single head metrics.\n if isinstance(predictions, dict):\n raise ValueError(\n 'Metrics passed provide only name, no prediction, '\n 'but predictions are dict. '\n 'Metrics: %s, Labels: %s.' 
% (metrics, labels_tensor_or_dict))\n result[name] = metric(predictions, labels_tensor_or_dict)\n return result\n\n\ndef _dict_to_str(dictionary):\n \"\"\"Get a `str` representation of a `dict`.\n\n Args:\n dictionary: The `dict` to be represented as `str`.\n\n Returns:\n A `str` representing the `dictionary`.\n \"\"\"\n results = []\n for k, v in sorted(dictionary.items()):\n if isinstance(v, float) or isinstance(v, np.float32) or isinstance(\n v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):\n results.append('%s = %s' % (k, v))\n else:\n results.append('Type of %s = %s' % (k, type(v)))\n\n return ', '.join(results)\n\n\ndef _write_dict_to_summary(output_dir, dictionary, current_global_step):\n \"\"\"Writes a `dict` into summary file in given output directory.\n\n Args:\n output_dir: `str`, directory to write the summary file in.\n dictionary: the `dict` to be written to summary file.\n current_global_step: `int`, the current global step.\n \"\"\"\n logging.info('Saving dict for global step %d: %s', current_global_step,\n _dict_to_str(dictionary))\n summary_writer = core_summary.FileWriterCache.get(output_dir)\n summary_proto = summary_pb2.Summary()\n for key in dictionary:\n if dictionary[key] is None:\n continue\n if key == 'global_step':\n continue\n if (isinstance(dictionary[key], np.float32) or\n isinstance(dictionary[key], float)):\n summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))\n elif (isinstance(dictionary[key], np.int64) or\n isinstance(dictionary[key], np.int32) or\n isinstance(dictionary[key], int)):\n summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))\n elif isinstance(dictionary[key], six.string_types):\n try:\n summ = summary_pb2.Summary.FromString(dictionary[key])\n for i, _ in enumerate(summ.value):\n summ.value[i].tag = key\n summary_proto.value.extend(summ.value)\n except message.DecodeError:\n logging.warn('Skipping summary for %s, cannot parse string to Summary.',\n key)\n continue\n else:\n logging.warn(\n 'Skipping summary for %s, must be a float, np.float32, np.int64, '\n 'np.int32 or int or a serialized string of Summary.', key)\n summary_writer.add_summary(summary_proto, current_global_step)\n summary_writer.flush()\n\n\nGraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',\n ['tags', 'transforms'])\n\n\nclass BaseEstimator(\n sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):\n \"\"\"Abstract BaseEstimator class to train and evaluate TensorFlow models.\n\n Users should not instantiate or subclass this class. Instead, use an\n `Estimator`.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n # Note that for Google users, this is overridden with\n # learn_runner.EstimatorConfig.\n # TODO(wicke): Remove this once launcher takes over config functionality\n _Config = run_config.RunConfig # pylint: disable=invalid-name\n\n def __init__(self, model_dir=None, config=None):\n \"\"\"Initializes a BaseEstimator instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model. If `None`, the model_dir in\n `config` will be used if set. 
If both are set, they must be same.\n config: A RunConfig instance.\n \"\"\"\n # Create a run configuration.\n if config is None:\n self._config = BaseEstimator._Config()\n logging.info('Using default config.')\n else:\n self._config = config\n\n if self._config.session_config is None:\n self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)\n else:\n self._session_config = self._config.session_config\n\n # Model directory.\n if (model_dir is not None) and (self._config.model_dir is not None):\n if model_dir != self._config.model_dir:\n # TODO(b/9965722): remove this suppression after it is no longer\n # necessary.\n # pylint: disable=g-doc-exception\n raise ValueError(\n \"model_dir are set both in constructor and RunConfig, but with \"\n \"different values. In constructor: '{}', in RunConfig: \"\n \"'{}' \".format(model_dir, self._config.model_dir))\n\n self._model_dir = model_dir or self._config.model_dir\n if self._model_dir is None:\n self._model_dir = tempfile.mkdtemp()\n logging.warning('Using temporary folder as model directory: %s',\n self._model_dir)\n if self._config.model_dir is None:\n self._config = self._config.replace(model_dir=self._model_dir)\n logging.info('Using config: %s', str(vars(self._config)))\n\n # Set device function depending if there are replicas or not.\n self._device_fn = _get_replica_device_setter(self._config)\n\n # Features and labels TensorSignature objects.\n # TODO(wicke): Rename these to something more descriptive\n self._features_info = None\n self._labels_info = None\n\n self._graph = None\n\n @property\n def config(self):\n # TODO(wicke): make RunConfig immutable, and then return it without a copy.\n return copy.deepcopy(self._config)\n\n @deprecated_args(\n SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),\n ('y', None), ('batch_size', None)\n )\n def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,\n monitors=None, max_steps=None):\n # pylint: disable=g-doc-args,g-doc-return-or-yield\n \"\"\"See `Trainable`.\n\n Raises:\n ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.\n ValueError: If both `steps` and `max_steps` are not `None`.\n \"\"\"\n if (steps is not None) and (max_steps is not None):\n raise ValueError('Can not provide both steps and max_steps.')\n _verify_input_args(x, y, input_fn, None, batch_size)\n if x is not None:\n SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)\n return self\n\n if max_steps is not None:\n try:\n start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)\n if max_steps <= start_step:\n logging.info('Skipping training since max_steps has already saved.')\n return self\n except: # pylint: disable=bare-except\n pass\n\n hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)\n if steps is not None or max_steps is not None:\n hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))\n\n loss = self._train_model(input_fn=input_fn, hooks=hooks)\n logging.info('Loss for final step: %s.', loss)\n return self\n\n @deprecated_args(\n SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),\n ('y', None), ('batch_size', None)\n )\n def partial_fit(\n self, x=None, y=None, input_fn=None, steps=1, batch_size=None,\n monitors=None):\n \"\"\"Incremental fit on a batch of samples.\n\n This method is expected to be called several times consecutively\n on different or the same chunks of the dataset. 
This either can\n implement iterative training or out-of-core/online training.\n\n This is especially useful when the whole dataset is too big to\n fit in memory at the same time. Or when model is taking long time\n to converge, and you want to split up training into subparts.\n\n Args:\n x: Matrix of shape [n_samples, n_features...]. Can be iterator that\n returns arrays of features. The training input samples for fitting the\n model. If set, `input_fn` must be `None`.\n y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be\n iterator that returns array of labels. The training label values\n (class labels in classification, real numbers in regression). If set,\n `input_fn` must be `None`.\n input_fn: Input function. If set, `x`, `y`, and `batch_size` must be\n `None`.\n steps: Number of steps for which to train model. If `None`, train forever.\n batch_size: minibatch size to use on the input, defaults to first\n dimension of `x`. Must be `None` if `input_fn` is provided.\n monitors: List of `BaseMonitor` subclass instances. Used for callbacks\n inside the training loop.\n\n Returns:\n `self`, for chaining.\n\n Raises:\n ValueError: If at least one of `x` and `y` is provided, and `input_fn` is\n provided.\n \"\"\"\n logging.warning('The current implementation of partial_fit is not optimized'\n ' for use in a loop. Consider using fit() instead.')\n return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,\n batch_size=batch_size, monitors=monitors)\n\n @deprecated_args(\n SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),\n ('y', None), ('batch_size', None)\n )\n def evaluate(self,\n x=None,\n y=None,\n input_fn=None,\n feed_fn=None,\n batch_size=None,\n steps=None,\n metrics=None,\n name=None,\n checkpoint_path=None,\n hooks=None,\n log_progress=True):\n # pylint: disable=g-doc-args,g-doc-return-or-yield\n \"\"\"See `Evaluable`.\n\n Raises:\n ValueError: If at least one of `x` or `y` is provided, and at least one of\n `input_fn` or `feed_fn` is provided.\n Or if `metrics` is not `None` or `dict`.\n \"\"\"\n _verify_input_args(x, y, input_fn, feed_fn, batch_size)\n if x is not None:\n return SKCompat(self).score(x, y, batch_size, steps, metrics, name)\n\n if metrics is not None and not isinstance(metrics, dict):\n raise ValueError('Metrics argument should be None or dict. '\n 'Got %s.' % metrics)\n eval_results, global_step = self._evaluate_model(\n input_fn=input_fn,\n feed_fn=feed_fn,\n steps=steps,\n metrics=metrics,\n name=name,\n checkpoint_path=checkpoint_path,\n hooks=hooks,\n log_progress=log_progress)\n\n if eval_results is not None:\n eval_results.update({'global_step': global_step})\n return eval_results\n\n @deprecated_args(\n SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),\n ('batch_size', None), ('as_iterable', True)\n )\n def predict(\n self, x=None, input_fn=None, batch_size=None, outputs=None,\n as_iterable=True):\n \"\"\"Returns predictions for given features.\n\n Args:\n x: Matrix of shape [n_samples, n_features...]. Can be iterator that\n returns arrays of features. The training input samples for fitting the\n model. If set, `input_fn` must be `None`.\n input_fn: Input function. If set, `x` and 'batch_size' must be `None`.\n batch_size: Override default batch size. If set, 'input_fn' must be\n 'None'.\n outputs: list of `str`, name of the output to predict.\n If `None`, returns all.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. 
Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n A numpy array of predicted classes or regression values if the\n constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`\n of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of\n predictions if as_iterable is True.\n\n Raises:\n ValueError: If x and input_fn are both provided or both `None`.\n \"\"\"\n _verify_input_args(x, None, input_fn, None, batch_size)\n if x is not None and not as_iterable:\n return SKCompat(self).predict(x, batch_size)\n\n input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)\n return self._infer_model(\n input_fn=input_fn,\n feed_fn=feed_fn,\n outputs=outputs,\n as_iterable=as_iterable)\n\n def get_variable_value(self, name):\n \"\"\"Returns value of the variable given by name.\n\n Args:\n name: string, name of the tensor.\n\n Returns:\n Numpy array - value of the tensor.\n \"\"\"\n return load_variable(self.model_dir, name)\n\n def get_variable_names(self):\n \"\"\"Returns list of all variable names in this model.\n\n Returns:\n List of names.\n \"\"\"\n return [name for name, _ in list_variables(self.model_dir)]\n\n @property\n def model_dir(self):\n return self._model_dir\n\n @deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')\n def export(self,\n export_dir,\n input_fn=export._default_input_fn, # pylint: disable=protected-access\n input_feature_key=None,\n use_deprecated_input_fn=True,\n signature_fn=None,\n prediction_key=None,\n default_batch_size=1,\n exports_to_keep=None,\n checkpoint_path=None):\n \"\"\"Exports inference graph into given dir.\n\n Args:\n export_dir: A string containing a directory to write the exported graph\n and checkpoints.\n input_fn: If `use_deprecated_input_fn` is true, then a function that given\n `Tensor` of `Example` strings, parses it into features that are then\n passed to the model. Otherwise, a function that takes no argument and\n returns a tuple of (features, labels), where features is a dict of\n string key to `Tensor` and labels is a `Tensor` that's currently not\n used (and so can be `None`).\n input_feature_key: Only used if `use_deprecated_input_fn` is false. String\n key into the features dict returned by `input_fn` that corresponds to a\n the raw `Example` strings `Tensor` that the exported model will take as\n input. Can only be `None` if you're using a custom `signature_fn` that\n does not use the first arg (examples).\n use_deprecated_input_fn: Determines the signature format of `input_fn`.\n signature_fn: Function that returns a default signature and a named\n signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s\n for features and `Tensor` or `dict` of `Tensor`s for predictions.\n prediction_key: The key for a tensor in the `predictions` dict (output\n from the `model_fn`) to use as the `predictions` input to the\n `signature_fn`. Optional. If `None`, predictions will pass to\n `signature_fn` without filtering.\n default_batch_size: Default batch size of the `Example` placeholder.\n exports_to_keep: Number of exports to keep.\n checkpoint_path: the checkpoint path of the model to be exported. If it is\n `None` (which is default), will use the latest checkpoint in\n export_dir.\n\n Returns:\n The string path to the exported directory. NB: this functionality was\n added ca. 
2016/09/25; clients that depend on the return value may need\n to handle the case where this function returns None because subclasses\n are not returning a value.\n \"\"\"\n # pylint: disable=protected-access\n return export._export_estimator(\n estimator=self,\n export_dir=export_dir,\n signature_fn=signature_fn,\n prediction_key=prediction_key,\n input_fn=input_fn,\n input_feature_key=input_feature_key,\n use_deprecated_input_fn=use_deprecated_input_fn,\n default_batch_size=default_batch_size,\n exports_to_keep=exports_to_keep,\n checkpoint_path=checkpoint_path)\n\n @abc.abstractproperty\n def _get_train_ops(self, features, labels):\n \"\"\"Method that builds model graph and returns trainer ops.\n\n Expected to be overridden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A `ModelFnOps` object.\n \"\"\"\n pass\n\n @abc.abstractproperty\n def _get_predict_ops(self, features):\n \"\"\"Method that builds model graph and returns prediction ops.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A `ModelFnOps` object.\n \"\"\"\n pass\n\n def _get_eval_ops(self, features, labels, metrics):\n \"\"\"Method that builds model graph and returns evaluation ops.\n\n Expected to be overridden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n metrics: Dict of metrics to run. If None, the default metric functions\n are used; if {}, no metrics are used. Otherwise, `metrics` should map\n friendly names for the metric to a `MetricSpec` object defining which\n model outputs to evaluate against which labels with which metric\n function. Metric ops should support streaming, e.g., returning\n update_op and value tensors. See more details in\n `../../../../metrics/python/metrics/ops/streaming_metrics.py` and\n `../metric_spec.py`.\n\n Returns:\n A `ModelFnOps` object.\n \"\"\"\n raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')\n\n @deprecated(\n '2016-09-23',\n 'The signature of the input_fn accepted by export is changing to be '\n 'consistent with what\\'s used by tf.Learn Estimator\\'s train/evaluate, '\n 'which makes this function useless. This will be removed after the '\n 'deprecation date.')\n def _get_feature_ops_from_example(self, examples_batch):\n \"\"\"Returns feature parser for given example batch using features info.\n\n This function requires `fit()` has been called.\n\n Args:\n examples_batch: batch of tf.Example\n\n Returns:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Raises:\n ValueError: If `_features_info` attribute is not available (usually\n because `fit()` has not been called).\n \"\"\"\n if self._features_info is None:\n raise ValueError('Features information missing, was fit() ever called?')\n return tensor_signature.create_example_parser_from_signatures(\n self._features_info, examples_batch)\n\n def _check_inputs(self, features, labels):\n if self._features_info is not None:\n logging.debug('Given features: %s, required signatures: %s.',\n str(features), str(self._features_info))\n if not tensor_signature.tensors_compatible(features, self._features_info):\n raise ValueError('Features are incompatible with given information. '\n 'Given features: %s, required signatures: %s.' 
%\n (str(features), str(self._features_info)))\n else:\n self._features_info = tensor_signature.create_signatures(features)\n logging.debug('Setting feature info to %s.', str(self._features_info))\n if labels is not None:\n if self._labels_info is not None:\n logging.debug('Given labels: %s, required signatures: %s.',\n str(labels), str(self._labels_info))\n if not tensor_signature.tensors_compatible(labels, self._labels_info):\n raise ValueError('Labels are incompatible with given information. '\n 'Given labels: %s, required signatures: %s.' %\n (str(labels), str(self._labels_info)))\n else:\n self._labels_info = tensor_signature.create_signatures(labels)\n logging.debug('Setting labels info to %s', str(self._labels_info))\n\n def _extract_metric_update_ops(self, eval_dict):\n \"\"\"Separate update operations from metric value operations.\"\"\"\n update_ops = []\n value_ops = {}\n for name, metric_ops in six.iteritems(eval_dict):\n if isinstance(metric_ops, (list, tuple)):\n if len(metric_ops) == 2:\n value_ops[name] = metric_ops[0]\n update_ops.append(metric_ops[1])\n else:\n logging.warning(\n 'Ignoring metric {}. It returned a list|tuple with len {}, '\n 'expected 2'.format(name, len(metric_ops)))\n value_ops[name] = metric_ops\n else:\n value_ops[name] = metric_ops\n\n if update_ops:\n update_ops = control_flow_ops.group(*update_ops)\n else:\n update_ops = None\n\n return update_ops, value_ops\n\n def _evaluate_model(self,\n input_fn,\n steps,\n feed_fn=None,\n metrics=None,\n name='',\n checkpoint_path=None,\n hooks=None,\n log_progress=True):\n # TODO(wicke): Remove this once Model and associated code are gone.\n if (hasattr(self._config, 'execution_mode') and\n self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):\n return None, None\n\n # Check that model has been trained (if nothing has been set explicitly).\n if not checkpoint_path:\n latest_path = saver.latest_checkpoint(self._model_dir)\n if not latest_path:\n raise NotFittedError(\"Couldn't find trained model at %s.\"\n % self._model_dir)\n checkpoint_path = latest_path\n\n # Setup output directory.\n eval_dir = os.path.join(self._model_dir, 'eval' if not name else\n 'eval_' + name)\n\n with ops.Graph().as_default() as g:\n random_seed.set_random_seed(self._config.tf_random_seed)\n global_step = training_util.create_global_step(g)\n features, labels = input_fn()\n self._check_inputs(features, labels)\n\n model_fn_results = self._get_eval_ops(features, labels, metrics)\n eval_dict = model_fn_results.eval_metric_ops\n\n update_op, eval_dict = self._extract_metric_update_ops(eval_dict)\n\n # We need to copy the hook array as we modify it, thus [:].\n hooks = hooks[:] if hooks else []\n if feed_fn:\n hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))\n if steps == 0:\n logging.warning('evaluation steps are 0. 
If `input_fn` does not raise'\n 'OutOfRangeError`, the evaluation will never stop.'\n 'Use steps=None if intended.')\n if steps:\n hooks.append(\n evaluation.StopAfterNEvalsHook(\n steps, log_progress=log_progress))\n\n global_step_key = 'global_step'\n while global_step_key in eval_dict:\n global_step_key = '_' + global_step_key\n eval_dict[global_step_key] = global_step\n\n eval_results = evaluation.evaluate_once(\n checkpoint_path=checkpoint_path,\n master=self._config.evaluation_master,\n scaffold=model_fn_results.scaffold,\n eval_ops=update_op,\n final_ops=eval_dict,\n hooks=hooks,\n config=self._session_config)\n current_global_step = eval_results[global_step_key]\n\n _write_dict_to_summary(eval_dir, eval_results, current_global_step)\n\n return eval_results, current_global_step\n\n def _get_features_from_input_fn(self, input_fn):\n result = input_fn()\n if isinstance(result, (list, tuple)):\n return result[0]\n return result\n\n def _infer_model(self,\n input_fn,\n feed_fn=None,\n outputs=None,\n as_iterable=True,\n iterate_batches=False):\n # Check that model has been trained.\n checkpoint_path = saver.latest_checkpoint(self._model_dir)\n if not checkpoint_path:\n raise NotFittedError(\"Couldn't find trained model at %s.\"\n % self._model_dir)\n\n with ops.Graph().as_default() as g:\n random_seed.set_random_seed(self._config.tf_random_seed)\n training_util.create_global_step(g)\n features = self._get_features_from_input_fn(input_fn)\n infer_ops = self._get_predict_ops(features)\n predictions = self._filter_predictions(infer_ops.predictions, outputs)\n mon_sess = monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n checkpoint_filename_with_path=checkpoint_path,\n scaffold=infer_ops.scaffold,\n config=self._session_config))\n if not as_iterable:\n with mon_sess:\n if not mon_sess.should_stop():\n return mon_sess.run(predictions, feed_fn() if feed_fn else None)\n else:\n return self._predict_generator(mon_sess, predictions, feed_fn,\n iterate_batches)\n\n def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):\n with mon_sess:\n while not mon_sess.should_stop():\n preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)\n if iterate_batches:\n yield preds\n elif not isinstance(predictions, dict):\n for pred in preds:\n yield pred\n else:\n first_tensor = list(preds.values())[0]\n if isinstance(first_tensor, sparse_tensor.SparseTensorValue):\n batch_length = first_tensor.dense_shape[0]\n else:\n batch_length = first_tensor.shape[0]\n for i in range(batch_length):\n yield {key: value[i] for key, value in six.iteritems(preds)}\n if self._is_input_constant(feed_fn, mon_sess.graph):\n return\n\n def _is_input_constant(self, feed_fn, graph):\n # If there are no queue_runners, the input `predictions` is a\n # constant, and we should stop after the first epoch. 
If,\n # instead, there are queue_runners, eventually they should throw\n # an `OutOfRangeError`.\n if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):\n return False\n # data_feeder uses feed_fn to generate `OutOfRangeError`.\n if feed_fn is not None:\n return False\n return True\n\n def _filter_predictions(self, predictions, outputs):\n if not outputs:\n return predictions\n if not isinstance(predictions, dict):\n raise ValueError(\n 'outputs argument is not valid in case of non-dict predictions.')\n existing_keys = predictions.keys()\n predictions = {\n key: value\n for key, value in six.iteritems(predictions) if key in outputs\n }\n if not predictions:\n raise ValueError('Expected to run at least one output from %s, '\n 'provided %s.' % (existing_keys, outputs))\n return predictions\n\n def _train_model(self, input_fn, hooks):\n all_hooks = []\n self._graph = ops.Graph()\n with self._graph.as_default() as g, g.device(self._device_fn):\n random_seed.set_random_seed(self._config.tf_random_seed)\n global_step = training_util.create_global_step(g)\n features, labels = input_fn()\n self._check_inputs(features, labels)\n model_fn_ops = self._get_train_ops(features, labels)\n ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)\n all_hooks.extend(hooks)\n all_hooks.extend([\n basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),\n basic_session_run_hooks.LoggingTensorHook(\n {\n 'loss': model_fn_ops.loss,\n 'step': global_step\n },\n every_n_iter=100)\n ])\n\n scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()\n if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):\n ops.add_to_collection(\n ops.GraphKeys.SAVERS,\n saver.Saver(\n sharded=True,\n max_to_keep=self._config.keep_checkpoint_max,\n defer_build=True,\n save_relative_paths=True))\n\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n saver_hook_exists = any([\n isinstance(h, basic_session_run_hooks.CheckpointSaverHook)\n for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +\n model_fn_ops.training_chief_hooks)\n ])\n if not saver_hook_exists:\n chief_hooks = [\n basic_session_run_hooks.CheckpointSaverHook(\n self._model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n scaffold=scaffold)\n ]\n with monitored_session.MonitoredTrainingSession(\n master=self._config.master,\n is_chief=self._config.is_chief,\n checkpoint_dir=self._model_dir,\n scaffold=scaffold,\n hooks=all_hooks + model_fn_ops.training_hooks,\n chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,\n save_checkpoint_secs=0, # Saving is handled by a hook.\n save_summaries_steps=self._config.save_summary_steps,\n config=self._session_config\n ) as mon_sess:\n loss = None\n while not mon_sess.should_stop():\n _, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])\n core_summary.FileWriterCache.clear()\n return loss\n\n\ndef _identity_feature_engineering_fn(features, labels):\n return features, labels\n\n\nclass Estimator(BaseEstimator):\n \"\"\"Estimator class is the basic TensorFlow model trainer/evaluator.\n \"\"\"\n\n def __init__(self,\n model_fn=None,\n model_dir=None,\n config=None,\n params=None,\n feature_engineering_fn=None):\n \"\"\"Constructs an `Estimator` instance.\n\n Args:\n model_fn: Model function. 
Follows the signature:\n * Args:\n * `features`: single `Tensor` or `dict` of `Tensor`s\n (depending on data passed to `fit`),\n * `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head\n models). If mode is `ModeKeys.INFER`, `labels=None` will be\n passed. If the `model_fn`'s signature does not accept\n `mode`, the `model_fn` must still be able to handle\n `labels=None`.\n * `mode`: Optional. Specifies if this is training, evaluation, or\n prediction. See `ModeKeys`.\n * `params`: Optional `dict` of hyperparameters. Will receive what\n is passed to Estimator in `params` parameter. This allows\n Estimators to be configured from hyperparameter tuning.\n * `config`: Optional configuration object. Will receive what is passed\n to Estimator in `config` parameter, or the default `config`.\n Allows updating things in your model_fn based on configuration\n such as `num_ps_replicas`.\n * `model_dir`: Optional directory where model parameters, graph etc\n are saved. Will receive what is passed to Estimator in\n `model_dir` parameter, or the default `model_dir`. Allows\n updating things in your model_fn that expect model_dir, such as\n training hooks.\n\n * Returns:\n `ModelFnOps`\n\n Also supports a legacy signature which returns tuple of:\n\n * predictions: `Tensor`, `SparseTensor` or dictionary of same.\n Can also be any type that is convertible to a `Tensor` or\n `SparseTensor`, or dictionary of same.\n * loss: Scalar loss `Tensor`.\n * train_op: Training update `Tensor` or `Operation`.\n\n Supports the following signatures for the function:\n\n * `(features, labels) -> (predictions, loss, train_op)`\n * `(features, labels, mode) -> (predictions, loss, train_op)`\n * `(features, labels, mode, params) -> (predictions, loss, train_op)`\n * `(features, labels, mode, params, config) ->\n (predictions, loss, train_op)`\n * `(features, labels, mode, params, config, model_dir) ->\n (predictions, loss, train_op)`\n\n model_dir: Directory to save model parameters, graph, etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model.\n config: Configuration object.\n params: `dict` of hyperparameters that will be passed into `model_fn`.\n Keys are names of parameters, values are basic Python types.\n feature_engineering_fn: Feature engineering function. Takes features and\n labels which are the output of `input_fn` and\n returns features and labels which will be fed\n into `model_fn`. Please check `model_fn` for\n a definition of features and labels.\n\n Raises:\n ValueError: parameters of `model_fn` don't match `params`.\n \"\"\"\n super(Estimator, self).__init__(model_dir=model_dir, config=config)\n if model_fn is not None:\n # Check number of arguments of the given function matches requirements.\n model_fn_args = _model_fn_args(model_fn)\n if params is not None and 'params' not in model_fn_args:\n raise ValueError('Estimator\'s model_fn (%s) does not have a params '\n 'argument, but params (%s) were passed to the '\n 'Estimator\'s constructor.' 
%\n (model_fn, params))\n if params is None and 'params' in model_fn_args:\n logging.warning('Estimator\\'s model_fn (%s) includes params '\n 'argument, but params are not passed to Estimator.',\n model_fn)\n self._model_fn = model_fn\n self.params = params\n self._feature_engineering_fn = (\n feature_engineering_fn or _identity_feature_engineering_fn)\n\n def _call_model_fn(self, features, labels, mode, metrics=None):\n \"\"\"Calls model function with support of 2, 3 or 4 arguments.\n\n Args:\n features: features dict.\n labels: labels dict.\n mode: ModeKeys\n metrics: Dict of metrics.\n\n Returns:\n A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a\n `ModelFnOps` object.\n\n Raises:\n ValueError: if model_fn returns invalid objects.\n \"\"\"\n features, labels = self._feature_engineering_fn(features, labels)\n model_fn_args = _model_fn_args(self._model_fn)\n kwargs = {}\n if 'mode' in model_fn_args:\n kwargs['mode'] = mode\n if 'params' in model_fn_args:\n kwargs['params'] = self.params\n if 'config' in model_fn_args:\n kwargs['config'] = self.config\n if 'model_dir' in model_fn_args:\n kwargs['model_dir'] = self.model_dir\n model_fn_results = self._model_fn(features, labels, **kwargs)\n\n if isinstance(model_fn_results, model_fn_lib.ModelFnOps):\n model_fn_ops = model_fn_results\n else:\n # Here model_fn_results should be a tuple with 3 elements.\n if len(model_fn_results) != 3:\n raise ValueError('Unrecognized value returned by model_fn, '\n 'please return ModelFnOps.')\n model_fn_ops = model_fn_lib.ModelFnOps(\n mode=mode,\n predictions=model_fn_results[0],\n loss=model_fn_results[1],\n train_op=model_fn_results[2])\n\n # Custom metrics should overwrite defaults.\n if metrics:\n model_fn_ops.eval_metric_ops.update(_make_metrics_ops(\n metrics, features, labels, model_fn_ops.predictions))\n\n return model_fn_ops\n\n def _get_train_ops(self, features, labels):\n \"\"\"Method that builds model graph and returns trainer ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses `model_fn` passed as parameter to constructor to\n build model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n `ModelFnOps` object.\n \"\"\"\n return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)\n\n def _get_eval_ops(self, features, labels, metrics):\n \"\"\"Method that builds model graph and returns evaluation ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses `model_fn` passed as parameter to constructor to\n build model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n metrics: Dict of metrics to run. If None, the default metric functions\n are used; if {}, no metrics are used. Otherwise, `metrics` should map\n friendly names for the metric to a `MetricSpec` object defining which\n model outputs to evaluate against which labels with which metric\n function. Metric ops should support streaming, e.g., returning\n update_op and value tensors. 
See more details in\n `../../../../metrics/python/metrics/ops/streaming_metrics.py` and\n `../metric_spec.py`.\n\n Returns:\n `ModelFnOps` object.\n\n Raises:\n ValueError: if `metrics` don't match `labels`.\n \"\"\"\n model_fn_ops = self._call_model_fn(\n features, labels, model_fn_lib.ModeKeys.EVAL, metrics)\n\n if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:\n model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (\n metrics_lib.streaming_mean(model_fn_ops.loss))\n return model_fn_ops\n\n def _get_predict_ops(self, features):\n \"\"\"Method that builds model graph and returns prediction ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses `model_fn` passed as parameter to constructor to\n build model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n `ModelFnOps` object.\n \"\"\"\n labels = tensor_signature.create_placeholders_from_signatures(\n self._labels_info)\n return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)\n\n def export_savedmodel(\n self, export_dir_base, serving_input_fn,\n default_output_alternative_key=None,\n assets_extra=None,\n as_text=False,\n checkpoint_path=None,\n graph_rewrite_specs=(GraphRewriteSpec((tag_constants.SERVING,), ()),)):\n \"\"\"Exports inference graph as a SavedModel into given dir.\n\n Args:\n export_dir_base: A string containing a directory to write the exported\n graph and checkpoints.\n serving_input_fn: A function that takes no argument and\n returns an `InputFnOps`.\n default_output_alternative_key: the name of the head to serve when none is\n specified. Not needed for single-headed models.\n assets_extra: A dict specifying how to populate the assets.extra directory\n within the exported SavedModel. Each key should give the destination\n path (including the filename) relative to the assets.extra directory.\n The corresponding value gives the full path of the source file to be\n copied. For example, the simple case of copying a single file without\n renaming it is specified as\n `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.\n as_text: whether to write the SavedModel proto in text format.\n checkpoint_path: The checkpoint path to export. If None (the default),\n the most recent checkpoint found within the model directory is chosen.\n graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will\n produce a separate MetaGraphDef within the exported SavedModel, tagged\n and rewritten as specified. Defaults to a single entry using the\n default serving tag (\"serve\") and no rewriting.\n\n Returns:\n The string path to the exported directory.\n\n Raises:\n ValueError: if an unrecognized export_type is requested.\n \"\"\"\n if serving_input_fn is None:\n raise ValueError('serving_input_fn must be defined.')\n\n if not checkpoint_path:\n # Locate the latest checkpoint\n checkpoint_path = saver.latest_checkpoint(self._model_dir)\n if not checkpoint_path:\n raise NotFittedError(\"Couldn't find trained model at %s.\"\n % self._model_dir)\n\n export_dir = saved_model_export_utils.get_timestamped_export_dir(\n export_dir_base)\n # We'll write the SavedModel to a temporary directory and then atomically\n # rename it at the end. 
This helps to avoid corrupt / incomplete outputs,\n # which could otherwise occur if the job is preempted or otherwise fails\n # in the middle of SavedModel creation.\n temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)\n builder = saved_model_builder.SavedModelBuilder(temp_export_dir)\n\n # Build the base graph\n with ops.Graph().as_default() as g:\n training_util.create_global_step(g)\n\n # Call the serving_input_fn and collect the input alternatives.\n input_ops = serving_input_fn()\n input_alternatives, features = (\n saved_model_export_utils.get_input_alternatives(input_ops))\n\n # TODO(b/34388557) This is a stopgap, pending recording model provenance.\n # Record which features are expected at serving time. It is assumed that\n # these are the features that were used in training.\n for feature_key in input_ops.features.keys():\n ops.add_to_collection(\n constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)\n\n # Call the model_fn and collect the output alternatives.\n model_fn_ops = self._call_model_fn(features, None,\n model_fn_lib.ModeKeys.INFER)\n output_alternatives, actual_default_output_alternative_key = (\n saved_model_export_utils.get_output_alternatives(\n model_fn_ops, default_output_alternative_key))\n\n init_op = control_flow_ops.group(\n variables.local_variables_initializer(),\n resources.initialize_resources(resources.shared_resources()),\n lookup_ops.tables_initializer())\n\n # Build the SignatureDefs from all pairs of input and output alternatives\n signature_def_map = saved_model_export_utils.build_all_signature_defs(\n input_alternatives, output_alternatives,\n actual_default_output_alternative_key)\n\n # Export the first MetaGraphDef with variables, assets etc.\n with tf_session.Session('') as session:\n\n # pylint: disable=protected-access\n saveables = variables._all_saveable_objects()\n # pylint: enable=protected-access\n\n if (model_fn_ops.scaffold is not None and\n model_fn_ops.scaffold.saver is not None):\n saver_for_restore = model_fn_ops.scaffold.saver\n elif saveables:\n saver_for_restore = saver.Saver(saveables, sharded=True)\n\n saver_for_restore.restore(session, checkpoint_path)\n\n # Perform the export\n if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:\n raise ValueError('The first element of graph_rewrite_specs '\n 'must specify no transforms.')\n untransformed_tags = graph_rewrite_specs[0].tags\n\n # TODO(soergel): switch to main_op or otherwise update when dust settles\n builder.add_meta_graph_and_variables(\n session, untransformed_tags,\n signature_def_map=signature_def_map,\n assets_collection=ops.get_collection(\n ops.GraphKeys.ASSET_FILEPATHS),\n legacy_init_op=init_op)\n\n # pylint: disable=protected-access\n base_meta_graph_def = builder._saved_model.meta_graphs[0]\n # pylint: enable=protected-access\n\n if graph_rewrite_specs[1:]:\n # Prepare the input_names and output_names needed for the\n # meta_graph_transform call below.\n input_names = [tensor.name\n for input_dict in input_alternatives.values()\n for tensor in input_dict.values()]\n output_names = [tensor.name\n for output_alternative in output_alternatives.values()\n for tensor in output_alternative[1].values()]\n\n # Write the additional MetaGraphDefs\n for graph_rewrite_spec in graph_rewrite_specs[1:]:\n\n # TODO(soergel) consider moving most of this to saved_model.builder_impl\n # as e.g. 
builder.add_rewritten_meta_graph(rewritten_graph_def, tags)\n\n transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(\n base_meta_graph_def, input_names, output_names,\n graph_rewrite_spec.transforms, graph_rewrite_spec.tags)\n\n # pylint: disable=protected-access\n meta_graph_def = builder._saved_model.meta_graphs.add()\n # pylint: enable=protected-access\n meta_graph_def.CopyFrom(transformed_meta_graph_def)\n\n # Add the extra assets\n if assets_extra:\n assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),\n compat.as_bytes('assets.extra'))\n for dest_relative, source in assets_extra.items():\n dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),\n compat.as_bytes(dest_relative))\n dest_path = os.path.dirname(dest_absolute)\n gfile.MakeDirs(dest_path)\n gfile.Copy(source, dest_absolute)\n\n builder.save(as_text)\n gfile.Rename(temp_export_dir, export_dir)\n return export_dir\n\n\n# For time of deprecation x,y from Estimator allow direct access.\n# pylint: disable=protected-access\nclass SKCompat(sklearn.BaseEstimator):\n \"\"\"Scikit learn wrapper for TensorFlow Learn Estimator.\"\"\"\n\n def __init__(self, estimator):\n self._estimator = estimator\n\n def fit(self, x, y, batch_size=128, steps=None, max_steps=None,\n monitors=None):\n input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,\n batch_size=batch_size, shuffle=True,\n epochs=None)\n all_monitors = []\n if feed_fn:\n all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]\n if monitors:\n all_monitors.extend(monitors)\n\n self._estimator.fit(input_fn=input_fn,\n steps=steps,\n max_steps=max_steps,\n monitors=all_monitors)\n return self\n\n def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):\n input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,\n feed_fn=None, batch_size=batch_size,\n shuffle=False, epochs=1)\n if metrics is not None and not isinstance(metrics, dict):\n raise ValueError('Metrics argument should be None or dict. '\n 'Got %s.' % metrics)\n eval_results, global_step = self._estimator._evaluate_model(\n input_fn=input_fn,\n feed_fn=feed_fn,\n steps=steps,\n metrics=metrics,\n name=name)\n if eval_results is not None:\n eval_results.update({'global_step': global_step})\n return eval_results\n\n def predict(self, x, batch_size=128, outputs=None):\n input_fn, feed_fn = _get_input_fn(\n x, None, input_fn=None, feed_fn=None, batch_size=batch_size,\n shuffle=False, epochs=1)\n results = list(\n self._estimator._infer_model(\n input_fn=input_fn,\n feed_fn=feed_fn,\n outputs=outputs,\n as_iterable=True,\n iterate_batches=True))\n if not isinstance(results[0], dict):\n return np.concatenate([output for output in results], axis=0)\n return {\n key: np.concatenate(\n [output[key] for output in results], axis=0)\n for key in results[0]\n }\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for debugger functionalities in tf.Session.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport glob\nimport os\nimport shutil\nimport tempfile\nimport threading\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.core.util import event_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug.lib import debug_data\nfrom tensorflow.python.debug.lib import debug_utils\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nimport tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import gradient_descent\n\n\ndef no_rewrite_session_config():\n rewriter_config = rewriter_config_pb2.RewriterConfig(\n disable_model_pruning=True)\n graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)\n return config_pb2.ConfigProto(graph_options=graph_options)\n\n\nclass _RNNCellForTest(rnn_cell_impl.RNNCell):\n \"\"\"RNN cell for testing.\"\"\"\n\n def __init__(self, input_output_size, state_size):\n self._input_output_size = input_output_size\n self._state_size = state_size\n self._w = variables.Variable(1.0, dtype=dtypes.float32, name=\"w\")\n\n @property\n def output_size(self):\n return self._input_output_size\n\n @property\n def state_size(self):\n return self._state_size\n\n def __call__(self, input_, state, scope=None):\n return (math_ops.multiply(self._w, input_), state)\n\n\nclass SessionDebugTestBase(test_util.TensorFlowTestCase):\n \"\"\"Base class for unit tests of tfdbg running with tf.Session.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n if test.is_gpu_available():\n cls._expected_partition_graph_count = 2\n cls._expected_num_devices = 2\n gpu_name = test_util.gpu_device_name()\n cls._main_device = \"/job:localhost/replica:0/task:0\" + gpu_name\n else:\n cls._expected_partition_graph_count = 1\n cls._expected_num_devices = 1\n cls._main_device = 
\"/job:localhost/replica:0/task:0/cpu:0\"\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n def setUp(self):\n self._dump_root = tempfile.mkdtemp()\n\n def tearDown(self):\n ops.reset_default_graph()\n\n # Tear down temporary dump directory.\n if os.path.isdir(self._dump_root):\n shutil.rmtree(self._dump_root)\n\n def _debug_urls(self, run_number=None):\n raise NotImplementedError(\n \"_debug_urls() method is not implemented in the base test class.\")\n\n def _debug_dump_dir(self, run_number=None):\n raise NotImplementedError(\n \"_debug_dump_dir() method is not implemented in the base test class.\")\n\n def _debug_run_and_get_dump(self,\n sess,\n fetches,\n feed_dict=None,\n debug_ops=\"DebugIdentity\",\n tolerate_debug_op_creation_failures=False,\n global_step=-1,\n validate=True,\n expected_partition_graph_count=None):\n \"\"\"Run fetches with debugging and obtain DebugDumpDir.\n\n Args:\n sess: the tf.Session to be used.\n fetches: fetches of the Session.run().\n feed_dict: feed dict for the Session.run().\n debug_ops: name(s) of the debug ops to be used.\n tolerate_debug_op_creation_failures: whether to tolerate debug op\n creation failures.\n global_step: Optional global step.\n validate: whether to validate dumped tensors against graph.\n expected_partition_graph_count: optional count of partition graphs to\n assert on.\n\n Returns:\n 1. Return values of the Session.run().\n 2. The DebugDumpDir object from the debugged run().\n \"\"\"\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=debug_ops,\n debug_urls=self._debug_urls(),\n tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,\n global_step=global_step)\n run_metadata = config_pb2.RunMetadata()\n run_output = sess.run(fetches,\n feed_dict=feed_dict,\n options=run_options,\n run_metadata=run_metadata)\n\n if expected_partition_graph_count is not None:\n self.assertEqual(expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n return run_output, debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs,\n validate=validate)\n\n def _generate_dump_from_simple_addition_graph(self):\n with session.Session(config=no_rewrite_session_config()) as sess:\n u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])\n v_init_val = np.array([[2.0], [-1.0]])\n\n # Use node names with overlapping namespace (i.e., parent directory) to\n # test concurrent, non-racing directory creation.\n u_name = \"u\"\n v_name = \"v\"\n w_name = \"w\"\n\n u_init = constant_op.constant(u_init_val, shape=[2, 2])\n u = variables.Variable(u_init, name=u_name)\n v_init = constant_op.constant(v_init_val, shape=[2, 1])\n v = variables.Variable(v_init, name=v_name)\n\n w = math_ops.matmul(u, v, name=w_name)\n\n u.initializer.run()\n v.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_urls = \"file://%s\" % self._dump_root\n\n # Add debug tensor watch for u.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % u_name, 0, debug_urls=debug_urls)\n # Add debug tensor watch for v.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % v_name, 0, debug_urls=debug_urls)\n\n run_metadata = config_pb2.RunMetadata()\n\n # Invoke Session.run().\n sess.run(w, options=run_options, run_metadata=run_metadata)\n\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, 
partition_graphs=run_metadata.partition_graphs)\n\n simple_add_results = collections.namedtuple(\"SimpleAddResults\", [\n \"u_init_val\", \"v_init_val\", \"u\", \"v\", \"w\", \"u_name\", \"v_name\", \"w_name\",\n \"dump\"\n ])\n return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,\n w_name, dump)\n\n def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):\n with session.Session() as sess:\n u = variables.Variable(2.1, name=\"u\")\n v = variables.Variable(20.0, name=\"v\")\n w = math_ops.multiply(u, v, name=\"w\")\n\n sess.run(variables.global_variables_initializer())\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_urls = self._debug_urls()\n debug_utils.add_debug_tensor_watch(\n run_options,\n \"u\",\n 0, [\"DebugNumericSummary(gated_grpc=True)\", \"DebugIdentity\"],\n debug_urls=debug_urls)\n debug_utils.add_debug_tensor_watch(\n run_options, \"v\", 0, [\"DebugNumericSummary\"], debug_urls=debug_urls)\n\n run_metadata = config_pb2.RunMetadata()\n r = sess.run(w, options=run_options, run_metadata=run_metadata)\n self.assertAllClose(42.0, r)\n\n u_copy_node_def = None\n v_copy_node_def = None\n for partition_graph in run_metadata.partition_graphs:\n for node_def in partition_graph.node:\n if debug_data.is_copy_node(node_def.name):\n if node_def.name == \"__copy_u_0\":\n u_copy_node_def = node_def\n elif node_def.name == \"__copy_v_0\":\n v_copy_node_def = node_def\n\n self.assertIsNotNone(u_copy_node_def)\n debug_ops_spec = u_copy_node_def.attr[\"debug_ops_spec\"].list.s\n self.assertEqual(2, len(debug_ops_spec))\n self.assertEqual(\"DebugNumericSummary;%s;1\" % debug_urls[0],\n debug_ops_spec[0].decode(\"utf-8\"))\n self.assertEqual(\"DebugIdentity;%s;0\" % debug_urls[0],\n debug_ops_spec[1].decode(\"utf-8\"))\n\n self.assertIsNotNone(v_copy_node_def)\n debug_ops_spec = v_copy_node_def.attr[\"debug_ops_spec\"].list.s\n self.assertEqual(1, len(debug_ops_spec))\n self.assertEqual(\"DebugNumericSummary;%s;0\" % debug_urls[0],\n debug_ops_spec[0].decode(\"utf-8\"))\n\n def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):\n results = self._generate_dump_from_simple_addition_graph()\n self.assertTrue(results.dump.loaded_partition_graphs())\n\n # Since global_step is not explicitly specified, it should take its default\n # value: -1.\n self.assertEqual(-1, results.dump.core_metadata.global_step)\n self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)\n self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)\n self.assertEqual([], results.dump.core_metadata.input_names)\n self.assertEqual([results.w.name], results.dump.core_metadata.output_names)\n self.assertEqual([], results.dump.core_metadata.target_nodes)\n\n # Verify the dumped tensor values for u and v.\n self.assertEqual(2, results.dump.size)\n\n self.assertAllClose([results.u_init_val],\n results.dump.get_tensors(\"%s/read\" % results.u_name, 0,\n \"DebugIdentity\"))\n self.assertAllClose([results.v_init_val],\n results.dump.get_tensors(\"%s/read\" % results.v_name, 0,\n \"DebugIdentity\"))\n\n self.assertGreaterEqual(\n results.dump.get_rel_timestamps(\"%s/read\" % results.u_name, 0,\n \"DebugIdentity\")[0], 0)\n self.assertGreaterEqual(\n results.dump.get_rel_timestamps(\"%s/read\" % results.v_name, 0,\n \"DebugIdentity\")[0], 0)\n\n self.assertGreater(\n results.dump.get_dump_sizes_bytes(\"%s/read\" % results.u_name, 0,\n \"DebugIdentity\")[0], 0)\n self.assertGreater(\n results.dump.get_dump_sizes_bytes(\"%s/read\" % 
results.v_name, 0,\n \"DebugIdentity\")[0], 0)\n\n def testGetOpTypeWorks(self):\n results = self._generate_dump_from_simple_addition_graph()\n\n self.assertEqual(results.u.op.type,\n results.dump.node_op_type(results.u_name))\n self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))\n self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))\n\n with self.assertRaisesRegexp(\n ValueError, r\"None of the .* device\\(s\\) has a node named \"):\n results.dump.node_op_type(\"foo_bar\")\n\n def testDumpStringTensorsWorks(self):\n with session.Session(config=no_rewrite_session_config()) as sess:\n str1_init_val = np.array(b\"abc\")\n str2_init_val = np.array(b\"def\")\n\n str1_init = constant_op.constant(str1_init_val)\n str2_init = constant_op.constant(str2_init_val)\n\n str1_name = \"str1\"\n str2_name = \"str2\"\n str1 = variables.Variable(str1_init, name=str1_name)\n str2 = variables.Variable(str2_init, name=str2_name)\n # Concatenate str1 and str2\n str_concat = math_ops.add(str1, str2, name=\"str_concat\")\n\n str1.initializer.run()\n str2.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_urls = self._debug_urls()\n\n # Add debug tensor watch for u.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % str1_name, 0, debug_urls=debug_urls)\n # Add debug tensor watch for v.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % str2_name, 0, debug_urls=debug_urls)\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(str_concat, options=run_options, run_metadata=run_metadata)\n\n # String ops are located on CPU.\n self.assertEqual(1, len(run_metadata.partition_graphs))\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n self.assertIn(str1_name, dump.nodes())\n self.assertIn(str2_name, dump.nodes())\n\n self.assertEqual(2, dump.size)\n\n self.assertEqual([str1_init_val],\n dump.get_tensors(\"%s/read\" % str1_name, 0,\n \"DebugIdentity\"))\n self.assertEqual([str2_init_val],\n dump.get_tensors(\"%s/read\" % str2_name, 0,\n \"DebugIdentity\"))\n\n self.assertGreaterEqual(\n dump.get_rel_timestamps(\"%s/read\" % str1_name, 0, \"DebugIdentity\")[0],\n 0)\n self.assertGreaterEqual(\n dump.get_rel_timestamps(\"%s/read\" % str2_name, 0, \"DebugIdentity\")[0],\n 0)\n\n self.assertGreater(\n dump.get_dump_sizes_bytes(\"%s/read\" % str1_name, 0,\n \"DebugIdentity\")[0], 0)\n self.assertGreater(\n dump.get_dump_sizes_bytes(\"%s/read\" % str2_name, 0,\n \"DebugIdentity\")[0], 0)\n\n def testDumpUninitializedVariable(self):\n op_namespace = \"testDumpUninitializedVariable\"\n with session.Session() as sess:\n u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])\n s_init_val = b\"str1\"\n\n u_name = \"%s/u\" % op_namespace\n s_name = \"%s/s\" % op_namespace\n\n u_init = constant_op.constant(u_init_val, shape=[2, 2])\n u = variables.Variable(u_init, name=u_name)\n s_init = constant_op.constant(s_init_val)\n s = variables.Variable(s_init, name=s_name)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_urls = self._debug_urls()\n\n # Add debug tensor watch for u.\n debug_utils.add_debug_tensor_watch(\n run_options, u_name, 0, debug_urls=debug_urls)\n debug_utils.add_debug_tensor_watch(\n run_options, s_name, 0, debug_urls=debug_urls)\n\n run_metadata = config_pb2.RunMetadata()\n\n # Initialize u and s.\n sess.run(variables.global_variables_initializer(),\n options=run_options,\n run_metadata=run_metadata)\n\n # Verify the 
dump file for the uninitialized value of u.\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n self.assertEqual(2, dump.size)\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n # Verify that the variable is properly initialized by the run() call.\n u_vals = dump.get_tensors(u_name, 0, \"DebugIdentity\")\n s_vals = dump.get_tensors(s_name, 0, \"DebugIdentity\")\n self.assertEqual(1, len(u_vals))\n self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)\n self.assertFalse(u_vals[0].initialized)\n self.assertEqual(1, len(s_vals))\n self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)\n self.assertFalse(s_vals[0].initialized)\n\n # Call run() again, to check that u is initialized properly.\n self.assertAllClose(u_init_val, sess.run(u))\n self.assertEqual(s_init_val, sess.run(s))\n\n def testDebugWhileLoopGeneratesMultipleDumps(self):\n with session.Session(config=no_rewrite_session_config()) as sess:\n num_iter = 10\n\n # \"u\" is the Variable being updated in the loop.\n u_name = \"testDumpToFileWhileLoop/u\"\n u_namespace = u_name.split(\"/\")[0]\n\n u_init_val = np.array(11.0)\n u_init = constant_op.constant(u_init_val)\n u = variables.Variable(u_init, name=u_name)\n\n # \"v\" is the increment.\n v_name = \"testDumpToFileWhileLoop/v\"\n v_namespace = v_name.split(\"/\")[0]\n\n v_init_val = np.array(2.0)\n v_init = constant_op.constant(v_init_val)\n v = variables.Variable(v_init, name=v_name)\n\n u.initializer.run()\n v.initializer.run()\n\n i = constant_op.constant(0, name=\"testDumpToFileWhileLoop/i\")\n\n def cond(i):\n return math_ops.less(i, num_iter)\n\n def body(i):\n new_u = state_ops.assign_add(u, v)\n new_i = math_ops.add(i, 1)\n op = control_flow_ops.group(new_u)\n new_i = control_flow_ops.with_dependencies([op], new_i)\n return [new_i]\n\n loop = control_flow_ops.while_loop(\n cond, body, [i], parallel_iterations=10)\n\n # Create RunOptions for debug-watching tensors\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_urls = self._debug_urls()\n\n # Add debug tensor watch for u.\n debug_utils.add_debug_tensor_watch(\n run_options, u_name, 0, debug_urls=debug_urls)\n # Add debug tensor watch for v.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % v_name, 0, debug_urls=debug_urls)\n # Add debug tensor watch for while/Identity.\n debug_utils.add_debug_tensor_watch(\n run_options, \"while/Identity\", 0, debug_urls=debug_urls)\n # Add debug tensor watch for while/Add/y.\n debug_utils.add_debug_tensor_watch(\n run_options, \"while/Add/y\", 0, debug_urls=debug_urls)\n\n run_metadata = config_pb2.RunMetadata()\n r = sess.run(loop, options=run_options, run_metadata=run_metadata)\n\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n self.assertEqual(num_iter, r)\n u_val_final = sess.run(u)\n self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)\n\n # Verify dump files\n self.assertTrue(os.path.isdir(self._dump_root))\n\n u_glob_out = glob.glob(os.path.join(self._dump_root, \"*\", u_namespace))\n v_glob_out = glob.glob(os.path.join(\n self._dump_root, \"*\", v_namespace, \"v\"))\n self.assertTrue(os.path.isdir(u_glob_out[0]))\n self.assertTrue(os.path.isdir(v_glob_out[0]))\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,\n # and 10 
iterations of while/Add/y.\n self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)\n\n # Verify tensor values.\n self.assertAllClose([u_init_val],\n dump.get_tensors(u_name, 0, \"DebugIdentity\"))\n self.assertAllClose([v_init_val],\n dump.get_tensors(\"%s/read\" % v_name, 0,\n \"DebugIdentity\"))\n\n while_id_tensors = dump.get_tensors(\"while/Identity\", 0, \"DebugIdentity\")\n self.assertEqual(10, len(while_id_tensors))\n for k in xrange(len(while_id_tensors)):\n self.assertAllClose(np.array(k), while_id_tensors[k])\n\n # Verify ascending timestamps from the while loops.\n while_id_rel_timestamps = dump.get_rel_timestamps(\"while/Identity\", 0,\n \"DebugIdentity\")\n while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes(\"while/Identity\", 0,\n \"DebugIdentity\")\n self.assertEqual(10, len(while_id_rel_timestamps))\n prev_rel_time = 0\n prev_dump_size_bytes = while_id_dump_sizes_bytes[0]\n for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,\n while_id_dump_sizes_bytes):\n self.assertGreaterEqual(rel_time, prev_rel_time)\n self.assertEqual(dump_size_bytes, prev_dump_size_bytes)\n prev_rel_time = rel_time\n prev_dump_size_bytes = dump_size_bytes\n\n # Test querying debug watch keys from node name.\n watch_keys = dump.debug_watch_keys(\"while/Identity\")\n self.assertEqual([\"while/Identity:0:DebugIdentity\"], watch_keys)\n\n # Test querying debug datum instances from debug watch key.\n self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))\n self.assertEqual([], dump.watch_key_to_data(\"foo\"))\n\n def testDebugWhileLoopWatchingWholeGraphWorks(self):\n with session.Session() as sess:\n loop_body = lambda i: math_ops.add(i, 2)\n loop_cond = lambda i: math_ops.less(i, 16)\n\n i = constant_op.constant(10, name=\"i\")\n loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])\n\n loop_result, dump = self._debug_run_and_get_dump(sess, loop)\n self.assertEqual(16, loop_result)\n\n self.assertEqual(\n [[10]], dump.get_tensors(\"while/Enter\", 0, \"DebugIdentity\"))\n self.assertEqual(\n [[12], [14], [16]],\n dump.get_tensors(\"while/NextIteration\", 0, \"DebugIdentity\"))\n\n def testDebugTrainingDynamicRNNWorks(self):\n with session.Session() as sess:\n input_size = 3\n state_size = 2\n time_steps = 4\n batch_size = 2\n\n input_values = np.random.randn(time_steps, batch_size, input_size)\n sequence_length = np.random.randint(0, time_steps, size=batch_size)\n concat_inputs = array_ops.placeholder(\n dtypes.float32, shape=(time_steps, batch_size, input_size))\n\n outputs_dynamic, _ = rnn.dynamic_rnn(\n _RNNCellForTest(input_size, state_size),\n inputs=concat_inputs,\n sequence_length=sequence_length,\n time_major=True,\n dtype=dtypes.float32)\n toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)\n train_op = gradient_descent.GradientDescentOptimizer(\n learning_rate=0.1).minimize(toy_loss, name=\"train_op\")\n\n sess.run(variables.global_variables_initializer())\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph_with_blacklists(\n run_options,\n sess.graph,\n node_name_regex_blacklist=\"(.*rnn/while/.*|.*TensorArray.*)\",\n debug_urls=self._debug_urls())\n # b/36870549: Nodes with these name patterns need to be excluded from\n # tfdbg in order to prevent MSAN warnings of uninitialized Tensors\n # under both file:// and grpc:// debug URL schemes.\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(train_op, feed_dict={concat_inputs: input_values},\n options=run_options, run_metadata=run_metadata)\n\n 
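# Constructing the DebugDumpDir is itself the check here: with validation\n # enabled (the default), it verifies the dumped tensors against the\n # partition graphs and raises if they do not match.\n 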
debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n def testDebugCondWatchingWholeGraphWorks(self):\n with session.Session() as sess:\n x = variables.Variable(10.0, name=\"x\")\n y = variables.Variable(20.0, name=\"y\")\n cond = control_flow_ops.cond(\n x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))\n\n sess.run(variables.global_variables_initializer())\n\n cond_result, dump = self._debug_run_and_get_dump(sess, cond)\n self.assertEqual(21, cond_result)\n\n self.assertAllClose(\n [21.0], dump.get_tensors(\"cond/Merge\", 0, \"DebugIdentity\"))\n\n def testFindNodesWithBadTensorValues(self):\n with session.Session() as sess:\n u_name = \"testFindNodesWithBadTensorValues/u\"\n v_name = \"testFindNodesWithBadTensorValues/v\"\n w_name = \"testFindNodesWithBadTensorValues/w\"\n x_name = \"testFindNodesWithBadTensorValues/x\"\n y_name = \"testFindNodesWithBadTensorValues/y\"\n z_name = \"testFindNodesWithBadTensorValues/z\"\n\n u_init = constant_op.constant([2.0, 4.0])\n u = variables.Variable(u_init, name=u_name)\n v_init = constant_op.constant([2.0, 1.0])\n v = variables.Variable(v_init, name=v_name)\n\n # Expected output: [0.0, 3.0]\n w = math_ops.subtract(u, v, name=w_name)\n\n # Expected output: [inf, 1.3333]\n x = math_ops.div(u, w, name=x_name)\n\n # Expected output: [nan, 4.0]\n y = math_ops.multiply(w, x, name=y_name)\n\n z = math_ops.multiply(y, y, name=z_name)\n\n u.initializer.run()\n v.initializer.run()\n\n _, dump = self._debug_run_and_get_dump(\n sess, z,\n expected_partition_graph_count=self._expected_partition_graph_count)\n\n def has_bad_value(_, tensor):\n return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))\n\n # Find all \"offending tensors\".\n bad_data = dump.find(has_bad_value)\n\n # Verify that the nodes with bad values are caught through running find\n # on the debug dump.\n self.assertEqual(3, len(bad_data))\n self.assertEqual(x_name, bad_data[0].node_name)\n self.assertEqual(y_name, bad_data[1].node_name)\n self.assertEqual(z_name, bad_data[2].node_name)\n\n # Test first_n kwarg of find(): Find the first offending tensor.\n first_bad_datum = dump.find(has_bad_value, first_n=1)\n\n self.assertEqual(1, len(first_bad_datum))\n self.assertEqual(x_name, first_bad_datum[0].node_name)\n\n def _session_run_for_graph_structure_lookup(self):\n with session.Session(config=no_rewrite_session_config()) as sess:\n u_name = \"testDumpGraphStructureLookup/u\"\n v_name = \"testDumpGraphStructureLookup/v\"\n w_name = \"testDumpGraphStructureLookup/w\"\n\n u_init = constant_op.constant([2.0, 4.0])\n u = variables.Variable(u_init, name=u_name)\n v = math_ops.add(u, u, name=v_name)\n w = math_ops.add(v, v, name=w_name)\n\n u.initializer.run()\n\n _, dump = self._debug_run_and_get_dump(\n sess, w,\n expected_partition_graph_count=self._expected_partition_graph_count)\n\n return u_name, v_name, w_name, dump\n\n def testGraphStructureLookupGivesDevicesAndNodesInfo(self):\n u_name, _, _, dump = self._session_run_for_graph_structure_lookup()\n\n # Test num_devices().\n self.assertEqual(self._expected_num_devices, len(dump.devices()))\n\n # Test node_device().\n self.assertEqual(self._main_device, dump.node_device(u_name))\n\n with self.assertRaisesRegexp(ValueError,\n \"does not exist in partition graphs\"):\n dump.node_device(u_name + \"foo\")\n\n # Test node_exists().\n self.assertTrue(dump.node_exists(u_name))\n self.assertTrue(dump.node_exists(u_name + \"/read\"))\n self.assertFalse(dump.node_exists(u_name + \"/read\" + 
\"/foo\"))\n\n def testGraphStructureLookupGivesNodesAndAttributes(self):\n u_name, _, _, dump = self._session_run_for_graph_structure_lookup()\n\n u_read_name = u_name + \"/read\"\n\n # Test node name list lookup of the DebugDumpDir object.\n if test_util.gpu_device_name():\n node_names = dump.nodes(\n device_name=\"/job:localhost/replica:0/task:0/device:GPU:0\")\n else:\n node_names = dump.nodes()\n self.assertTrue(u_name in node_names)\n self.assertTrue(u_read_name in node_names)\n\n # Test querying node attributes.\n u_attr = dump.node_attributes(u_name)\n self.assertEqual(dtypes.float32, u_attr[\"dtype\"].type)\n self.assertEqual(1, len(u_attr[\"shape\"].shape.dim))\n self.assertEqual(2, u_attr[\"shape\"].shape.dim[0].size)\n\n with self.assertRaisesRegexp(\n ValueError, r\"None of the .* device\\(s\\) has a node named \"):\n dump.node_attributes(\"foo\")\n\n def testGraphStructureLookupGivesDebugWatchKeys(self):\n u_name, v_name, w_name, dump = (\n self._session_run_for_graph_structure_lookup())\n\n # Test querying the debug watch keys with node names.\n self.assertEqual([\"%s:0:DebugIdentity\" % u_name],\n dump.debug_watch_keys(u_name))\n self.assertEqual([\"%s:0:DebugIdentity\" % v_name],\n dump.debug_watch_keys(v_name))\n self.assertEqual([\"%s:0:DebugIdentity\" % w_name],\n dump.debug_watch_keys(w_name))\n self.assertEqual([], dump.debug_watch_keys(\"foo\"))\n\n # Test querying debug datum instances from debug watch.\n u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])\n self.assertEqual(1, len(u_data))\n self.assertEqual(u_name, u_data[0].node_name)\n self.assertEqual(0, u_data[0].output_slot)\n self.assertEqual(\"DebugIdentity\", u_data[0].debug_op)\n self.assertGreaterEqual(u_data[0].timestamp, 0)\n self.assertEqual([], dump.watch_key_to_data(\"foo\"))\n\n def testGraphStructureLookupGivesNodeInputsAndRecipients(self):\n u_name, v_name, w_name, dump = (\n self._session_run_for_graph_structure_lookup())\n\n u_read_name = u_name + \"/read\"\n\n # Test the inputs lookup of the DebugDumpDir object.\n self.assertEqual([], dump.node_inputs(u_name))\n self.assertEqual([u_name], dump.node_inputs(u_read_name))\n self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))\n self.assertEqual([v_name] * 2, dump.node_inputs(w_name))\n\n self.assertEqual([], dump.node_inputs(u_name, is_control=True))\n self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))\n self.assertEqual([], dump.node_inputs(v_name, is_control=True))\n self.assertEqual([], dump.node_inputs(w_name, is_control=True))\n\n # Test the outputs recipient lookup of the DebugDumpDir object.\n self.assertTrue(u_read_name in dump.node_recipients(u_name))\n self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))\n self.assertEqual(2, dump.node_recipients(v_name).count(w_name))\n\n self.assertEqual([], dump.node_recipients(u_name, is_control=True))\n self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))\n self.assertEqual([], dump.node_recipients(v_name, is_control=True))\n self.assertEqual([], dump.node_recipients(w_name, is_control=True))\n\n # Test errors raised on invalid node names.\n with self.assertRaisesRegexp(\n ValueError, r\"None of the .* device\\(s\\) has a node named \"):\n dump.node_inputs(u_name + \"foo\")\n with self.assertRaisesRegexp(\n ValueError, r\"None of the .* device\\(s\\) has a node named \"):\n dump.node_recipients(u_name + \"foo\")\n\n # Test transitive_inputs().\n self.assertEqual([], dump.transitive_inputs(u_name))\n 
self.assertEqual([u_name], dump.transitive_inputs(u_read_name))\n self.assertEqual(\n set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))\n self.assertEqual(\n set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))\n\n with self.assertRaisesRegexp(\n ValueError, r\"None of the .* device\\(s\\) has a node named \"):\n dump.transitive_inputs(u_name + \"foo\")\n\n def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):\n _, _, _, dump = self._session_run_for_graph_structure_lookup()\n\n # Now load the dump again, without the partition graphs, so we can check\n # errors are not raised because the partition graphs are loaded from the\n # dump directory.\n dump = debug_data.DebugDumpDir(self._dump_root, validate=False)\n self.assertTrue(dump.loaded_partition_graphs())\n\n def testGraphPathFindingOnControlEdgesWorks(self):\n with session.Session(config=no_rewrite_session_config()) as sess:\n v1 = variables.Variable(1.0, name=\"v1\")\n v2 = variables.Variable(2.0, name=\"v2\")\n v3 = variables.Variable(3.0, name=\"v3\")\n a = math_ops.add(v1, v2, name=\"a\")\n with ops.control_dependencies([a]):\n c = math_ops.subtract(v3, v3, name=\"c\")\n\n sess.run(variables.global_variables_initializer())\n _, dump = self._debug_run_and_get_dump(sess, c)\n\n self.assertEqual([\"v1\", \"v1/read\", \"a\", \"c\"],\n dump.find_some_path(\"v1\", \"c\"))\n self.assertIsNone(dump.find_some_path(\"v1\", \"c\", include_control=False))\n\n def testGraphPathFindingReverseRefEdgeWorks(self):\n with session.Session(config=no_rewrite_session_config()) as sess:\n v = variables.Variable(10.0, name=\"v\")\n delta = variables.Variable(1.0, name=\"delta\")\n inc_v = state_ops.assign_add(v, delta, name=\"inc_v\")\n\n sess.run(variables.global_variables_initializer())\n _, dump = self._debug_run_and_get_dump(sess, inc_v)\n\n self.assertEqual(\n [\"delta\", \"delta/read\", \"inc_v\", \"v\"],\n dump.find_some_path(\"delta\", \"v\", include_reversed_ref=True))\n self.assertIsNone(dump.find_some_path(\"delta\", \"v\"))\n\n def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):\n with session.Session() as sess:\n u_name = \"testDumpCausalityCheck/u\"\n v_name = \"testDumpCausalityCheck/v\"\n w_name = \"testDumpCausalityCheck/w\"\n\n u_init = constant_op.constant([2.0, 4.0])\n u = variables.Variable(u_init, name=u_name)\n v = math_ops.add(u, u, name=v_name)\n w = math_ops.add(v, v, name=w_name)\n\n u.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=self._debug_urls())\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(w, options=run_options, run_metadata=run_metadata)\n\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n # First, loading the original dump without supplying the\n # partition_graphs should not cause a LookupError, validation occurs\n # only with partition_graphs loaded.\n debug_data.DebugDumpDir(self._dump_root)\n\n # Now, loading the original dump with partition graphs supplied should\n # succeed. 
The validation should pass quietly.\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Get the dump file names and compute their timestamps.\n self.assertEqual(\n 1, len(dump.get_tensor_file_paths(v_name, 0, \"DebugIdentity\")))\n v_file_path = dump.get_tensor_file_paths(v_name, 0, \"DebugIdentity\")[0]\n\n self.assertEqual(\n 1, len(dump.get_tensor_file_paths(w_name, 0, \"DebugIdentity\")))\n w_file_path = dump.get_tensor_file_paths(w_name, 0, \"DebugIdentity\")[0]\n\n v_timestamp = int(v_file_path[v_file_path.rindex(\"_\") + 1:])\n w_timestamp = int(w_file_path[w_file_path.rindex(\"_\") + 1:])\n\n # Swap and slightly shift the time stamps of the last two dumped tensors,\n # to simulate \"causality violation\", which can happen if the dump\n # directory contains incomplete data and/or mixes data from different\n # Session.run() calls.\n v_file_path_1 = v_file_path[:v_file_path.rindex(\n \"_\")] + \"_%d\" % w_timestamp\n w_file_path_1 = w_file_path[:w_file_path.rindex(\"_\")] + \"_%d\" % (\n v_timestamp - 1)\n\n os.rename(v_file_path, v_file_path_1)\n os.rename(w_file_path, w_file_path_1)\n\n # Load the dump directory again. Now a ValueError is expected to be\n # raised due to the timestamp swap.\n with self.assertRaisesRegexp(ValueError, \"Causality violated\"):\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Loading the dump directory with kwarg \"validate\" set explicitly to\n # False should get rid of the error.\n dump = debug_data.DebugDumpDir(\n self._dump_root,\n partition_graphs=run_metadata.partition_graphs,\n validate=False)\n\n # Next, set the two times stamps to be the same, which should be fine.\n v_file_path_2 = v_file_path[:v_file_path.rindex(\n \"_\")] + \"_%d\" % w_timestamp\n w_file_path_2 = w_file_path[:w_file_path.rindex(\n \"_\")] + \"_%d\" % w_timestamp\n\n os.rename(v_file_path_1, v_file_path_2)\n os.rename(w_file_path_1, w_file_path_2)\n\n debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):\n with session.Session() as sess:\n x_name = \"oneOfTwoSlots/x\"\n u_name = \"oneOfTwoSlots/u\"\n v_name = \"oneOfTwoSlots/v\"\n w_name = \"oneOfTwoSlots/w\"\n y_name = \"oneOfTwoSlots/y\"\n\n x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)\n sess.run(x.initializer)\n\n unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)\n\n v = math_ops.add(unique_x, unique_x, name=v_name)\n w = math_ops.add(indices, indices, name=w_name)\n y = math_ops.add(w, w, name=y_name)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n # Watch only the first output slot of u, even though it has two output\n # slots.\n debug_utils.add_debug_tensor_watch(\n run_options, u_name, 0, debug_urls=self._debug_urls())\n debug_utils.add_debug_tensor_watch(\n run_options, w_name, 0, debug_urls=self._debug_urls())\n debug_utils.add_debug_tensor_watch(\n run_options, y_name, 0, debug_urls=self._debug_urls())\n\n run_metadata = config_pb2.RunMetadata()\n sess.run([v, y], options=run_options, run_metadata=run_metadata)\n\n dump = debug_data.DebugDumpDir(\n self._dump_root,\n partition_graphs=run_metadata.partition_graphs,\n validate=True)\n\n self.assertAllClose([1, 3, 7],\n dump.get_tensors(u_name, 0, \"DebugIdentity\")[0])\n\n def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):\n \"\"\"Test watching output slots not attached 
to any outgoing edges.\"\"\"\n\n with session.Session() as sess:\n u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])\n u = constant_op.constant(u_init_val, shape=[2, 2], name=\"u\")\n\n # Create a control edge from a node with an output: From u to z.\n # Node u will get executed only because of the control edge. The output\n # tensor u:0 is not attached to any outgoing edge in the graph. This test\n # checks that the debugger can watch such a tensor.\n with ops.control_dependencies([u]):\n z = control_flow_ops.no_op(name=\"z\")\n\n _, dump = self._debug_run_and_get_dump(sess, z)\n\n # Assert that the DebugIdentity watch on u works properly.\n self.assertEqual(1, len(dump.dumped_tensor_data))\n datum = dump.dumped_tensor_data[0]\n self.assertEqual(\"u\", datum.node_name)\n self.assertEqual(0, datum.output_slot)\n self.assertEqual(\"DebugIdentity\", datum.debug_op)\n self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())\n\n def testWatchingVariableUpdateOpsSeesUpdatedValues(self):\n \"\"\"Watch output slots on Variable-updating ops, with no emitted edges.\"\"\"\n\n with session.Session() as sess:\n u_init = constant_op.constant(10.0)\n u = variables.Variable(u_init, name=\"gdo/u\")\n v_init = constant_op.constant(20.0)\n v = variables.Variable(v_init, name=\"gdo/v\")\n\n w = math_ops.multiply(u, v, name=\"gdo/w\")\n # gdo stands for GradientDescentOptimizer.\n\n train_op = gradient_descent.GradientDescentOptimizer(\n learning_rate=0.1).minimize(\n w, name=\"gdo/train\")\n\n u.initializer.run()\n v.initializer.run()\n\n _, dump = self._debug_run_and_get_dump(sess, train_op)\n\n update_u_data = dump.watch_key_to_data(\n \"gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity\")\n self.assertEqual(1, len(update_u_data))\n\n # Gradient descent on u: w = u * v, so dw / du = v.\n # Updated value of u should be:\n # 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0\n self.assertAllClose(8.0, update_u_data[0].get_tensor())\n\n update_v_data = dump.watch_key_to_data(\n \"gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity\")\n self.assertEqual(1, len(update_v_data))\n\n # Gradient descent on v: w = u * v, so dw / dv = u.\n # Updated value of v should be:\n # 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0\n self.assertAllClose(19.0, update_v_data[0].get_tensor())\n\n # Verify that the Variables u and v are updated properly.\n self.assertAllClose(8.0, sess.run(u))\n self.assertAllClose(19.0, sess.run(v))\n\n def testAllowsWatchingUnconnectedOutputTensor(self):\n \"\"\"Watch an output slot not emitting any edges.\n\n (Not even control edges from the node.)\n \"\"\"\n\n with session.Session() as sess:\n x_init = constant_op.constant([2, 2, 3, 5, 5])\n x = variables.Variable(x_init, name=\"unconnected/x\")\n\n # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the\n # graph. 
Let the debugger watch the unused slot 1.\n unique_x, _ = array_ops.unique(x, name=\"unconnected/unique_x\")\n y = math_ops.add(unique_x, [0, 1, 2], name=\"unconnected/y\")\n\n x.initializer.run()\n\n # Verify that only slot 0 of unique_x has recipients, while slot 1 of the\n # same node does not have recipients.\n unique_x_slot_0_recipients = []\n unique_x_slot_1_recipients = []\n for op in sess.graph.get_operations():\n for inp in op.inputs:\n if inp.name == \"unconnected/unique_x:0\":\n unique_x_slot_0_recipients.append(op.name)\n elif inp.name == \"unconnected/unique_x:1\":\n unique_x_slot_1_recipients.append(op.name)\n\n self.assertEqual([\"unconnected/y\"], unique_x_slot_0_recipients)\n self.assertEqual([], unique_x_slot_1_recipients)\n\n y_result, dump = self._debug_run_and_get_dump(sess, y)\n self.assertAllClose([2, 4, 7], y_result)\n\n # Assert that the connected slot (slot 0) is dumped properly.\n unique_x_slot_0_dumps = dump.watch_key_to_data(\n \"unconnected/unique_x:0:DebugIdentity\")\n self.assertEqual(1, len(unique_x_slot_0_dumps))\n self.assertEqual(\"unconnected/unique_x\",\n unique_x_slot_0_dumps[0].node_name)\n self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)\n self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())\n\n # Assert that the unconnected slot (slot 1) is dumped properly.\n unique_x_slot_1_dumps = dump.watch_key_to_data(\n \"unconnected/unique_x:1:DebugIdentity\")\n self.assertEqual(1, len(unique_x_slot_1_dumps))\n self.assertEqual(\"unconnected/unique_x\",\n unique_x_slot_1_dumps[0].node_name)\n self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)\n self.assertAllClose([0, 0, 1, 2, 2],\n unique_x_slot_1_dumps[0].get_tensor())\n\n def testSuccessiveDebuggingRunsIncreasesCounters(self):\n \"\"\"Test repeated Session.run() calls with debugger increments counters.\"\"\"\n\n with session.Session() as sess:\n ph = array_ops.placeholder(dtypes.float32, name=\"successive/ph\")\n x = array_ops.transpose(ph, name=\"mismatch/x\")\n y = array_ops.squeeze(ph, name=\"mismatch/y\")\n\n _, dump1 = self._debug_run_and_get_dump(\n sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)\n self.assertEqual(1, dump1.core_metadata.global_step)\n self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)\n self.assertEqual(0, dump1.core_metadata.executor_step_index)\n self.assertEqual([ph.name], dump1.core_metadata.input_names)\n self.assertEqual([x.name], dump1.core_metadata.output_names)\n self.assertEqual([], dump1.core_metadata.target_nodes)\n shutil.rmtree(self._dump_root)\n\n # Calling run() with the same feed, same output and same debug watch\n # options should increment both session_run_index and\n # executor_step_index.\n _, dump2 = self._debug_run_and_get_dump(\n sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)\n self.assertEqual(2, dump2.core_metadata.global_step)\n self.assertEqual(dump1.core_metadata.session_run_index + 1,\n dump2.core_metadata.session_run_index)\n self.assertEqual(dump1.core_metadata.executor_step_index + 1,\n dump2.core_metadata.executor_step_index)\n self.assertEqual([ph.name], dump2.core_metadata.input_names)\n self.assertEqual([x.name], dump2.core_metadata.output_names)\n self.assertEqual([], dump2.core_metadata.target_nodes)\n shutil.rmtree(self._dump_root)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)\n\n # Calling run() with a different output should increment\n # 
session_run_index, but not executor_step_index.\n _, dump3 = self._debug_run_and_get_dump(\n sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)\n self.assertEqual(3, dump3.core_metadata.global_step)\n self.assertEqual(dump2.core_metadata.session_run_index + 1,\n dump3.core_metadata.session_run_index)\n self.assertEqual(0, dump3.core_metadata.executor_step_index)\n self.assertEqual([ph.name], dump3.core_metadata.input_names)\n self.assertEqual([y.name], dump3.core_metadata.output_names)\n self.assertEqual([], dump3.core_metadata.target_nodes)\n\n def testDebuggingDuringOpError(self):\n \"\"\"Test the debug tensor dumping when error occurs in graph runtime.\"\"\"\n\n with session.Session() as sess:\n ph = array_ops.placeholder(dtypes.float32, name=\"mismatch/ph\")\n x = array_ops.transpose(ph, name=\"mismatch/x\")\n m = constant_op.constant(\n np.array(\n [[1.0, 2.0]], dtype=np.float32), name=\"mismatch/m\")\n y = math_ops.matmul(m, x, name=\"mismatch/y\")\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=self._debug_urls())\n\n with self.assertRaises(errors.OpError):\n sess.run(y,\n options=run_options,\n feed_dict={ph: np.array([[-3.0], [0.0]])})\n\n dump = debug_data.DebugDumpDir(self._dump_root)\n\n self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)\n self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)\n self.assertEqual([ph.name], dump.core_metadata.input_names)\n self.assertEqual([y.name], dump.core_metadata.output_names)\n self.assertEqual([], dump.core_metadata.target_nodes)\n\n # Despite the fact that the run() call errored out and partition_graphs\n # are not available via run_metadata, the partition graphs should still\n # have been loaded from the dump directory.\n self.assertTrue(dump.loaded_partition_graphs())\n\n m_dumps = dump.watch_key_to_data(\"mismatch/m:0:DebugIdentity\")\n self.assertEqual(1, len(m_dumps))\n self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())\n\n x_dumps = dump.watch_key_to_data(\"mismatch/x:0:DebugIdentity\")\n self.assertEqual(1, len(x_dumps))\n self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())\n\n def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):\n with session.Session(config=no_rewrite_session_config()) as sess:\n a = variables.Variable(\n [\n np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,\n -np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan\n ],\n dtype=np.float32,\n name=\"numeric_summary/a\")\n b = variables.Variable(\n [0.0] * 18, dtype=np.float32, name=\"numeric_summary/b\")\n c = math_ops.add(a, b, name=\"numeric_summary/c\")\n\n sess.run(variables.global_variables_initializer())\n\n _, dump = self._debug_run_and_get_dump(\n sess, c, debug_ops=[\"DebugNumericSummary\"])\n self.assertTrue(dump.loaded_partition_graphs())\n\n self.assertAllClose([[\n 1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,\n 8.97959184, 1.0, 1.0, 18.0\n ]], dump.get_tensors(\"numeric_summary/a/read\", 0, \"DebugNumericSummary\"))\n\n def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):\n with session.Session() as sess:\n a = variables.Variable(\n [42], dtype=np.float32, name=\"numeric_summary_uninit/a\")\n\n _, dump = self._debug_run_and_get_dump(\n sess, a.initializer, debug_ops=[\"DebugNumericSummary\"])\n\n self.assertTrue(dump.loaded_partition_graphs())\n\n # DebugNumericSummary output 
should reflect the uninitialized state of\n # the watched tensor.\n numeric_summary = dump.get_tensors(\"numeric_summary_uninit/a\", 0,\n \"DebugNumericSummary\")[0]\n self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n numeric_summary[0:8])\n # Check dtype (index 12), ndims (index 13) and dimension sizes (index\n # 14+).\n self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])\n self.assertTrue(np.isinf(numeric_summary[8]))\n self.assertGreater(numeric_summary[8], 0.0)\n self.assertTrue(np.isinf(numeric_summary[9]))\n self.assertLess(numeric_summary[9], 0.0)\n self.assertTrue(np.isnan(numeric_summary[10]))\n self.assertTrue(np.isnan(numeric_summary[11]))\n\n def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):\n with session.Session() as sess:\n a = variables.Variable(\"1\", name=\"a\")\n b = variables.Variable(\"3\", name=\"b\")\n c = variables.Variable(\"2\", name=\"c\")\n\n d = math_ops.add(a, b, name=\"d\")\n e = math_ops.add(d, c, name=\"e\")\n n = parsing_ops.string_to_number(e, name=\"n\")\n m = math_ops.add(n, n, name=\"m\")\n\n sess.run(variables.global_variables_initializer())\n\n # Using DebugNumericSummary on sess.run(m) with the default\n # tolerate_debug_op_creation_failures=False should error out due to the\n # presence of string-dtype Tensors in the graph.\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary\"],\n debug_urls=self._debug_urls())\n with self.assertRaises(errors.FailedPreconditionError):\n sess.run(m, options=run_options, run_metadata=run_metadata)\n\n # Using tolerate_debug_op_creation_failures=True should get rid of the\n # error.\n m_result, dump = self._debug_run_and_get_dump(\n sess, m, debug_ops=[\"DebugNumericSummary\"],\n tolerate_debug_op_creation_failures=True)\n self.assertEqual(264, m_result)\n\n # The integer-dtype Tensors in the graph should have been dumped\n # properly.\n self.assertIn(\"n:0:DebugNumericSummary\", dump.debug_watch_keys(\"n\"))\n self.assertIn(\"m:0:DebugNumericSummary\", dump.debug_watch_keys(\"m\"))\n\n def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):\n with session.Session(config=no_rewrite_session_config()) as sess:\n a = variables.Variable(10.0, name=\"a\")\n b = variables.Variable(0.0, name=\"b\")\n c = variables.Variable(0.0, name=\"c\")\n\n x = math_ops.divide(a, b, name=\"x\")\n y = math_ops.multiply(x, c, name=\"y\")\n\n sess.run(variables.global_variables_initializer())\n\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary(foo=1.0)\"],\n debug_urls=self._debug_urls())\n with self.assertRaisesRegexp(\n errors.FailedPreconditionError,\n r\"1 attribute key\\(s\\) were not valid for debug node \"\n r\"__dbg_.:0_0_DebugNumericSummary: foo\"):\n sess.run(y, options=run_options, run_metadata=run_metadata)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary(foo=1.0; bar=false)\"],\n debug_urls=self._debug_urls())\n with self.assertRaisesRegexp(\n errors.FailedPreconditionError,\n r\"2 attribute key\\(s\\) were not valid for debug node \"\n r\"__dbg_.:0_0_DebugNumericSummary:\"):\n sess.run(y, options=run_options, run_metadata=run_metadata)\n\n run_options = 
config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary(foo=1.0; mute_if_healthy=true)\"],\n debug_urls=self._debug_urls())\n with self.assertRaisesRegexp(\n errors.FailedPreconditionError,\n r\"1 attribute key\\(s\\) were not valid for debug node \"\n r\"__dbg_.:0_0_DebugNumericSummary: foo\"):\n sess.run(y, options=run_options, run_metadata=run_metadata)\n\n def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):\n with session.Session(config=no_rewrite_session_config()) as sess:\n a = variables.Variable(10.0, name=\"a\")\n b = variables.Variable(0.0, name=\"b\")\n c = variables.Variable(0.0, name=\"c\")\n\n x = math_ops.divide(a, b, name=\"x\")\n y = math_ops.multiply(x, c, name=\"y\")\n\n sess.run(variables.global_variables_initializer())\n\n # Here, validate=False is necessary to avoid causality check error.\n # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore\n # debug ops with mute_if_healthy=false attribute during validation.\n _, dump = self._debug_run_and_get_dump(\n sess, y, debug_ops=[\"DebugNumericSummary(mute_if_healthy=true)\"],\n validate=False)\n\n self.assertEqual(2, dump.size)\n self.assertAllClose([[\n 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,\n np.nan, 1.0, 0.0\n ]], dump.get_tensors(\"x\", 0, \"DebugNumericSummary\"))\n self.assertAllClose([[\n 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,\n np.nan, 1.0, 0.0\n ]], dump.get_tensors(\"y\", 0, \"DebugNumericSummary\"))\n\n # Another run with the default mute_if_healthy (false) value should\n # dump all the tensors.\n shutil.rmtree(self._dump_root)\n _, dump = self._debug_run_and_get_dump(\n sess, y, debug_ops=[\"DebugNumericSummary()\"])\n self.assertEqual(8, dump.size)\n\n def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):\n with session.Session() as sess:\n a = variables.Variable([10.0, 10.0], name=\"a\")\n b = variables.Variable([10.0, 2.0], name=\"b\")\n\n x = math_ops.add(a, b, name=\"x\") # [20.0, 12.0]\n y = math_ops.divide(x, b, name=\"y\") # [2.0, 6.0]\n\n sess.run(variables.global_variables_initializer())\n\n # Here, validate=False is necessary to avoid causality check error.\n # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore\n # debug ops with mute_if_healthy=false attribute during validation.\n _, dump = self._debug_run_and_get_dump(\n sess, y, debug_ops=[\n \"DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)\"],\n validate=False)\n\n self.assertEqual(1, dump.size)\n self.assertAllClose([[\n 1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,\n 1.0, 2.0]], dump.get_tensors(\"x\", 0, \"DebugNumericSummary\"))\n\n def testDebugQueueOpsDoesNotErrorOut(self):\n with session.Session() as sess:\n q = data_flow_ops.FIFOQueue(3, \"float\", name=\"fifo_queue\")\n q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name=\"enqueue_many\")\n\n _, dump = self._debug_run_and_get_dump(sess, q_init)\n self.assertTrue(dump.loaded_partition_graphs())\n\n fifo_queue_tensor = dump.get_tensors(\"fifo_queue\", 0, \"DebugIdentity\")[0]\n self.assertIsInstance(fifo_queue_tensor,\n debug_data.InconvertibleTensorProto)\n self.assertTrue(fifo_queue_tensor.initialized)\n self.assertAllClose(\n [101.0, 202.0, 303.0],\n dump.get_tensors(\"enqueue_many/component_0\", 0, \"DebugIdentity\")[0])\n\n def testLookUpNodePythonTracebackWorks(self):\n with session.Session() as sess:\n u_init = constant_op.constant(10.0)\n 
u = variables.Variable(u_init, name=\"traceback/u\")\n v_init = constant_op.constant(20.0)\n v = variables.Variable(v_init, name=\"traceback/v\")\n\n w = math_ops.multiply(u, v, name=\"traceback/w\")\n\n sess.run(variables.global_variables_initializer())\n _, dump = self._debug_run_and_get_dump(sess, w)\n\n # Prior to setting the Python graph, attempts to do traceback lookup\n # should lead to exceptions.\n with self.assertRaisesRegexp(\n LookupError, \"Python graph is not available for traceback lookup\"):\n dump.node_traceback(\"traceback/w\")\n\n dump.set_python_graph(sess.graph)\n\n # After setting the Python graph, attempts to look up nonexistent nodes\n # should lead to exceptions.\n with self.assertRaisesRegexp(KeyError,\n r\"Cannot find node \\\"foo\\\" in Python graph\"):\n dump.node_traceback(\"foo\")\n\n # Lookup should work with node name input.\n traceback = dump.node_traceback(\"traceback/w\")\n self.assertIsInstance(traceback, list)\n self.assertGreater(len(traceback), 0)\n for trace in traceback:\n self.assertIsInstance(trace, tuple)\n\n # Lookup should also work with tensor name input.\n traceback = dump.node_traceback(\"traceback/w:0\")\n self.assertIsInstance(traceback, list)\n self.assertGreater(len(traceback), 0)\n for trace in traceback:\n self.assertIsInstance(trace, tuple)\n\n\nclass DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):\n \"\"\"Test for debugging concurrent Session.run() calls.\"\"\"\n\n def _get_concurrent_debug_urls(self):\n \"\"\"Abstract method to generate debug URLs for concurrent debugged runs.\"\"\"\n raise NotImplementedError(\n \"_get_concurrent_debug_urls is not implemented in the base test class\")\n\n def testDebugConcurrentVariableUpdates(self):\n if test.is_gpu_available():\n self.skipTest(\"No testing concurrent runs on a single GPU.\")\n\n with session.Session() as sess:\n v = variables.Variable(30.0, name=\"v\")\n constants = []\n for i in xrange(self._num_concurrent_runs):\n constants.append(constant_op.constant(1.0, name=\"c%d\" % i))\n incs = [\n state_ops.assign_add(\n v, c, use_locking=True, name=(\"inc%d\" % i))\n for (i, c) in enumerate(constants)\n ]\n sess.run(v.initializer)\n\n concurrent_debug_urls = self._get_concurrent_debug_urls()\n\n def inc_job(index):\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options, sess.graph, debug_urls=concurrent_debug_urls[index])\n for _ in xrange(100):\n sess.run(incs[index], options=run_options)\n\n inc_threads = []\n for index in xrange(self._num_concurrent_runs):\n inc_thread = threading.Thread(target=functools.partial(inc_job, index))\n inc_thread.start()\n inc_threads.append(inc_thread)\n for inc_thread in inc_threads:\n inc_thread.join()\n\n self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,\n sess.run(v))\n\n all_session_run_indices = []\n for index in xrange(self._num_concurrent_runs):\n dump = debug_data.DebugDumpDir(self._dump_roots[index])\n self.assertTrue(dump.loaded_partition_graphs())\n\n v_data = dump.get_tensors(\"v\", 0, \"DebugIdentity\")\n self.assertEqual(100, len(v_data))\n\n # Examine all the core metadata files\n core_metadata_files = glob.glob(\n os.path.join(self._dump_roots[index], \"_tfdbg_core*\"))\n\n timestamps = []\n session_run_indices = []\n executor_step_indices = []\n for core_metadata_file in core_metadata_files:\n with open(core_metadata_file, \"rb\") as f:\n event = event_pb2.Event()\n event.ParseFromString(f.read())\n core_metadata = (\n 
debug_data.extract_core_metadata_from_event_proto(event))\n timestamps.append(event.wall_time)\n session_run_indices.append(core_metadata.session_run_index)\n executor_step_indices.append(core_metadata.executor_step_index)\n\n all_session_run_indices.extend(session_run_indices)\n\n # Assert that executor_step_index increases by one at a time.\n executor_step_indices = zip(timestamps, executor_step_indices)\n executor_step_indices = sorted(\n executor_step_indices, key=lambda x: x[0])\n for i in xrange(len(executor_step_indices) - 1):\n self.assertEqual(executor_step_indices[i][1] + 1,\n executor_step_indices[i + 1][1])\n\n # Assert that session_run_index increases monotonically.\n session_run_indices = zip(timestamps, session_run_indices)\n session_run_indices = sorted(session_run_indices, key=lambda x: x[0])\n for i in xrange(len(session_run_indices) - 1):\n self.assertGreater(session_run_indices[i + 1][1],\n session_run_indices[i][1])\n\n # Assert that the session_run_indices from the concurrent run() calls are\n # all unique.\n self.assertEqual(len(all_session_run_indices),\n len(set(all_session_run_indices)))\n\n\nif __name__ == \"__main__\":\n googletest.main()\n" ]
[ [ "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.eager.backprop.gradients_function", "tensorflow.python.eager.test.main", "tensorflow.python.ops.array_ops.split", "tensorflow.python.eager.context.graph_mode", "tensorflow.python.eager.backprop.val_and_grad_function", "tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.eager.tensor.Tensor", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.contrib.learn.python.learn.estimators.model_fn.ModelFnOps", "tensorflow.python.training.monitored_session.Scaffold", "tensorflow.contrib.learn.python.learn.utils.saved_model_export_utils.get_input_alternatives", "tensorflow.python.ops.variables._all_saveable_objects", "tensorflow.contrib.learn.python.learn.learn_io.data_feeder.setup_train_data_feeder", "tensorflow.contrib.learn.python.learn.utils.saved_model_export_utils.get_timestamped_export_dir", "numpy.concatenate", "tensorflow.contrib.layers.infer_real_valued_columns", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.training.monitored_session.MonitoredTrainingSession", "tensorflow.core.framework.summary_pb2.Summary.FromString", "tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_placeholders_from_signatures", "tensorflow.contrib.framework.list_variables", "tensorflow.python.training.saver.Saver", "tensorflow.python.training.saver.latest_checkpoint", "tensorflow.python.framework.ops.get_collection", "tensorflow.contrib.meta_graph_transform.meta_graph_transform.meta_graph_transform", "tensorflow.python.training.device_setter.replica_device_setter", "tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_signatures", "tensorflow.python.platform.gfile.MakeDirs", "tensorflow.contrib.learn.python.learn.utils.export._export_estimator", "tensorflow.contrib.framework.load_variable", "tensorflow.python.training.basic_session_run_hooks.FeedFnHook", "tensorflow.contrib.learn.python.learn.monitors.replace_monitors_with_hooks", "tensorflow.python.training.basic_session_run_hooks.LoggingTensorHook", "tensorflow.python.util.tf_decorator.unwrap", "tensorflow.python.training.basic_session_run_hooks.CheckpointSaverHook", "tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_example_parser_from_signatures", "tensorflow.contrib.framework.deprecated_args", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.saved_model.builder.SavedModelBuilder", "tensorflow.contrib.learn.python.learn.utils.saved_model_export_utils.get_output_alternatives", "tensorflow.python.training.monitored_session.ChiefSessionCreator", "tensorflow.python.training.basic_session_run_hooks.NanTensorHook", "tensorflow.python.ops.resources.shared_resources", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.ops.lookup_ops.tables_initializer", "tensorflow.contrib.learn.python.learn.utils.saved_model_export_utils.get_temp_export_dir", "tensorflow.python.platform.gfile.Copy", "tensorflow.python.client.session.Session", "tensorflow.python.training.training_util.create_global_step", "tensorflow.contrib.metrics.streaming_mean", "tensorflow.python.framework.random_seed.set_random_seed", "tensorflow.contrib.framework.deprecated", "tensorflow.contrib.training.python.training.evaluation.evaluate_once", 
"tensorflow.contrib.learn.python.learn.utils.saved_model_export_utils.build_all_signature_defs", "tensorflow.contrib.learn.python.learn.estimators.tensor_signature.tensors_compatible", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.platform.gfile.Rename", "tensorflow.python.framework.ops.Graph", "tensorflow.python.summary.summary.FileWriterCache.get", "tensorflow.contrib.learn.python.learn.estimators._sklearn.NotFittedError", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.summary.summary.FileWriterCache.clear", "tensorflow.python.training.basic_session_run_hooks.StopAtStepHook", "tensorflow.core.framework.summary_pb2.Summary", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.util.tf_inspect.getargspec", "tensorflow.python.ops.variables.local_variables_initializer", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.contrib.training.python.training.evaluation.StopAfterNEvalsHook" ], [ "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.ops.math_ops.subtract", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.core.protobuf.config_pb2.GraphOptions", "tensorflow.python.ops.control_flow_ops.while_loop", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.array_ops.squeeze", "numpy.random.randn", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.debug.lib.debug_utils.add_debug_tensor_watch", "numpy.random.randint", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.debug.lib.debug_data.extract_core_metadata_from_event_proto", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.ops.math_ops.divide", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.math_ops.add", "tensorflow.core.util.event_pb2.Event", "tensorflow.python.platform.googletest.main", "tensorflow.python.debug.lib.debug_data.DebugDumpDir", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.ops.array_ops.unique", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.python.debug.lib.debug_data.is_copy_node", "tensorflow.python.ops.math_ops.reduce_sum", "numpy.isnan", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.math_ops.div", "tensorflow.python.client.session.Session", "numpy.array", "tensorflow.python.ops.data_flow_ops.FIFOQueue", "tensorflow.python.training.gradient_descent.GradientDescentOptimizer", "tensorflow.python.ops.parsing_ops.string_to_number", "tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig", "tensorflow.python.ops.array_ops.unique_with_counts", "numpy.isinf", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.debug.lib.debug_utils.watch_graph", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.framework.test_util.gpu_device_name", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "0.12", "1.0" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
LuizPitaAlmeida/image_caption_generator
[ "e368b9f23ef283856a42f78b724d3181245b27de", "e368b9f23ef283856a42f78b724d3181245b27de" ]
[ "src/utils/hardware_stats.py", "src/data_process/data_loader.py" ]
[ "import torch\nimport nvidia_smi\nimport psutil\n\n\nclass HardwareStats():\n def __init__(self):\n super().__init__()\n self.device = torch.device(\n \"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n nvidia_smi.nvmlInit()\n self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)\n\n def hardware_stats(self):\n \"\"\"\n Returns a dict containing some hardware related stats\n \"\"\"\n res = nvidia_smi.nvmlDeviceGetUtilizationRates(self.handle)\n return {\"cpu\": f\"{str(psutil.cpu_percent())}%\",\n \"mem\": f\"{str(psutil.virtual_memory().percent)}%\",\n \"gpu\": f\"{str(res.gpu)}%\",\n \"gpu_mem\": f\"{str(res.memory)}%\"}\n", "\"\"\"data_loader\nDo a torch dataloader of Coco Captions Dataset\n\nMinor modifications in Yunjey Choi code that can be found in:\n<https://github.com/yunjey/pytorch-tutorial>\nThanks Yunjey Choi.\n\"\"\"\nimport torch\nimport torchvision.transforms as transforms\nimport torch.utils.data as data\nimport os\nimport pickle\nimport numpy as np\nimport nltk\nfrom PIL import Image\nfrom pycocotools.coco import COCO\n\nfrom build_vocab import Vocabulary\n\n\nclass CocoDataset(data.Dataset):\n \"\"\"COCO Custom Dataset compatible with torch.utils.data.DataLoader.\"\"\"\n def __init__(self, root, json, vocab, transform=None):\n \"\"\"Set the path for images, captions and vocabulary wrapper.\n\n Args:\n root: image directory.\n json: coco annotation file path.\n vocab: vocabulary wrapper.\n transform: image transformer.\n \"\"\"\n self.root = root\n self.coco = COCO(json)\n self.ids = list(self.coco.anns.keys())\n self.vocab = vocab\n self.transform = transform\n\n def __getitem__(self, index):\n \"\"\"Returns one data pair (image and caption).\"\"\"\n coco = self.coco\n vocab = self.vocab\n ann_id = self.ids[index]\n caption = coco.anns[ann_id]['caption']\n img_id = coco.anns[ann_id]['image_id']\n path = coco.loadImgs(img_id)[0]['file_name']\n\n image = Image.open(os.path.join(self.root, path)).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n\n # Convert caption (string) to word ids.\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\n caption = []\n caption.append(vocab('<start>'))\n caption.extend([vocab(token) for token in tokens])\n caption.append(vocab('<end>'))\n target = torch.Tensor(caption)\n return image, target\n\n def __len__(self):\n return len(self.ids)\n\n\ndef collate_fn(data):\n \"\"\"Creates mini-batch tensors from the list of tuples (image, caption).\n\n We should build custom collate_fn rather than using default collate_fn,\n because merging caption (including padding) is not supported in default.\n\n Args:\n data: list of tuple (image, caption).\n - image: torch tensor of shape (3, 256, 256).\n - caption: torch tensor of shape (?); variable length.\n\n Returns:\n images: torch tensor of shape (batch_size, 3, 256, 256).\n targets: torch tensor of shape (batch_size, padded_length).\n lengths: list; valid length for each padded caption.\n \"\"\"\n # Sort a data list by caption length (descending order).\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions = zip(*data)\n\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n\n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n return images, targets, lengths\n\n\ndef get_loader(root, json, vocab, transform, 
batch_size, shuffle, num_workers):\n \"\"\"Returns torch.utils.data.DataLoader for custom coco dataset.\"\"\"\n # COCO caption dataset\n coco = CocoDataset(root=root,\n json=json,\n vocab=vocab,\n transform=transform)\n\n # Data loader for COCO dataset\n # This will return (images, captions, lengths) for each iteration.\n # images: a tensor of shape (batch_size, 3, 224, 224).\n # captions: a tensor of shape (batch_size, padded_length).\n # lengths: a list indicating valid length for each caption. length is\n # (batch_size).\n data_loader = torch.utils.data.DataLoader(dataset=coco,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader\n" ]
[ [ "torch.cuda.is_available" ], [ "torch.stack", "torch.utils.data.DataLoader", "torch.Tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
javierlorenzod/pytorch-lightning
[ "6dba26666aa564db414eb238d99a4213006d8220", "6dba26666aa564db414eb238d99a4213006d8220" ]
[ "pytorch_lightning/trainer/training_loop.py", "tests/metrics/test_metric.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom contextlib import contextmanager, suppress\nfrom copy import copy, deepcopy\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning.callbacks import EarlyStopping\nfrom pytorch_lightning.core.memory import ModelSummary\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.states import RunningStage, TrainerState\nfrom pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing\nfrom pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.memory import recursive_detach\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nfrom pytorch_lightning.utilities.warnings import WarningCache\n\n\nclass TrainLoop:\n\n def __init__(self, trainer, multiple_trainloader_mode):\n self.trainer = trainer\n self.early_stopping_accumulator = None\n self.checkpoint_accumulator = None\n self.accumulated_loss = None\n self.warning_cache = WarningCache()\n self._teardown_already_run = False\n self.running_loss = TensorRunningAccum(window_length=20)\n self.automatic_optimization = True\n self._curr_step_result = None\n self._cur_grad_norm_dict = None\n self._multiple_trainloader_mode = multiple_trainloader_mode\n self._skip_backward = False\n self.trainer._multiple_trainloader_mode = multiple_trainloader_mode\n\n def on_trainer_init(\n self,\n max_epochs,\n min_epochs,\n max_steps,\n min_steps,\n num_sanity_val_steps,\n automatic_optimization,\n weights_summary,\n ):\n self.trainer.global_step = 0\n self.trainer.current_epoch = 0\n self.trainer.interrupted = False\n self.trainer.should_stop = False\n self.trainer._state = TrainerState.INITIALIZING\n\n self.trainer.total_batch_idx = 0\n self.trainer.batch_idx = 0\n self.trainer.num_training_batches = 0\n self.trainer.train_dataloader = None\n self.automatic_optimization = automatic_optimization\n\n # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000\n self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs\n # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1\n self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs\n self.trainer.max_steps = max_steps\n self.trainer.min_steps = min_steps\n\n if num_sanity_val_steps == -1:\n self.trainer.num_sanity_val_steps = float(\"inf\")\n else:\n self.trainer.num_sanity_val_steps = num_sanity_val_steps\n\n self.trainer.weights_summary = weights_summary\n if weights_summary is not None and weights_summary not in 
ModelSummary.MODES:\n raise MisconfigurationException(\n f\"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}\"\n )\n\n @property\n def num_optimizers(self):\n num_optimizers = len(self.get_optimizers_iterable())\n return num_optimizers\n\n def should_skip_training(self):\n should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps\n should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs\n return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0\n\n def on_train_start(self):\n # hook\n self.trainer.call_hook(\"on_train_start\")\n\n # provide rank to profiler\n self.trainer.profile_connector.on_train_start(self.trainer)\n\n def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):\n # clean hparams\n if hasattr(model, \"hparams\"):\n parsing.clean_namespace(model.hparams)\n\n # links data to the trainer\n self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)\n\n # check that model is configured correctly\n self.trainer.config_validator.verify_loop_configurations(model)\n\n # attach model log function to callback\n self.trainer.callback_connector.attach_model_logging_functions(model)\n\n def on_train_end(self):\n if self._teardown_already_run:\n return\n\n self._teardown_already_run = True\n\n # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates\n # when a checkpoint was saved at the last step\n self.trainer.global_step -= 1\n self.check_checkpoint_callback(should_update=True, is_last=True)\n self.trainer.global_step += 1\n\n # hook\n self.trainer.call_hook(\"on_train_end\")\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. 
Might do for all loggers.\n # It might be related to xla tensors blocked when moving to the cpu\n # kill loggers\n if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:\n self.trainer.logger.finalize(\"success\")\n\n # summarize profile results\n if self.trainer.global_rank == 0:\n self.trainer.profiler.describe()\n\n # give accelerators a chance to finish\n self.trainer.accelerator.on_train_end()\n\n def check_checkpoint_callback(self, should_update, is_last=False):\n # TODO bake this logic into the ModelCheckpoint callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = self.trainer.checkpoint_callbacks\n\n if is_last and any(cb.save_last for cb in callbacks):\n rank_zero_info(\"Saving latest checkpoint...\")\n\n model = self.trainer.get_model()\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def check_early_stopping_callback(self, should_update):\n # TODO bake this logic into the EarlyStopping callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]\n model = self.trainer.get_model()\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def on_train_epoch_start(self, epoch):\n\n # update training progress in trainer\n self.trainer.current_epoch = epoch\n\n model = self.trainer.get_model()\n\n # reset train dataloader\n if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n # todo: specify the possible exception\n with suppress(Exception):\n # set seed for distributed sampler (enables shuffling for each epoch)\n self.trainer.train_dataloader.sampler.set_epoch(epoch)\n\n # changing gradient accumulation according to the accumulation_scheduler\n self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())\n\n # stores accumulated grad fractions per batch\n self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)\n\n # structured result accumulators for callbacks\n self.early_stopping_accumulator = Accumulator()\n self.checkpoint_accumulator = Accumulator()\n\n # hook\n self.trainer.call_hook(\"on_epoch_start\")\n self.trainer.call_hook(\"on_train_epoch_start\")\n\n def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):\n # hook\n self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)\n self.trainer.call_hook('on_batch_end')\n\n # figure out what to track for epoch end\n self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)\n\n # reset batch logger internals\n self.trainer.logger_connector.on_train_batch_end()\n\n def reset_train_val_dataloaders(self, model):\n if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_val_dataloader(model)\n\n def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):\n\n # track the outputs to reduce at the end of the epoch\n for opt_idx, opt_outputs in enumerate(batch_end_outputs):\n sample_output = opt_outputs[-1]\n\n # decide if we need to reduce at the end of the epoch automatically\n auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end\n hook_overridden = (\n 
is_overridden(\"training_step\", model=self.trainer.get_model())\n or is_overridden(\"on_train_epoch_end\", model=self.trainer.get_model())\n )\n\n # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end\n if not (hook_overridden or auto_reduce_tng_result):\n continue\n\n # with 1 step (no tbptt) don't use a sequence at epoch end\n if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):\n opt_outputs = opt_outputs[0]\n\n epoch_output[opt_idx].append(opt_outputs)\n\n def get_optimizers_iterable(self):\n \"\"\"\n Generates an iterable with (idx, optimizer) for each optimizer.\n \"\"\"\n if not self.trainer.optimizer_frequencies:\n # call training_step once per optimizer\n return list(enumerate(self.trainer.optimizers))\n\n optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)\n optimizers_loop_length = optimizer_freq_cumsum[-1]\n current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length\n\n # find optimizer index by looking for the first {item > current_place} in the cumsum list\n opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)\n return [[opt_idx, self.trainer.optimizers[opt_idx]]]\n\n def on_after_backward(self, training_step_output, batch_idx, untouched_loss):\n is_result_obj = isinstance(training_step_output, Result)\n\n if is_result_obj:\n training_step_output.detach()\n else:\n training_step_output.batch_loss = training_step_output.batch_loss.detach()\n\n # insert after step hook\n self.trainer.call_hook(\"on_after_backward\")\n\n # when in dev debugging track the losses\n self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())\n\n def _check_training_step_output(self, training_step_output):\n if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n\n def training_step(self, split_batch, batch_idx, opt_idx, hiddens):\n # give the PL module a result for logging\n model_ref = self.trainer.get_model()\n\n with self.trainer.profiler.profile(\"model_forward\"):\n args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)\n\n # manually capture logged metrics\n model_ref._current_fx_name = 'training_step'\n model_ref._results = Result()\n with self.trainer.profiler.profile(\"training_step\"):\n training_step_output = self.trainer.accelerator_backend.training_step(args)\n self.trainer.accelerator_backend.post_training_step()\n\n self.trainer.logger_connector.cache_logged_metrics()\n\n self._check_training_step_output(training_step_output)\n\n training_step_output = self.trainer.call_hook(\"training_step_end\", training_step_output)\n\n training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(\n training_step_output, split_batch\n )\n is_result_obj = isinstance(training_step_output, Result)\n\n if training_step_output_for_epoch_end is None:\n return None\n\n # enable empty loss when using manual opt\n closure_loss = None\n untouched_loss = None\n\n if self.trainer.train_loop.automatic_optimization:\n # accumulate loss\n # (if accumulate_grad_batches = 1 no effect)\n if is_result_obj:\n closure_loss = training_step_output.minimize\n else:\n closure_loss = training_step_output.batch_loss\n\n closure_loss = 
closure_loss / self.trainer.accumulate_grad_batches\n\n # the loss will get scaled for amp. avoid any modifications to it\n untouched_loss = closure_loss.detach().clone()\n\n # result\n result = AttributeDict(\n closure_loss=closure_loss,\n loss=untouched_loss,\n training_step_output=training_step_output,\n training_step_output_for_epoch_end=training_step_output_for_epoch_end,\n hiddens=training_step_output.hiddens,\n )\n return result\n\n def _process_training_step_output(self, training_step_output, split_batch):\n training_step_output_for_epoch_end = training_step_output\n\n # enable validation_step return None\n if training_step_output_for_epoch_end is None:\n return None, None\n\n # -----------------------------------------\n # process result return (DEPRECATE in 1.0)\n # -----------------------------------------\n if isinstance(training_step_output, Result):\n training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)\n return training_step_output_for_epoch_end, training_step_output\n\n # -----------------------------------------\n # process hybrid (1.0)\n # -----------------------------------------\n # no need for these checks in 1.0.0\n # TODO: remove checks in 1.0.0\n is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)\n is_1_0_output = is_tensor or (\"log\" not in training_step_output and \"progress_bar\" not in training_step_output)\n if is_1_0_output:\n return self._process_training_step_output_1_0(training_step_output, split_batch)\n\n # -----------------------------------------\n # process old dict (deprecate 1.0)\n # -----------------------------------------\n training_step_output = self.trainer.process_dict_result(training_step_output, train=True)\n\n training_step_output = AttributeDict(\n batch_loss=training_step_output[0],\n pbar_on_batch_end=training_step_output[1],\n log_metrics=training_step_output[2],\n callback_metrics=training_step_output[3],\n hiddens=training_step_output[4],\n )\n # if the user decides to finally reduce things in epoch_end, save raw output without graphs\n if isinstance(training_step_output_for_epoch_end, torch.Tensor):\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()\n else:\n training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)\n\n return training_step_output_for_epoch_end, training_step_output\n\n def _process_training_step_output_1_0(self, training_step_output, split_batch):\n result = self.trainer.get_model()._results\n\n loss = None\n hiddens = None\n\n # handle dict return\n if isinstance(training_step_output, dict):\n loss = training_step_output.pop(\"loss\", None)\n hiddens = training_step_output.pop(\"hiddens\", None)\n result[\"extra\"] = training_step_output\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n result[\"extra\"] = {}\n\n # map to results under the hood\n result.minimize = loss\n result.hiddens = hiddens\n\n # track batch for manual reduction with result\n result.track_batch_size(len(split_batch))\n\n # track metrics without grads for epoch reduction\n training_step_output_for_epoch_end = copy(result)\n training_step_output_for_epoch_end.detach()\n if self.trainer.move_metrics_to_cpu:\n training_step_output_for_epoch_end.cpu()\n\n # what flows back into the system\n training_step_output = result\n\n return training_step_output_for_epoch_end, training_step_output\n\n def _process_result(self, training_step_output, split_batch):\n 
training_step_output.track_batch_size(len(split_batch))\n m = \"\"\"\n TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.\n Use self.log and .write from the LightningModule to log metrics and write predictions.\n training_step can now only return a scalar (for the loss) or a dictionary with anything you want.\n\n Option 1:\n return loss\n\n Option 2:\n return {'loss': loss, 'anything_else': ...}\n\n Option 3:\n return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}\n \"\"\"\n rank_zero_warn(m)\n\n training_step_output_for_epoch_end = copy(training_step_output)\n training_step_output_for_epoch_end.detach()\n\n return training_step_output_for_epoch_end\n\n def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):\n model_ref = self.trainer.get_model()\n\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n using_native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if using_native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli'\n )\n\n # wraps into LightningOptimizer only for running step\n optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n train_step_and_backward_closure,\n on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,\n using_native_amp=using_native_amp,\n using_lbfgs=is_lbfgs,\n )\n\n def on_before_zero_grad(self, optimizer):\n self.trainer.call_hook('on_before_zero_grad', optimizer)\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def track_and_norm_grad(self, optimizer):\n # track gradient norms\n grad_norm_dic = self._track_gradient_norm()\n\n # clip gradients\n self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)\n self._cur_grad_norm_dict = grad_norm_dic\n\n def _track_gradient_norm(self):\n grad_norm_dict = {}\n if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:\n if float(self.trainer.track_grad_norm) > 0:\n model = self.trainer.get_model()\n grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)\n return grad_norm_dict\n\n def process_hiddens(self, opt_closure_result):\n hiddens = opt_closure_result.hiddens\n if isinstance(opt_closure_result.training_step_output, Result):\n opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()\n return hiddens\n\n def tbptt_split_batch(self, batch):\n splits = [batch]\n if self.trainer.truncated_bptt_steps is not None:\n model_ref = self.trainer.get_model()\n with self.trainer.profiler.profile(\"tbptt_split_batch\"):\n splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)\n return splits\n\n def run_training_epoch(self):\n # modify dataloader if needed (ddp, etc...)\n train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)\n\n # track epoch output\n epoch_output = [[] for _ in range(self.num_optimizers)]\n\n train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)\n dataloader_idx = 0\n should_check_val = False\n\n for batch_idx, (batch, is_last_batch) in train_dataloader:\n\n self.trainer.batch_idx = 
batch_idx\n\n # ------------------------------------\n # TRAINING_STEP + TRAINING_STEP_END\n # ------------------------------------\n with self.trainer.profiler.profile(\"run_training_batch\"):\n batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)\n\n # when returning -1 from train_step, we end epoch early\n if batch_output.signal == -1:\n break\n\n batch_end_outputs = self.process_train_step_outputs(\n batch_output.training_step_output_for_epoch_end,\n self.early_stopping_accumulator,\n self.checkpoint_accumulator,\n )\n # hook\n # TODO: add outputs to batches\n self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)\n\n # -----------------------------------------\n # SAVE METRICS TO LOGGERS\n # -----------------------------------------\n self.trainer.logger_connector.log_train_step_metrics(batch_output)\n\n # -----------------------------------------\n # VALIDATE IF NEEDED + CHECKPOINT CALLBACK\n # -----------------------------------------\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)\n if should_check_val:\n self.trainer.run_evaluation()\n\n # reset stage to train\n self.trainer._set_wide_running_stage(RunningStage.TRAINING)\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self.save_loggers_on_train_batch_end()\n\n # update LR schedulers\n monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)\n self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)\n self.trainer.checkpoint_connector.has_trained = True\n\n # max steps reached, end training\n if (\n self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1\n and self._accumulated_batches_reached()\n ):\n break\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if self.trainer.should_stop:\n break\n\n self.trainer.total_batch_idx += 1\n\n # stop epoch if we limited the number of training batches\n if self._num_training_batches_reached(is_last_batch):\n break\n\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n # epoch end hook\n self.run_on_epoch_end_hook(epoch_output)\n\n # log epoch metrics\n self.trainer.logger_connector.log_train_epoch_end_metrics(\n epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers\n )\n\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)\n if should_check_val:\n self.trainer.run_evaluation(on_epoch=True)\n\n # reset stage to train\n self.trainer._set_wide_running_stage(RunningStage.TRAINING)\n\n should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)\n should_train_only = self.trainer.disable_validation or should_skip_eval\n\n if should_train_only:\n # update epoch level lr_schedulers\n self.trainer.optimizer_connector.update_learning_rates(interval='epoch')\n self.check_checkpoint_callback(True)\n self.check_early_stopping_callback(True)\n\n # increment the global step once\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n def run_training_batch(self, batch, batch_idx, dataloader_idx):\n # track grad norms\n grad_norm_dic = {}\n\n # bookkeeping\n self.trainer.hiddens = None\n\n # track all outputs across time and num of optimizers\n batch_outputs = [[] for _ in 
range(len(self.get_optimizers_iterable()))]\n\n if batch is None:\n return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_batch_start\")\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_train_batch_start\", batch, batch_idx, dataloader_idx)\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # lightning module hook\n splits = self.tbptt_split_batch(batch)\n\n for split_idx, split_batch in enumerate(splits):\n\n # create an iterable for optimizers and loop over them\n for opt_idx, optimizer in self.prepare_optimizers():\n\n # toggle model params + set info to logger_connector\n self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)\n\n if self.should_accumulate():\n # For gradient accumulation\n\n # -------------------\n # calculate loss (train step + train step end)\n # -------------------\n\n # automatic_optimization=True: perform ddp sync only when performing optimizer_step\n # automatic_optimization=False: don't block synchronization here\n with self.block_ddp_sync_behaviour():\n self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # ------------------------------\n # BACKWARD PASS\n # ------------------------------\n # gradient update with accumulated gradients\n\n else:\n if self.automatic_optimization:\n\n def train_step_and_backward_closure():\n result = self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n return None if result is None else result.loss\n\n # optimizer step\n self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)\n\n else:\n self._curr_step_result = self.training_step(\n split_batch, batch_idx, opt_idx, self.trainer.hiddens\n )\n\n if self._curr_step_result is None:\n # user decided to skip optimization\n # make sure to zero grad.\n continue\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # todo: Properly aggregate grad_norm across opt_idx and split_idx\n grad_norm_dic = self._cur_grad_norm_dict\n self._cur_grad_norm_dict = None\n\n # update running loss + reset accumulated loss\n self.update_running_loss()\n\n result = AttributeDict(\n signal=0,\n grad_norm_dic=grad_norm_dic,\n training_step_output_for_epoch_end=batch_outputs,\n )\n return result\n\n @contextmanager\n def block_ddp_sync_behaviour(self, should_block_sync: bool = False):\n \"\"\"\n automatic_optimization = True\n Blocks ddp gradient sync behaviour on the backward pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n \"\"\"\n if (\n isinstance(self.trainer.training_type_plugin, ParallelPlugin)\n and (self.automatic_optimization or should_block_sync)\n ):\n with self.trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n\n def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:\n opt_closure_result = self._curr_step_result\n\n if opt_closure_result is not None:\n\n # cache metrics\n 
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)\n\n # track hiddens\n self.trainer.hiddens = self.process_hiddens(opt_closure_result)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(opt_closure_result.loss)\n\n # track all the outputs across all steps\n batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0\n batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)\n\n if self.automatic_optimization:\n # track total loss for logging (avoid mem leaks)\n self.accumulated_loss.append(opt_closure_result.loss)\n\n self._curr_step_result = None\n\n return batch_outputs\n\n def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n with self.trainer.profiler.profile(\"training_step_and_backward\"):\n # lightning module hook\n result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)\n self._curr_step_result = result\n\n if result is None:\n if self.automatic_optimization:\n self.warning_cache.warn(\"training_step returned None if it was on purpose, ignore this warning...\")\n return None\n\n if not self._skip_backward and self.trainer.train_loop.automatic_optimization:\n # backward pass\n with self.trainer.profiler.profile(\"model_backward\"):\n self.backward(result, optimizer, opt_idx)\n\n # hook - call this hook only\n # when gradients have finished to accumulate\n if not self.should_accumulate():\n self.on_after_backward(result.training_step_output, batch_idx, result.loss)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(result.loss)\n\n if len(self.trainer.optimizers) > 1:\n # revert back to previous state\n self.trainer.get_model().untoggle_optimizer(opt_idx)\n\n return result\n\n def backward(self, result, optimizer, opt_idx, *args, **kwargs):\n self.trainer.dev_debugger.track_event(\"backward_call\")\n\n should_accumulate = self.should_accumulate()\n\n # backward can be called manually in the training loop\n if isinstance(result, torch.Tensor):\n self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)\n else:\n result.closure_loss = self.trainer.accelerator_backend.backward(\n result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs\n )\n\n if not self.should_accumulate():\n # track gradients\n self.track_and_norm_grad(optimizer=optimizer)\n\n def update_train_loop_lr_schedulers(self, monitor_metrics=None):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n if num_accumulated_batches_reached or num_training_batches_reached:\n # update lr\n self.trainer.optimizer_connector.update_learning_rates(interval=\"step\", monitor_metrics=monitor_metrics)\n\n def run_on_epoch_end_hook(self, epoch_output):\n # inform logger the batch loop has finished\n self.trainer.logger_connector.on_train_epoch_end()\n\n self.trainer.call_hook('on_train_epoch_end', epoch_output)\n self.trainer.call_hook('on_epoch_end')\n\n def increment_accumulated_grad_global_step(self):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n # progress global step according to grads progress\n if num_accumulated_batches_reached or 
num_training_batches_reached:\n self.trainer.global_step += 1\n\n def _accumulated_batches_reached(self):\n return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0\n\n def _num_training_batches_reached(self, is_last_batch=False):\n return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch\n\n def should_accumulate(self):\n # checks if backward or backward + optimizer step (via closure)\n accumulation_done = self._accumulated_batches_reached()\n is_final_batch = self._num_training_batches_reached()\n return not (accumulation_done or is_final_batch)\n\n def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):\n # decide if we should run validation\n is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0\n is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0\n can_check_val = self.trainer.enable_validation and is_val_check_epoch\n is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float(\"inf\")\n epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches\n\n should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop\n or is_last_batch_for_infinite_dataset\n ) if on_epoch else (is_val_check_batch and not epoch_end_val_check)\n\n return should_check_val and can_check_val\n\n def build_train_args(self, batch, batch_idx, opt_idx, hiddens):\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_idx]\n\n if len(self.trainer.optimizers) > 1:\n if self.trainer.has_arg(\"training_step\", \"optimizer_idx\"):\n args.append(opt_idx)\n else:\n num_opts = len(self.trainer.optimizers)\n raise ValueError(\n f\"Your LightningModule defines {num_opts} optimizers but \"\n f'training_step is missing the \"optimizer_idx\" argument.'\n )\n\n # pass hiddens if using tbptt\n if self.trainer.truncated_bptt_steps is not None:\n args.append(hiddens)\n\n return args\n\n def save_loggers_on_train_batch_end(self):\n # when loggers should save to disk\n should_flush_logs = self.trainer.logger_connector.should_flush_logs\n if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:\n self.trainer.logger.save()\n\n def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):\n \"\"\"\n Figure out what needs to be tracked/logged at the end of the epoch\n \"\"\"\n\n # the training step outputs a list per optimizer. 
The list contains the outputs at each time step\n # when no TBPTT is used, then the list has 1 item per batch\n # when TBPTT IS used, then the list has n items (1 per time step)\n batch_end_outputs = []\n for optimizer_idx_outputs in all_train_step_outputs:\n # extract one representative sample from each time step (1 if no tbptt) and 0th optimizer\n if len(optimizer_idx_outputs) == 0:\n continue\n\n sample_output = optimizer_idx_outputs[-1]\n\n # pull out callback info if available (ie: Results object)\n if isinstance(sample_output, dict) and \"early_stop_on\" in sample_output:\n early_stopping_accumulator.accumulate(sample_output[\"early_stop_on\"])\n\n if isinstance(sample_output, dict) and \"checkpoint_on\" in sample_output:\n checkpoint_accumulator.accumulate(sample_output[\"checkpoint_on\"])\n\n batch_end_outputs.append(optimizer_idx_outputs)\n\n return batch_end_outputs\n\n def prepare_optimizers(self):\n # in manual optimization we loop over all optimizers at once\n optimizers = self.get_optimizers_iterable()\n if not self.automatic_optimization:\n optimizers = [optimizers[0]]\n return optimizers\n\n def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):\n # set split_idx to trainer for tracking\n self.trainer.split_idx = split_idx\n\n # make sure only the gradients of the current optimizer's parameters are calculated\n # in the training step to prevent dangling gradients in multiple-optimizer setup.\n if self.automatic_optimization and len(self.trainer.optimizers) > 1:\n model = self.trainer.get_model()\n model.toggle_optimizer(optimizer, opt_idx)\n\n # use to track metrics internally\n self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)\n\n def update_running_loss(self):\n accumulated_loss = self.accumulated_loss.mean()\n\n if accumulated_loss is not None:\n # calculate running loss for display\n self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)\n\n # reset for next set of accumulated grads\n self.accumulated_loss.reset()\n", "import pickle\nfrom collections import OrderedDict\nfrom distutils.version import LooseVersion\n\nimport cloudpickle\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import nn\n\nfrom pytorch_lightning.metrics.metric import Metric, MetricCollection\n\ntorch.manual_seed(42)\n\n\nclass Dummy(Metric):\n name = \"Dummy\"\n\n def __init__(self):\n super().__init__()\n self.add_state(\"x\", torch.tensor(0.0), dist_reduce_fx=None)\n\n def update(self):\n pass\n\n def compute(self):\n pass\n\n\nclass DummyList(Metric):\n name = \"DummyList\"\n\n def __init__(self):\n super().__init__()\n self.add_state(\"x\", list(), dist_reduce_fx=None)\n\n def update(self):\n pass\n\n def compute(self):\n pass\n\n\ndef test_inherit():\n Dummy()\n\n\ndef test_add_state():\n a = Dummy()\n\n a.add_state(\"a\", torch.tensor(0), \"sum\")\n assert a._reductions[\"a\"](torch.tensor([1, 1])) == 2\n\n a.add_state(\"b\", torch.tensor(0), \"mean\")\n assert np.allclose(a._reductions[\"b\"](torch.tensor([1.0, 2.0])).numpy(), 1.5)\n\n a.add_state(\"c\", torch.tensor(0), \"cat\")\n assert a._reductions[\"c\"]([torch.tensor([1]), torch.tensor([1])]).shape == (2, )\n\n with pytest.raises(ValueError):\n a.add_state(\"d1\", torch.tensor(0), 'xyz')\n\n with pytest.raises(ValueError):\n a.add_state(\"d2\", torch.tensor(0), 42)\n\n with pytest.raises(ValueError):\n a.add_state(\"d3\", [torch.tensor(0)], 'sum')\n\n with pytest.raises(ValueError):\n a.add_state(\"d4\", 42, 'sum')\n\n def 
custom_fx(x):\n return -1\n\n a.add_state(\"e\", torch.tensor(0), custom_fx)\n assert a._reductions[\"e\"](torch.tensor([1, 1])) == -1\n\n\ndef test_add_state_persistent():\n a = Dummy()\n\n a.add_state(\"a\", torch.tensor(0), \"sum\", persistent=True)\n assert \"a\" in a.state_dict()\n\n a.add_state(\"b\", torch.tensor(0), \"sum\", persistent=False)\n\n if LooseVersion(torch.__version__) >= LooseVersion(\"1.6.0\"):\n assert \"b\" not in a.state_dict()\n\n\ndef test_reset():\n\n class A(Dummy):\n pass\n\n class B(DummyList):\n pass\n\n a = A()\n assert a.x == 0\n a.x = torch.tensor(5)\n a.reset()\n assert a.x == 0\n\n b = B()\n assert isinstance(b.x, list) and len(b.x) == 0\n b.x = torch.tensor(5)\n b.reset()\n assert isinstance(b.x, list) and len(b.x) == 0\n\n\ndef test_update():\n\n class A(Dummy):\n\n def update(self, x):\n self.x += x\n\n a = A()\n assert a.x == 0\n assert a._computed is None\n a.update(1)\n assert a._computed is None\n assert a.x == 1\n a.update(2)\n assert a.x == 3\n assert a._computed is None\n\n\ndef test_compute():\n\n class A(Dummy):\n\n def update(self, x):\n self.x += x\n\n def compute(self):\n return self.x\n\n a = A()\n assert 0 == a.compute()\n assert 0 == a.x\n a.update(1)\n assert a._computed is None\n assert a.compute() == 1\n assert a._computed == 1\n a.update(2)\n assert a._computed is None\n assert a.compute() == 3\n assert a._computed == 3\n\n # called without update, should return cached value\n a._computed = 5\n assert a.compute() == 5\n\n\ndef test_forward():\n\n class A(Dummy):\n\n def update(self, x):\n self.x += x\n\n def compute(self):\n return self.x\n\n a = A()\n assert a(5) == 5\n assert a._forward_cache == 5\n\n assert a(8) == 8\n assert a._forward_cache == 8\n\n assert a.compute() == 13\n\n\nclass DummyMetric1(Dummy):\n\n def update(self, x):\n self.x += x\n\n def compute(self):\n return self.x\n\n\nclass DummyMetric2(Dummy):\n\n def update(self, y):\n self.x -= y\n\n def compute(self):\n return self.x\n\n\ndef test_pickle(tmpdir):\n # doesn't tests for DDP\n a = DummyMetric1()\n a.update(1)\n\n metric_pickled = pickle.dumps(a)\n metric_loaded = pickle.loads(metric_pickled)\n\n assert metric_loaded.compute() == 1\n\n metric_loaded.update(5)\n assert metric_loaded.compute() == 6\n\n metric_pickled = cloudpickle.dumps(a)\n metric_loaded = cloudpickle.loads(metric_pickled)\n\n assert metric_loaded.compute() == 1\n\n\ndef test_state_dict(tmpdir):\n \"\"\" test that metric states can be removed and added to state dict \"\"\"\n metric = Dummy()\n assert metric.state_dict() == OrderedDict()\n metric.persistent(True)\n assert metric.state_dict() == OrderedDict(x=0)\n metric.persistent(False)\n assert metric.state_dict() == OrderedDict()\n\n\ndef test_child_metric_state_dict():\n \"\"\" test that child metric states will be added to parent state dict \"\"\"\n\n class TestModule(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.metric = Dummy()\n self.metric.add_state('a', torch.tensor(0), persistent=True)\n self.metric.add_state('b', [], persistent=True)\n self.metric.register_buffer('c', torch.tensor(0))\n\n module = TestModule()\n expected_state_dict = {\n 'metric.a': torch.tensor(0),\n 'metric.b': [],\n 'metric.c': torch.tensor(0),\n }\n assert module.state_dict() == expected_state_dict\n\n\[email protected](not torch.cuda.is_available(), reason=\"Test requires GPU.\")\ndef test_device_and_dtype_transfer(tmpdir):\n metric = DummyMetric1()\n assert metric.x.is_cuda is False\n assert metric.x.dtype == torch.float32\n\n metric = 
metric.to(device='cuda')\n assert metric.x.is_cuda\n\n metric = metric.double()\n assert metric.x.dtype == torch.float64\n\n metric = metric.half()\n assert metric.x.dtype == torch.float16\n\n\ndef test_metric_collection(tmpdir):\n m1 = DummyMetric1()\n m2 = DummyMetric2()\n\n metric_collection = MetricCollection([m1, m2])\n\n # Test correct dict structure\n assert len(metric_collection) == 2\n assert metric_collection['DummyMetric1'] == m1\n assert metric_collection['DummyMetric2'] == m2\n\n # Test correct initialization\n for name, metric in metric_collection.items():\n assert metric.x == 0, f'Metric {name} not initialized correctly'\n\n # Test every metric gets updated\n metric_collection.update(5)\n for name, metric in metric_collection.items():\n assert metric.x.abs() == 5, f'Metric {name} not updated correctly'\n\n # Test compute on each metric\n metric_collection.update(-5)\n metric_vals = metric_collection.compute()\n assert len(metric_vals) == 2\n for name, metric_val in metric_vals.items():\n assert metric_val == 0, f'Metric {name}.compute not called correctly'\n\n # Test that everything is reset\n for name, metric in metric_collection.items():\n assert metric.x == 0, f'Metric {name} not reset correctly'\n\n # Test pickable\n metric_pickled = pickle.dumps(metric_collection)\n metric_loaded = pickle.loads(metric_pickled)\n assert isinstance(metric_loaded, MetricCollection)\n\n\[email protected](not torch.cuda.is_available(), reason=\"Test requires GPU.\")\ndef test_device_and_dtype_transfer_metriccollection(tmpdir):\n m1 = DummyMetric1()\n m2 = DummyMetric2()\n\n metric_collection = MetricCollection([m1, m2])\n for _, metric in metric_collection.items():\n assert metric.x.is_cuda is False\n assert metric.x.dtype == torch.float32\n\n metric_collection = metric_collection.to(device='cuda')\n for _, metric in metric_collection.items():\n assert metric.x.is_cuda\n\n metric_collection = metric_collection.double()\n for _, metric in metric_collection.items():\n assert metric.x.dtype == torch.float64\n\n metric_collection = metric_collection.half()\n for _, metric in metric_collection.items():\n assert metric.x.dtype == torch.float16\n\n\ndef test_metric_collection_wrong_input(tmpdir):\n \"\"\" Check that errors are raised on wrong input \"\"\"\n m1 = DummyMetric1()\n\n # Not all input are metrics (list)\n with pytest.raises(ValueError):\n _ = MetricCollection([m1, 5])\n\n # Not all input are metrics (dict)\n with pytest.raises(ValueError):\n _ = MetricCollection({'metric1': m1, 'metric2': 5})\n\n # Same metric passed in multiple times\n with pytest.raises(ValueError, match='Encountered two metrics both named *.'):\n _ = MetricCollection([m1, m1])\n\n # Not a list or dict passed in\n with pytest.raises(ValueError, match='Unknown input to MetricCollection.'):\n _ = MetricCollection(m1)\n\n\ndef test_metric_collection_args_kwargs(tmpdir):\n \"\"\" Check that args and kwargs gets passed correctly in metric collection,\n Checks both update and forward method\n \"\"\"\n m1 = DummyMetric1()\n m2 = DummyMetric2()\n\n metric_collection = MetricCollection([m1, m2])\n\n # args gets passed to all metrics\n metric_collection.update(5)\n assert metric_collection['DummyMetric1'].x == 5\n assert metric_collection['DummyMetric2'].x == -5\n metric_collection.reset()\n _ = metric_collection(5)\n assert metric_collection['DummyMetric1'].x == 5\n assert metric_collection['DummyMetric2'].x == -5\n metric_collection.reset()\n\n # kwargs gets only passed to metrics that it matches\n 
metric_collection.update(x=10, y=20)\n assert metric_collection['DummyMetric1'].x == 10\n assert metric_collection['DummyMetric2'].x == -20\n metric_collection.reset()\n _ = metric_collection(x=10, y=20)\n assert metric_collection['DummyMetric1'].x == 10\n assert metric_collection['DummyMetric2'].x == -20\n" ]
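The run_training_batch record above turns on two mechanics: an optimizer step only fires once the gradient-accumulation window closes (should_accumulate), and under automatic optimization the forward/backward pass is handed to optimizer.step as a closure. A minimal standalone sketch of both ideas; the toy model, data, and helper name are illustrative, not taken from the source above:

import torch

def should_accumulate(batch_idx, accumulate_grad_batches, num_batches):
    # mirrors the modulo test in the loop code above
    accumulation_done = (batch_idx + 1) % accumulate_grad_batches == 0
    is_final_batch = (batch_idx + 1) == num_batches
    return not (accumulation_done or is_final_batch)

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(6)]

for batch_idx, (x, y) in enumerate(data):
    def closure():
        # forward + backward, returning the loss so second-order
        # optimizers (e.g. LBFGS) can re-evaluate it if they need to
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        return loss

    if should_accumulate(batch_idx, accumulate_grad_batches=2,
                         num_batches=len(data)):
        closure()                # accumulate gradients, no step yet
    else:
        optimizer.step(closure)  # step consumes the accumulated grads
        optimizer.zero_grad()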
[ [ "numpy.argmax", "numpy.cumsum" ], [ "torch.manual_seed", "torch.cuda.is_available", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adrianbouza/automl
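One detail worth restating from test_metric_collection_args_kwargs in the previous record: keyword arguments are routed to each metric by matching its update signature. That routing can be reproduced with inspect (a sketch of the idea, not the library internals; M1, M2, and filtered_kwargs are hypothetical):

import inspect

def filtered_kwargs(fn, **kwargs):
    # keep only the kwargs that appear in fn's signature
    params = inspect.signature(fn).parameters
    return {k: v for k, v in kwargs.items() if k in params}

class M1:
    def update(self, x):
        print("M1 got", x)

class M2:
    def update(self, y):
        print("M2 got", y)

for metric in (M1(), M2()):
    metric.update(**filtered_kwargs(metric.update, x=10, y=20))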
[ "46dbd753efc8efc73ced146fe8b3bb694709dcff", "46dbd753efc8efc73ced146fe8b3bb694709dcff", "46dbd753efc8efc73ced146fe8b3bb694709dcff" ]
[ "efficientdet/keras/infer_lib.py", "efficientdet/keras/wbf_test.py", "efficientdet/efficientdet_arch.py" ]
[ "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Inference related utilities.\"\"\"\nimport copy\nimport os\nimport time\nfrom typing import Text, Dict, Any, Optional\nfrom absl import logging\nimport numpy as np\nimport tensorflow as tf\n\nfrom .. import dataloader\nfrom .. import hparams_config\nfrom .. import utils\nfrom . import efficientdet_keras\nfrom . import label_util\nfrom . import util_keras\nfrom ..visualize import vis_utils\n\n\ndef visualize_image(image,\n boxes,\n classes,\n scores,\n label_map=None,\n min_score_thresh=0.01,\n max_boxes_to_draw=1000,\n line_thickness=2,\n **kwargs):\n \"\"\"Visualizes a given image.\n\n Args:\n image: a image with shape [H, W, C].\n boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].\n classes: a class prediction with shape [N].\n scores: A list of float value with shape [N].\n label_map: a dictionary from class id to name.\n min_score_thresh: minimal score for showing. If claass probability is below\n this threshold, then the object will not show up.\n max_boxes_to_draw: maximum bounding box to draw.\n line_thickness: how thick is the bounding box line.\n **kwargs: extra parameters.\n\n Returns:\n output_image: an output image with annotated boxes and classes.\n \"\"\"\n label_map = label_util.get_label_map(label_map or 'coco')\n category_index = {k: {'id': k, 'name': label_map[k]} for k in label_map}\n img = np.array(image)\n vis_utils.visualize_boxes_and_labels_on_image_array(\n img,\n boxes,\n classes,\n scores,\n category_index,\n min_score_thresh=min_score_thresh,\n max_boxes_to_draw=max_boxes_to_draw,\n line_thickness=line_thickness,\n **kwargs)\n return img\n\n\nclass ExportNetwork(tf.Module):\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n @tf.function\n def __call__(self, imgs):\n return tf.nest.flatten(self.model(imgs, training=False))\n\n\nclass ExportModel(tf.Module):\n \"\"\"Model to be exported as SavedModel/TFLite format.\"\"\"\n\n def __init__(self, model, pre_mode='infer', post_mode='global'):\n super().__init__()\n self.model = model\n self.pre_mode = pre_mode\n self.post_mode = post_mode\n\n @tf.function\n def __call__(self, imgs):\n return self.model(\n imgs, training=False, pre_mode=self.pre_mode, post_mode=self.post_mode)\n\n\nclass ServingDriver:\n \"\"\"A driver for serving single or batch images.\n\n This driver supports serving with image files or arrays, with configurable\n batch size.\n\n Example 1. Serving streaming image contents:\n\n driver = inference.ServingDriver(\n 'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=1)\n driver.build()\n for m in image_iterator():\n predictions = driver.serve_files([m])\n boxes, scores, classes, _ = tf.nest.map_structure(np.array, predictions)\n driver.visualize(m, boxes[0], scores[0], classes[0])\n # m is the new image with annotated boxes.\n\n Example 2. 
Serving batch image contents:\n\n imgs = []\n for f in ['/tmp/1.jpg', '/tmp/2.jpg']:\n imgs.append(np.array(Image.open(f)))\n\n driver = inference.ServingDriver(\n 'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=len(imgs))\n driver.build()\n predictions = driver.serve(imgs)\n boxes, scores, classes, _ = tf.nest.map_structure(np.array, predictions)\n for i in range(len(imgs)):\n driver.visualize(imgs[i], boxes[i], scores[i], classes[i])\n\n Example 3: another way is to use SavedModel:\n\n # step1: export a model.\n driver = inference.ServingDriver('efficientdet-d0', '/tmp/efficientdet-d0')\n driver.build()\n driver.export('/tmp/saved_model_path')\n\n # step2: Serve a model.\n driver.load(self.saved_model_dir)\n raw_images = []\n for f in tf.io.gfile.glob('/tmp/images/*.jpg'):\n raw_images.append(np.array(PIL.Image.open(f)))\n detections = driver.serve(raw_images)\n boxes, scores, classes, _ = tf.nest.map_structure(np.array, detections)\n for i in range(len(imgs)):\n driver.visualize(imgs[i], boxes[i], scores[i], classes[i])\n \"\"\"\n\n def __init__(self,\n model_name: Text,\n ckpt_path: Optional[Text] = None,\n batch_size: int = 1,\n only_network: bool = False,\n model_params: Optional[Dict[Text, Any]] = None,\n debug: bool = False):\n \"\"\"Initialize the inference driver.\n\n Args:\n model_name: target model name, such as efficientdet-d0.\n ckpt_path: checkpoint path, such as /tmp/efficientdet-d0/.\n batch_size: batch size for inference.\n only_network: only use the network without pre/post processing.\n model_params: model parameters for overriding the config.\n debug: bool, if true, run in debug mode.\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.ckpt_path = ckpt_path\n self.batch_size = batch_size\n self.only_network = only_network\n self.debug = debug\n\n self.params = hparams_config.get_detection_config(model_name).as_dict()\n\n if model_params:\n self.params.update(model_params)\n self.params.update(dict(is_training_bn=False))\n self.label_map = self.params.get('label_map', None)\n\n self._model = None\n\n mixed_precision = self.params.get('mixed_precision', None)\n precision = utils.get_precision(\n self.params.get('strategy', None), mixed_precision)\n policy = tf.keras.mixed_precision.Policy(precision)\n tf.keras.mixed_precision.set_global_policy(policy)\n\n @property\n def model(self):\n if not self._model:\n self.build()\n return self._model\n\n @model.setter\n def model(self, model):\n self._model = model\n\n def build(self, params_override=None):\n \"\"\"Build model and restore checkpoints.\"\"\"\n params = copy.deepcopy(self.params)\n if params_override:\n params.update(params_override)\n config = hparams_config.get_efficientdet_config(self.model_name)\n config.override(params)\n if self.only_network:\n self.model = efficientdet_keras.EfficientDetNet(config=config)\n else:\n self.model = efficientdet_keras.EfficientDetModel(config=config)\n image_size = utils.parse_image_size(params['image_size'])\n self.model.build((self.batch_size, *image_size, 3))\n util_keras.restore_ckpt(self.model, self.ckpt_path,\n self.params['moving_average_decay'],\n skip_mismatch=False)\n if self.debug:\n tf.config.run_functions_eagerly(self.debug)\n\n def visualize(self, image, boxes, classes, scores, **kwargs):\n \"\"\"Visualize prediction on image.\"\"\"\n return visualize_image(image, boxes, classes.astype(int), scores,\n self.label_map, **kwargs)\n\n def benchmark(self, image_arrays, bm_runs=10, trace_filename=None):\n \"\"\"Benchmark inference 
latency/throughput.\n\n Args:\n image_arrays: a list of images in numpy array format.\n bm_runs: Number of benchmark runs.\n trace_filename: If None, specify the filename for saving trace.\n \"\"\"\n _, spec = self._get_model_and_spec()\n\n @tf.function(input_signature=[spec])\n def test_func(image_arrays):\n return self.model(image_arrays) # pylint: disable=not-callable\n\n for _ in range(3): # warmup 3 runs.\n test_func(image_arrays)\n\n start = time.perf_counter()\n for _ in range(bm_runs):\n test_func(image_arrays)\n end = time.perf_counter()\n inference_time = (end - start) / bm_runs\n\n print('Per batch inference time: ', inference_time)\n print('FPS: ', self.batch_size / inference_time)\n\n if trace_filename:\n options = tf.profiler.experimental.ProfilerOptions()\n tf.profiler.experimental.start(trace_filename, options)\n test_func(image_arrays)\n tf.profiler.experimental.stop()\n\n def serve(self, image_arrays):\n \"\"\"Serve a list of image arrays.\n\n Args:\n image_arrays: A list of image content with each image has shape [height,\n width, 3] and uint8 type.\n\n Returns:\n A list of detections.\n \"\"\"\n if isinstance(self.model, tf.lite.Interpreter):\n input_details = self.model.get_input_details()\n output_details = self.model.get_output_details()\n self.model.set_tensor(input_details[0]['index'], np.array(image_arrays))\n self.model.invoke()\n return [self.model.get_tensor(x['index']) for x in output_details]\n return self.model(image_arrays) # pylint: disable=not-callable\n\n def load(self, saved_model_dir_or_frozen_graph: Text):\n \"\"\"Load the model using saved model or a frozen graph.\"\"\"\n # Load saved model if it is a folder.\n if tf.saved_model.contains_saved_model(saved_model_dir_or_frozen_graph):\n self.model = tf.saved_model.load(saved_model_dir_or_frozen_graph)\n return\n\n if saved_model_dir_or_frozen_graph.endswith('.tflite'):\n self.model = tf.lite.Interpreter(saved_model_dir_or_frozen_graph)\n self.model.allocate_tensors()\n return\n\n # Load a frozen graph.\n def wrap_frozen_graph(graph_def, inputs, outputs):\n # https://www.tensorflow.org/guide/migrate\n imports_graph_def_fn = lambda: tf.import_graph_def(graph_def, name='')\n wrapped_import = tf.compat.v1.wrap_function(imports_graph_def_fn, [])\n import_graph = wrapped_import.graph\n return wrapped_import.prune(\n tf.nest.map_structure(import_graph.as_graph_element, inputs),\n tf.nest.map_structure(import_graph.as_graph_element, outputs))\n\n graph_def = tf.Graph().as_graph_def()\n with tf.io.gfile.GFile(saved_model_dir_or_frozen_graph, 'rb') as f:\n graph_def.ParseFromString(f.read())\n\n self.model = wrap_frozen_graph(\n graph_def,\n inputs='images:0',\n outputs=['Identity:0', 'Identity_1:0', 'Identity_2:0', 'Identity_3:0'])\n\n def freeze(self, func):\n \"\"\"Freeze the graph.\"\"\"\n # pylint: disable=g-import-not-at-top,disable=g-direct-tensorflow-import\n from tensorflow.python.framework.convert_to_constants \\\n import convert_variables_to_constants_v2_as_graph\n _, graphdef = convert_variables_to_constants_v2_as_graph(func)\n return graphdef\n\n def _get_model_and_spec(self, tflite=None):\n \"\"\"Get model instance and export spec.\"\"\"\n if self.only_network or tflite:\n image_size = utils.parse_image_size(self.params['image_size'])\n spec = tf.TensorSpec(\n shape=[self.batch_size, *image_size, 3],\n dtype=tf.float32,\n name='images')\n if self.only_network:\n export_model = ExportNetwork(self.model)\n else:\n # If export tflite, we should remove preprocessing since TFLite doesn't\n # support 
dynamic shape.\n logging.info('Export model without preprocessing.')\n # This section is only used for TFLite, so we use the applicable\n # pre_ & post_ modes.\n export_model = ExportModel(\n self.model, pre_mode=None, post_mode='tflite')\n return export_model, spec\n else:\n spec = tf.TensorSpec(\n shape=[self.batch_size, None, None, 3], dtype=tf.uint8, name='images')\n export_model = ExportModel(self.model)\n return export_model, spec\n\n def export(self,\n output_dir: Optional[Text] = None,\n tensorrt: Optional[Text] = None,\n tflite: Optional[Text] = None,\n file_pattern: Optional[Text] = None,\n num_calibration_steps: int = 2000):\n \"\"\"Export a saved model, frozen graph, and potential tflite/tensorrt model.\n\n Args:\n output_dir: the output folder for saved model.\n tensorrt: If not None, must be {'FP32', 'FP16', 'INT8'}.\n tflite: Type for post-training quantization.\n file_pattern: Glob for tfrecords, e.g. coco/val-*.tfrecord.\n num_calibration_steps: Number of post-training quantization calibration\n steps to run.\n \"\"\"\n export_model, input_spec = self._get_model_and_spec(tflite)\n image_size = utils.parse_image_size(self.params['image_size'])\n if output_dir:\n tf.saved_model.save(\n export_model,\n output_dir,\n signatures=export_model.__call__.get_concrete_function(input_spec))\n logging.info('Model saved at %s', output_dir)\n\n # also save freeze pb file.\n graphdef = self.freeze(\n export_model.__call__.get_concrete_function(input_spec))\n proto_path = tf.io.write_graph(\n graphdef, output_dir, self.model_name + '_frozen.pb', as_text=False)\n logging.info('Frozen graph saved at %s', proto_path)\n\n if tflite:\n shape = (self.batch_size, *image_size, 3)\n input_spec = tf.TensorSpec(\n shape=shape, dtype=input_spec.dtype, name=input_spec.name)\n # from_saved_model supports advanced converter features like op fusing.\n converter = tf.lite.TFLiteConverter.from_saved_model(output_dir)\n if tflite == 'FP32':\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.target_spec.supported_types = [tf.float32]\n elif tflite == 'FP16':\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.target_spec.supported_types = [tf.float16]\n elif tflite == 'INT8':\n # Enables MLIR-based post-training quantization.\n converter.experimental_new_quantizer = True\n if file_pattern:\n config = hparams_config.get_efficientdet_config(self.model_name)\n config.override(self.params)\n ds = dataloader.InputReader(\n file_pattern,\n is_training=False,\n max_instances_per_image=config.max_instances_per_image)(\n config, batch_size=self.batch_size)\n\n def representative_dataset_gen():\n for image, _ in ds.take(num_calibration_steps):\n yield [image]\n else: # Used for debugging, can remove later.\n logging.warn('Use real representative dataset instead of fake ones.')\n num_calibration_steps = 10\n def representative_dataset_gen(): # rewrite this for real data.\n for _ in range(num_calibration_steps):\n yield [tf.ones(shape, dtype=input_spec.dtype)]\n\n converter.representative_dataset = representative_dataset_gen\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.inference_input_type = tf.uint8\n # TFLite's custom NMS op isn't supported by post-training quant,\n # so we add TFLITE_BUILTINS as well.\n supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.TFLITE_BUILTINS\n ]\n converter.target_spec.supported_ops = supported_ops\n\n else:\n raise ValueError(f'Invalid tflite {tflite}: must be FP32, FP16, INT8.')\n\n tflite_path = 
os.path.join(output_dir, tflite.lower() + '.tflite')\n tflite_model = converter.convert()\n tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)\n logging.info('TFLite is saved at %s', tflite_path)\n\n if tensorrt:\n trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())\n conversion_params = tf.experimental.tensorrt.ConversionParams(\n max_workspace_size_bytes=(2 << 20),\n maximum_cached_engines=1,\n precision_mode=tensorrt.upper())\n converter = tf.experimental.tensorrt.Converter(\n output_dir, conversion_params=conversion_params)\n converter.convert()\n converter.save(trt_path)\n logging.info('TensorRT model is saved at %s', trt_path)\n", "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test for wbf.\"\"\"\nfrom absl import logging\nimport tensorflow as tf\n\nfrom . import wbf\n\n\nclass WbfTest(tf.test.TestCase):\n\n def test_detection_iou_same(self):\n d1 = tf.constant([[1, 1, 1, 3, 3, 1, 1]], dtype=tf.float32)\n d2 = tf.constant([1, 1, 1, 3, 3, 1, 1], dtype=tf.float32)\n\n iou = wbf.vectorized_iou(d1, d2)\n\n self.assertAllClose(iou[0][0], 1.0)\n\n def test_detection_iou_corners(self):\n d1 = tf.constant([[1, 1, 1, 3, 3, 1, 1]], dtype=tf.float32)\n d2 = tf.constant([1, 2, 2, 4, 4, 1, 1], dtype=tf.float32)\n\n iou = wbf.vectorized_iou(d1, d2)\n\n self.assertAllClose(iou[0][0], 1.0 / 7.0)\n\n def test_detection_iou_ends(self):\n d1 = tf.constant([[1, 1, 1, 3, 2, 1, 1]], dtype=tf.float32)\n d2 = tf.constant([1, 2, 1, 4, 2, 1, 1], dtype=tf.float32)\n\n iou = wbf.vectorized_iou(d1, d2)\n\n self.assertAllClose(iou[0][0], 1.0 / 3.0)\n\n def test_detection_iou_none(self):\n d1 = tf.constant([[1, 1, 1, 3, 3, 1, 1]], dtype=tf.float32)\n d2 = tf.constant([1, 3, 3, 5, 5, 1, 1], dtype=tf.float32)\n\n iou = wbf.vectorized_iou(d1, d2)\n\n self.assertAllClose(iou[0][0], 0)\n\n def test_detection_iou_vector(self):\n vector_to_match = tf.constant(\n [\n [1, 1, 1, 3, 3, 1, 1],\n [1, 2, 2, 4, 4, 1, 1],\n [1, 3, 3, 5, 5, 1, 1],\n ],\n dtype=tf.float32,\n )\n\n detection = tf.constant([1, 1, 1, 3, 3, 1, 1], dtype=tf.float32)\n\n ious = wbf.vectorized_iou(vector_to_match, detection)\n self.assertAllClose(tf.reshape(ious, [3]), [1, 1.0 / 7.0, 0])\n\n def test_find_matching_cluster_matches(self):\n matching_cluster = tf.constant([1, 1, 1, 2, 2, 1, 1], dtype=tf.float32)\n non_matching_cluster = tf.constant([1, 3, 3, 2, 2, 1, 1], dtype=tf.float32)\n\n box = tf.constant([1, 1, 1, 2, 2, 1, 1], dtype=tf.float32)\n\n cluster_index = wbf.find_matching_cluster(\n (matching_cluster, non_matching_cluster), box)\n\n self.assertAllClose(cluster_index, 0)\n\n cluster_index = wbf.find_matching_cluster(\n (non_matching_cluster, matching_cluster), box)\n\n self.assertAllClose(cluster_index, 1)\n\n def test_find_matching_cluster_best_overlap(self):\n overlaps = tf.constant([1, 1, 1, 11, 2, 1, 1], dtype=tf.float32)\n overlaps_better = tf.constant([1, 2, 1, 12, 2, 1, 
1], dtype=tf.float32)\n\n box = tf.constant([1, 3, 1, 13, 2, 1, 1], dtype=tf.float32)\n\n cluster_index = wbf.find_matching_cluster((overlaps,), box)\n\n self.assertAllClose(cluster_index, 0)\n\n cluster_index = wbf.find_matching_cluster((overlaps, overlaps_better), box)\n\n self.assertAllClose(cluster_index, 1)\n\n def test_weighted_average(self):\n samples = tf.constant([1, 3], dtype=tf.float32)\n\n weights1 = tf.constant([0.5, 0.5], dtype=tf.float32)\n weighted_average1 = wbf.weighted_average(samples, weights1)\n\n self.assertAllClose(weighted_average1, 2)\n\n weights2 = tf.constant([1, 0], dtype=tf.float32)\n weighted_average2 = wbf.weighted_average(samples, weights2)\n\n self.assertAllClose(weighted_average2, 1)\n\n weights3 = tf.constant([1, 2], dtype=tf.float32)\n weighted_average3 = wbf.weighted_average(samples, weights3)\n\n self.assertAllClose(weighted_average3, 7.0 / 3.0)\n\n def test_average_detections(self):\n d1 = tf.constant([1, 1, 1, 2, 2, 0.3, 1], dtype=tf.float32)\n d2 = tf.constant([1, 3, 3, 4, 4, 0.7, 1], dtype=tf.float32)\n\n averaged_single_model = wbf.average_detections((d1, d2), 1)\n self.assertAllClose(averaged_single_model, [1, 2.4, 2.4, 3.4, 3.4, 0.5, 1])\n\n averaged_multi_model = wbf.average_detections((d1, d2), 3)\n self.assertAllClose(averaged_multi_model,\n [1, 2.4, 2.4, 3.4, 3.4, 0.333333, 1])\n\n averaged_single_detection = wbf.average_detections((d2,), 2)\n self.assertAllClose(averaged_single_detection, [1, 3, 3, 4, 4, 0.35, 1])\n\n def test_ensemble_boxes(self):\n d1 = tf.constant([1, 2, 1, 10, 1, 0.75, 1], dtype=tf.float32)\n d2 = tf.constant([1, 3, 1, 10, 1, 0.75, 1], dtype=tf.float32)\n d3 = tf.constant([1, 3, 1, 10, 1, 1, 2], dtype=tf.float32)\n\n ensembled = wbf.ensemble_detections({'num_classes': 3},\n tf.stack([d1, d2, d3]), 2)\n\n self.assertAllClose(ensembled,\n [[1, 2.5, 1, 10, 1, 0.75, 1], [1, 3, 1, 10, 1, 0.5, 2]])\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.WARNING)\n tf.test.main()\n", "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"EfficientDet model definition.\n\n[1] Mingxing Tan, Ruoming Pang, Quoc Le.\n EfficientDet: Scalable and Efficient Object Detection.\n CVPR 2020, https://arxiv.org/abs/1911.09070\n\"\"\"\nimport functools\nimport re\n\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom . import hparams_config\nfrom . 
import utils\nfrom .backbone import backbone_factory\nfrom .backbone import efficientnet_builder\nfrom .keras import fpn_configs\n\n\n################################################################################\ndef freeze_vars(variables, pattern):\n \"\"\"Removes backbone+fpn variables from the input.\n\n Args:\n variables: all the variables in training\n pattern: a reg experession such as \".*(efficientnet|fpn_cells).*\".\n\n Returns:\n var_list: a list containing variables for training\n \"\"\"\n if pattern:\n filtered_vars = [v for v in variables if not re.match(pattern, v.name)]\n if len(filtered_vars) == len(variables):\n logging.warning('%s didnt match with any variable. Please use compatible '\n 'pattern. i.e \"(efficientnet)\"', pattern)\n return filtered_vars\n return variables\n\n\ndef resample_feature_map(feat,\n name,\n target_height,\n target_width,\n target_num_channels,\n apply_bn=False,\n is_training=None,\n conv_after_downsample=False,\n strategy=None,\n data_format='channels_last'):\n \"\"\"Resample input feature map to have target number of channels and size.\"\"\"\n if data_format == 'channels_first':\n _, num_channels, height, width = feat.get_shape().as_list()\n else:\n _, height, width, num_channels = feat.get_shape().as_list()\n\n if height is None or width is None or num_channels is None:\n raise ValueError(\n 'shape[1] or shape[2] or shape[3] of feat is None (shape:{}).'.format(\n feat.shape))\n if apply_bn and is_training is None:\n raise ValueError('If BN is applied, need to provide is_training')\n\n def _maybe_apply_1x1(feat):\n \"\"\"Apply 1x1 conv to change layer width if necessary.\"\"\"\n if num_channels != target_num_channels:\n feat = tf.layers.conv2d(\n feat,\n filters=target_num_channels,\n kernel_size=(1, 1),\n padding='same',\n data_format=data_format)\n if apply_bn:\n feat = utils.batch_norm_act(\n feat,\n is_training_bn=is_training,\n act_type=None,\n data_format=data_format,\n strategy=strategy,\n name='bn')\n return feat\n\n with tf.variable_scope('resample_{}'.format(name)):\n # If conv_after_downsample is True, when downsampling, apply 1x1 after\n # downsampling for efficiency.\n if height > target_height and width > target_width:\n if not conv_after_downsample:\n feat = _maybe_apply_1x1(feat)\n height_stride_size = int((height - 1) // target_height + 1)\n width_stride_size = int((width - 1) // target_width + 1)\n\n # Use max pooling in default.\n feat = tf.layers.max_pooling2d(\n inputs=feat,\n pool_size=[height_stride_size + 1, width_stride_size + 1],\n strides=[height_stride_size, width_stride_size],\n padding='SAME',\n data_format=data_format)\n\n if conv_after_downsample:\n feat = _maybe_apply_1x1(feat)\n elif height <= target_height and width <= target_width:\n feat = _maybe_apply_1x1(feat)\n if height < target_height or width < target_width:\n if data_format == 'channels_first':\n feat = tf.transpose(feat, [0, 2, 3, 1])\n feat = tf.cast(\n tf.image.resize_nearest_neighbor(\n tf.cast(feat, tf.float32), [target_height, target_width]),\n dtype=feat.dtype)\n if data_format == 'channels_first':\n feat = tf.transpose(feat, [0, 3, 1, 2])\n else:\n raise ValueError(\n 'Incompatible target feature map size: target_height: {},'\n 'target_width: {}'.format(target_height, target_width))\n\n return feat\n\n\n###############################################################################\ndef class_net(images,\n level,\n num_classes,\n num_anchors,\n num_filters,\n is_training,\n act_type,\n separable_conv=True,\n repeats=4,\n 
survival_prob=None,\n strategy=None,\n data_format='channels_last'):\n \"\"\"Class prediction network.\"\"\"\n if separable_conv:\n conv_op = functools.partial(\n tf.layers.separable_conv2d, depth_multiplier=1,\n data_format=data_format,\n pointwise_initializer=tf.initializers.variance_scaling(),\n depthwise_initializer=tf.initializers.variance_scaling())\n else:\n conv_op = functools.partial(\n tf.layers.conv2d,\n data_format=data_format,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01))\n\n for i in range(repeats):\n orig_images = images\n images = conv_op(\n images,\n num_filters,\n kernel_size=3,\n bias_initializer=tf.zeros_initializer(),\n activation=None,\n padding='same',\n name='class-%d' % i)\n images = utils.batch_norm_act(\n images,\n is_training,\n act_type=act_type,\n init_zero=False,\n strategy=strategy,\n data_format=data_format,\n name='class-%d-bn-%d' % (i, level))\n\n if i > 0 and survival_prob:\n images = utils.drop_connect(images, is_training, survival_prob)\n images = images + orig_images\n\n classes = conv_op(\n images,\n num_classes * num_anchors,\n kernel_size=3,\n bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),\n padding='same',\n name='class-predict')\n return classes\n\n\ndef box_net(images,\n level,\n num_anchors,\n num_filters,\n is_training,\n act_type,\n repeats=4,\n separable_conv=True,\n survival_prob=None,\n strategy=None,\n data_format='channels_last'):\n \"\"\"Box regression network.\"\"\"\n if separable_conv:\n conv_op = functools.partial(\n tf.layers.separable_conv2d, depth_multiplier=1,\n data_format=data_format,\n pointwise_initializer=tf.initializers.variance_scaling(),\n depthwise_initializer=tf.initializers.variance_scaling())\n else:\n conv_op = functools.partial(\n tf.layers.conv2d,\n data_format=data_format,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01))\n\n for i in range(repeats):\n orig_images = images\n images = conv_op(\n images,\n num_filters,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='box-%d' % i)\n images = utils.batch_norm_act(\n images,\n is_training,\n act_type=act_type,\n init_zero=False,\n strategy=strategy,\n data_format=data_format,\n name='box-%d-bn-%d' % (i, level))\n\n if i > 0 and survival_prob:\n images = utils.drop_connect(images, is_training, survival_prob)\n images = images + orig_images\n\n boxes = conv_op(\n images,\n 4 * num_anchors,\n kernel_size=3,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='box-predict')\n\n return boxes\n\n\ndef build_class_and_box_outputs(feats, config):\n \"\"\"Builds box net and class net.\n\n Args:\n feats: input tensor.\n config: a dict-like config, including all parameters.\n\n Returns:\n A tuple (class_outputs, box_outputs) for class/box predictions.\n \"\"\"\n\n class_outputs = {}\n box_outputs = {}\n num_anchors = len(config.aspect_ratios) * config.num_scales\n cls_fsize = config.fpn_num_filters\n with tf.variable_scope('class_net', reuse=tf.AUTO_REUSE):\n for level in range(config.min_level,\n config.max_level + 1):\n class_outputs[level] = class_net(\n images=feats[level],\n level=level,\n num_classes=config.num_classes,\n num_anchors=num_anchors,\n num_filters=cls_fsize,\n is_training=config.is_training_bn,\n act_type=config.act_type,\n repeats=config.box_class_repeats,\n separable_conv=config.separable_conv,\n survival_prob=config.survival_prob,\n strategy=config.strategy,\n data_format=config.data_format\n )\n\n box_fsize = 
config.fpn_num_filters\n with tf.variable_scope('box_net', reuse=tf.AUTO_REUSE):\n for level in range(config.min_level,\n config.max_level + 1):\n box_outputs[level] = box_net(\n images=feats[level],\n level=level,\n num_anchors=num_anchors,\n num_filters=box_fsize,\n is_training=config.is_training_bn,\n act_type=config.act_type,\n repeats=config.box_class_repeats,\n separable_conv=config.separable_conv,\n survival_prob=config.survival_prob,\n strategy=config.strategy,\n data_format=config.data_format)\n\n return class_outputs, box_outputs\n\n\ndef build_backbone(features, config):\n \"\"\"Builds backbone model.\n\n Args:\n features: input tensor.\n config: config for backbone, such as is_training_bn and backbone name.\n\n Returns:\n A dict from levels to the feature maps from the output of the backbone model\n with strides of 8, 16 and 32.\n\n Raises:\n ValueError: if backbone_name is not supported.\n \"\"\"\n backbone_name = config.backbone_name\n is_training_bn = config.is_training_bn\n if 'efficientnet' in backbone_name:\n override_params = {\n 'batch_norm':\n utils.batch_norm_class(is_training_bn, config.strategy),\n 'relu_fn':\n functools.partial(utils.activation_fn, act_type=config.act_type),\n }\n if 'b0' in backbone_name:\n override_params['survival_prob'] = 0.0\n if config.backbone_config is not None:\n override_params['blocks_args'] = (\n efficientnet_builder.BlockDecoder().encode(\n config.backbone_config.blocks))\n override_params['data_format'] = config.data_format\n model_builder = backbone_factory.get_model_builder(backbone_name)\n _, endpoints = model_builder.build_model_base(\n features,\n backbone_name,\n training=is_training_bn,\n override_params=override_params)\n u1 = endpoints[0]\n u2 = endpoints[1]\n u3 = endpoints[2]\n u4 = endpoints[3]\n u5 = endpoints[4]\n else:\n raise ValueError(\n 'backbone model {} is not supported.'.format(backbone_name))\n return {0: features, 1: u1, 2: u2, 3: u3, 4: u4, 5: u5}\n\n\ndef build_feature_network(features, config):\n \"\"\"Build FPN input features.\n\n Args:\n features: input tensor.\n config: a dict-like config, including all parameters.\n\n Returns:\n A dict from levels to the feature maps processed after feature network.\n \"\"\"\n feat_sizes = utils.get_feat_sizes(config.image_size, config.max_level)\n feats = []\n if config.min_level not in features.keys():\n raise ValueError('features.keys ({}) should include min_level ({})'.format(\n features.keys(), config.min_level))\n\n # Build additional input features that are not from backbone.\n for level in range(config.min_level, config.max_level + 1):\n if level in features.keys():\n feats.append(features[level])\n else:\n h_id, w_id = (2, 3) if config.data_format == 'channels_first' else (1, 2)\n # Adds a coarser level by downsampling the last feature map.\n feats.append(\n resample_feature_map(\n feats[-1],\n name='p%d' % level,\n target_height=(feats[-1].shape[h_id] - 1) // 2 + 1,\n target_width=(feats[-1].shape[w_id] - 1) // 2 + 1,\n target_num_channels=config.fpn_num_filters,\n apply_bn=config.apply_bn_for_resampling,\n is_training=config.is_training_bn,\n conv_after_downsample=config.conv_after_downsample,\n strategy=config.strategy,\n data_format=config.data_format\n ))\n\n utils.verify_feats_size(\n feats,\n feat_sizes=feat_sizes,\n min_level=config.min_level,\n max_level=config.max_level,\n data_format=config.data_format)\n\n with tf.variable_scope('fpn_cells'):\n for rep in range(config.fpn_cell_repeats):\n with tf.variable_scope('cell_{}'.format(rep)):\n 
logging.info('building cell %d', rep)\n new_feats = build_bifpn_layer(feats, feat_sizes, config)\n\n feats = [\n new_feats[level]\n for level in range(\n config.min_level, config.max_level + 1)\n ]\n\n utils.verify_feats_size(\n feats,\n feat_sizes=feat_sizes,\n min_level=config.min_level,\n max_level=config.max_level,\n data_format=config.data_format)\n\n return new_feats\n\n\ndef fuse_features(nodes, weight_method):\n \"\"\"Fuse features from different resolutions and return a weighted sum.\n\n Args:\n nodes: a list of tensorflow features at different levels\n weight_method: feature fusion method. One of:\n - \"attn\" - Softmax weighted fusion\n - \"fastattn\" - Fast normalzied feature fusion\n - \"sum\" - a sum of inputs\n\n Returns:\n A tensor denoting the fused feature.\n \"\"\"\n dtype = nodes[0].dtype\n\n if weight_method == 'attn':\n edge_weights = [tf.cast(tf.Variable(1.0, name='WSM'), dtype=dtype)\n for _ in nodes]\n normalized_weights = tf.nn.softmax(tf.stack(edge_weights))\n nodes = tf.stack(nodes, axis=-1)\n new_node = tf.reduce_sum(nodes * normalized_weights, -1)\n elif weight_method == 'fastattn':\n edge_weights = [\n tf.nn.relu(tf.cast(tf.Variable(1.0, name='WSM'), dtype=dtype))\n for _ in nodes\n ]\n weights_sum = tf.add_n(edge_weights)\n nodes = [nodes[i] * edge_weights[i] / (weights_sum + 0.0001)\n for i in range(len(nodes))]\n new_node = tf.add_n(nodes)\n elif weight_method == 'channel_attn':\n num_filters = int(nodes[0].shape[-1])\n edge_weights = [\n tf.cast(\n tf.Variable(lambda: tf.ones([num_filters]), name='WSM'),\n dtype=dtype) for _ in nodes\n ]\n normalized_weights = tf.nn.softmax(tf.stack(edge_weights, -1), axis=-1)\n nodes = tf.stack(nodes, axis=-1)\n new_node = tf.reduce_sum(nodes * normalized_weights, -1)\n elif weight_method == 'channel_fastattn':\n num_filters = int(nodes[0].shape[-1])\n edge_weights = [\n tf.nn.relu(tf.cast(\n tf.Variable(lambda: tf.ones([num_filters]), name='WSM'),\n dtype=dtype)) for _ in nodes\n ]\n weights_sum = tf.add_n(edge_weights)\n nodes = [nodes[i] * edge_weights[i] / (weights_sum + 0.0001)\n for i in range(len(nodes))]\n new_node = tf.add_n(nodes)\n elif weight_method == 'sum':\n new_node = tf.add_n(nodes)\n else:\n raise ValueError(\n 'unknown weight_method {}'.format(weight_method))\n\n return new_node\n\n\ndef build_bifpn_layer(feats, feat_sizes, config):\n \"\"\"Builds a feature pyramid given previous feature pyramid and config.\"\"\"\n p = config # use p to denote the network config.\n if p.fpn_config:\n fpn_config = p.fpn_config\n else:\n fpn_config = fpn_configs.get_fpn_config(p.fpn_name, p.min_level,\n p.max_level, p.fpn_weight_method)\n\n num_output_connections = [0 for _ in feats]\n for i, fnode in enumerate(fpn_config.nodes):\n with tf.variable_scope('fnode{}'.format(i)):\n logging.info('fnode %d : %s', i, fnode)\n new_node_height = feat_sizes[fnode['feat_level']]['height']\n new_node_width = feat_sizes[fnode['feat_level']]['width']\n nodes = []\n for idx, input_offset in enumerate(fnode['inputs_offsets']):\n input_node = feats[input_offset]\n num_output_connections[input_offset] += 1\n input_node = resample_feature_map(\n input_node, '{}_{}_{}'.format(idx, input_offset, len(feats)),\n new_node_height, new_node_width, p.fpn_num_filters,\n p.apply_bn_for_resampling, p.is_training_bn,\n p.conv_after_downsample,\n strategy=p.strategy,\n data_format=config.data_format)\n nodes.append(input_node)\n\n new_node = fuse_features(nodes, fpn_config.weight_method)\n\n with 
tf.variable_scope('op_after_combine{}'.format(len(feats))):\n if not p.conv_bn_act_pattern:\n new_node = utils.activation_fn(new_node, p.act_type)\n\n if p.separable_conv:\n conv_op = functools.partial(\n tf.layers.separable_conv2d, depth_multiplier=1)\n else:\n conv_op = tf.layers.conv2d\n\n new_node = conv_op(\n new_node,\n filters=p.fpn_num_filters,\n kernel_size=(3, 3),\n padding='same',\n use_bias=not p.conv_bn_act_pattern,\n data_format=config.data_format,\n name='conv')\n\n new_node = utils.batch_norm_act(\n new_node,\n is_training_bn=p.is_training_bn,\n act_type=None if not p.conv_bn_act_pattern else p.act_type,\n data_format=config.data_format,\n strategy=p.strategy,\n name='bn')\n\n feats.append(new_node)\n num_output_connections.append(0)\n\n output_feats = {}\n for l in range(p.min_level, p.max_level + 1):\n for i, fnode in enumerate(reversed(fpn_config.nodes)):\n if fnode['feat_level'] == l:\n output_feats[l] = feats[-1 - i]\n break\n return output_feats\n\n\ndef efficientdet(features, model_name=None, config=None, **kwargs):\n \"\"\"Build EfficientDet model.\"\"\"\n if not config and not model_name:\n raise ValueError('please specify either model name or config')\n\n if not config:\n config = hparams_config.get_efficientdet_config(model_name)\n elif isinstance(config, dict):\n config = hparams_config.Config(config) # wrap dict in Config object\n\n if kwargs:\n config.override(kwargs)\n\n logging.info(config)\n\n # build backbone features.\n features = build_backbone(features, config)\n logging.info('backbone params/flops = {:.6f}M, {:.9f}B'.format(\n *utils.num_params_flops()))\n\n # build feature network.\n fpn_feats = build_feature_network(features, config)\n logging.info('backbone+fpn params/flops = {:.6f}M, {:.9f}B'.format(\n *utils.num_params_flops()))\n\n # build class and box predictions.\n class_outputs, box_outputs = build_class_and_box_outputs(fpn_feats, config)\n logging.info('backbone+fpn+box params/flops = {:.6f}M, {:.9f}B'.format(\n *utils.num_params_flops()))\n\n return class_outputs, box_outputs\n" ]
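The wbf tests earlier in this record pin down IoU values for specific box pairs. The same numbers fall out of a plain NumPy IoU over the [ymin, xmin, ymax, xmax] slice of the 7-field detection rows (a sketch, not the library's vectorized_iou):

import numpy as np

def iou(a, b):
    # intersection rectangle, clamped to zero when boxes don't overlap
    ymin = max(a[0], b[0]); xmin = max(a[1], b[1])
    ymax = min(a[2], b[2]); xmax = min(a[3], b[3])
    inter = max(0.0, ymax - ymin) * max(0.0, xmax - xmin)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)

assert np.isclose(iou([1, 1, 3, 3], [1, 1, 3, 3]), 1.0)        # same box
assert np.isclose(iou([1, 1, 3, 3], [2, 2, 4, 4]), 1.0 / 7.0)  # corners
assert np.isclose(iou([1, 1, 3, 3], [3, 3, 5, 5]), 0.0)        # disjoint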
[ [ "tensorflow.compat.v1.wrap_function", "tensorflow.profiler.experimental.stop", "tensorflow.saved_model.contains_saved_model", "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.keras.mixed_precision.set_global_policy", "tensorflow.lite.Interpreter", "tensorflow.io.gfile.GFile", "tensorflow.lite.TFLiteConverter.from_saved_model", "tensorflow.config.run_functions_eagerly", "tensorflow.io.write_graph", "tensorflow.experimental.tensorrt.Converter", "tensorflow.keras.mixed_precision.Policy", "tensorflow.function", "numpy.array", "tensorflow.profiler.experimental.ProfilerOptions", "tensorflow.profiler.experimental.start", "tensorflow.saved_model.load", "tensorflow.ones", "tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2_as_graph", "tensorflow.TensorSpec", "tensorflow.nest.map_structure" ], [ "tensorflow.stack", "tensorflow.reshape", "tensorflow.constant", "tensorflow.test.main" ], [ "tensorflow.compat.v1.initializers.variance_scaling", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.random_normal_initializer", "numpy.log", "tensorflow.compat.v1.Variable", "tensorflow.compat.v1.ones", "tensorflow.compat.v1.layers.conv2d", "tensorflow.compat.v1.add_n", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.layers.max_pooling2d", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fossabot/onnx-mlir
[ "ed1377c26b1be69b9b0ed6942025197491ca6c7e" ]
[ "utils/gen_doc.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict, OrderedDict\nfrom io import StringIO\nimport io\nimport os\nimport sys\nimport datetime\nimport argparse\n\nimport numpy as np # type: ignore\n\nfrom onnx import defs, FunctionProto, helper, OperatorStatus\nfrom onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN\nfrom onnx.backend.test.case import collect_snippets\nfrom onnx.backend.sample.ops import collect_sample_implementations\nfrom typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dry-run-onnx-ops\",\n help=\"Output ONNXOps.td.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--dry-run-op-build-table\",\n help=\"Output OpBuildTable.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nargs = parser.parse_args()\n\n# Manual specification of attribute defaults.\nspecial_attr_defaults = dict([\n # (\"AveragePool.kernel_shape\", ('ints', '{}')),\n # (\"MaxPool.kernel_shape\", ('ints', '{}')),\n # (\"Cast.to\", ('int', '0')),\n # (\"Concat.axis\", ('int', '0')),\n # (\"Conv.group\", ('int', '1')),\n # (\"Unsqueeze.axes\", ('ints', '{}')),\n # (\"RNN.activation_alpha\", ('floats', '{}')),\n # (\"RNN.activation_beta\", ('floats', '{}')),\n])\n\n# Special operation importing handlers.\nspecial_op_handler = dict([\n (\"MaxPool\", \"ImportNodeMaxPool\"),\n (\"BatchNormalization\", \"ImportNodeBatchNormalization\"),\n (\"Pad\", \"ImportNodePad\"),\n (\"Reshape\", \"ImportNodeReshape\"),\n #(\"Transpose\", \"ImportNodeTranspose\")\n])\n\n# Operations supporting shape inference.\nOpsWithShapeInference = [\n 'Exp', 'Tanh', 'Sinh', 'Cosh', 'Sigmoid', 'Relu', 'Add', 'Mul', 'Div',\n 'Sub', 'And', 'Or', 'Xor', 'Sum', 'Max', 'Min', 'MatMul', 'Gemm',\n 'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',\n 'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'ReduceMax', 'ReduceMin',\n 'ReduceProd', 'ReduceSum', 'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze',\n 'Sign', 'Constant', 'AveragePool', 'Abs', 'Conv', 'Concat', 'Neg'\n]\n\n# Operations supporting canonicalization.\nOpsWithCanonicalizer = ['Add', 'Identity', 'Gemm', 'Conv']\n\n# Operations who have operands that, if produced by constant operations, should\n# be promoted to become an attribute (via attribute promotion).\n#\n# For each operation, a key/value pair is used to specify how attribute promotion\n# should proceed. The key is the operation's name and the value is a list of\n# tuples, whose first item is the attribute/operand name, and the second item is\n# the index at which such operand occurs in the list of the operation's inputs.\nOpsWithPromotableConstOperands = {\"Reshape\": [(\"shape\", 1)]}\n\n# Add an Op in this list if the Op needs result type deduction which is required\n# when writing declarative rewriting rules. 
Deduced type is always\n# an UnrankedTensorType whose element type is the same as the first operand's\n# element type.\n#\n# Currenlty, there are only two build methods generated:\n# - one with operands and attributes having a separate parameter, and\n# - one with operands and attributes having aggregated parameters.\ncustom_builder_ops_list = ['Abs', 'Mul', 'Exp', 'ReduceSum', 'ReduceSumSquare']\n\nSNIPPETS = collect_snippets()\nSAMPLE_IMPLEMENTATIONS = collect_sample_implementations()\nONNX_ML = not bool(os.getenv('ONNX_ML') == '0')\n\nONNX_ML = False\nsys.stderr.write(\"ONNX_ML {}\\n\".format(ONNX_ML))\n\nif ONNX_ML:\n ext = '-ml.md'\nelse:\n ext = '.md'\n\n\ndef should_render_domain(domain): # type: (Text) -> bool\n if domain == ONNX_ML_DOMAIN and not ONNX_ML:\n return False\n elif ONNX_ML and domain != ONNX_ML_DOMAIN:\n return False\n return True\n\n\ndef display_attr_type(v): # type: (OpSchema.AttrType) -> Text\n assert isinstance(v, OpSchema.AttrType)\n s = Text(v)\n s = s[s.rfind('.') + 1:].lower()\n if s[-1] == 's':\n s = 'list of ' + s\n return s\n\n\ndef get_unique_output_name(schema, name):\n for input in schema.inputs:\n if input.name == name:\n return 'out_' + name\n return name\n\n\ndef onnx_attr_type_to_mlir_attr_type(t):\n onnx_attr_type = Text(t)\n onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()\n\n if onnx_attr_type == 'int':\n mlir_attr_type = 'I64Attr'\n elif onnx_attr_type == 'float':\n mlir_attr_type = 'F32Attr'\n elif onnx_attr_type == 'ints':\n mlir_attr_type = 'I64ArrayAttr'\n elif onnx_attr_type == 'floats':\n mlir_attr_type = 'F32ArrayAttr'\n elif onnx_attr_type == \"string\":\n mlir_attr_type = 'StrAttr'\n elif onnx_attr_type == \"strings\":\n mlir_attr_type = 'StrArrayAttr'\n else:\n mlir_attr_type = 'AnyAttr'\n #TODO: tensor and sparse tensor\n return mlir_attr_type\n\n\n#TODO: any better way to do this.\ndef tblgen_attr_type_to_cpp_type(t):\n if 'I64Attr' in t:\n cpp_type = 'IntegerAttr'\n elif 'F32Attr' in t:\n cpp_type = 'FloatAttr'\n elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:\n cpp_type = 'ArrayAttr'\n elif 'StrAttr' in t:\n cpp_type = 'StringAttr'\n elif 'strings' in t:\n cpp_type = 'ArrayAttr'\n else:\n cpp_type = 'Attribute'\n return cpp_type\n\n\ndef tblgen_operand_type_to_cpp_type(op_type):\n if op_type.startswith('Variadic'):\n mytype = 'ValueRange'\n else:\n mytype = 'Value'\n return mytype\n\n\ndef np_type_to_tblgen_attr_type(tstr):\n tfrom = np.array([\n 'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',\n 'float', 'double'\n ])\n tto = np.array(\n ['I1', 'I8', 'I16', 'I32', 'I64', 'BF16', 'F16', 'F32', 'F64'])\n index = -1\n for i in range(len(tfrom)):\n if tfrom[i] in tstr:\n index = i\n break\n if index == -1:\n print(\"error\", tstr)\n return ''\n else:\n return tto[i]\n\n\ndef get_allowed_elem_types(schema, input):\n allowed_types_str = None\n return allowed_types_str\n # TODO: enable type constraints.\n # if input.typeStr :\n # tstr = input.typeStr\n # else :\n # return allwedTypeStr\n # if schema.type_constraints:\n # for type_constraint in schema.type_constraints:\n # if type_constraint.type_param_str != tstr :\n # continue\n # allowedTypes = type_constraint.allowed_type_strs\n # allowedTypeStr=''\n # if (len(allowedTypes) > 0):\n # t = convert_type(allowedTypes[0])\n # if t == '' :\n # return ''\n # allowedTypeStr += t\n # for allowedType in allowedTypes[1:]:\n # t = convert_type(allowedType)\n # if t == '' :\n # return ''\n # if not t in allowedTypeStr :\n # allowedTypeStr += ', '+t\n #\n # 
return allowedTypeStr\n #\n # return allowedTypeStr\n\n\ndef inc_indent(indent=None):\n return \"\" if indent is None else indent + ' ' * 2\n\n\ndef dec_indent(indent):\n return indent[:-2]\n\n\ndef join_args(args):\n return \", \".join(args)\n\n\ndef get_operands_or_results(schema, is_input):\n value_list = schema.inputs if is_input else schema.outputs\n if not value_list:\n return OrderedDict()\n\n def any_type_of(types):\n assert isinstance(types, list)\n if len(types) == 1:\n return types[0]\n else:\n return \"AnyTypeOf<[{}]>\".format(\", \".join(types))\n\n name_to_types = OrderedDict()\n for i, value in enumerate(value_list):\n elem_types = get_allowed_elem_types(schema, value)\n\n if elem_types is None:\n types = [\"AnyMemRef\", \"AnyTensor\"]\n else:\n types = [\"TensorOf<[{}]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types), types))\n\n # If operand is promotable to an attribute, then it must be\n # nullable in case it migrates to be an attribute.\n if schema.name in OpsWithPromotableConstOperands:\n idxs = dict(OpsWithPromotableConstOperands[schema.name]).values()\n if i in idxs:\n types.append(\"NoneType\")\n\n if OpSchema.FormalParameterOption.Optional == value.option:\n types.append(\"NoneType\")\n elif OpSchema.FormalParameterOption.Variadic == value.option:\n if value.isHomogeneous:\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n else:\n #TODO handle(variadic, heterogeneous) \"\n sys.stderr.write(\"warning: (variadic, heterogeneous) for\" + schema.name +\n ' ' + value.name + \"\\n\")\n\n # Since output name can coincide with that of an input, we explicitly\n # append a suffix \"_out\" to such names for disambiguation.\n if is_input:\n value_name = value.name\n else:\n value_name = get_unique_output_name(schema, value.name)\n\n name_to_types[value_name] = any_type_of(types)\n return name_to_types\n\n\ndef get_attrs(schema):\n def get_attr_type_optional(attr_type):\n return 'OptionalAttr<{}>'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type))\n\n def get_attr_type_with_default(attr_type, attr_default):\n return 'DefaultValuedAttr<{}, \"{}\">'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)\n\n if not schema.attributes:\n return OrderedDict()\n\n name_to_type = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n qualified_attr_name = \"{}.{}\".format(schema.name, attr.name)\n if qualified_attr_name in special_attr_defaults:\n name_to_type[attr.name] = get_attr_type_with_default(\n *special_attr_defaults[qualified_attr_name])\n\n # option holds either required or default value\n elif attr.required:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n attr.type)\n elif attr.default_value.name:\n\n def format_value(value): # type: (Any) -> Text\n if isinstance(value, float):\n formatted = str(np.round(value, 5))\n # use default formatting, unless too long.\n if (len(formatted) > 10):\n formatted = str(\"({:e})\".format(value))\n return formatted\n elif isinstance(\n value,\n (bytes, bytearray)) and sys.version_info[0] == 3:\n return str(value.decode('utf-8'))\n return str(value)\n\n default_value = helper.get_attribute_value(attr.default_value)\n if isinstance(default_value, list):\n default_value = [format_value(val) for val in default_value]\n default_value_str = '{}'.format(default_value)\n default_value_str = default_value_str.replace('[', '{', 1)\n default_value_str = default_value_str.replace(']', '}', 1)\n if Text(attr.type) == \"AttrType.STRINGS\":\n default_value_str = 
default_value_str.replace(\"'\", '\\\\\"')\n else:\n default_value_str = default_value_str.replace(\"'\", '')\n else:\n default_value = format_value(default_value)\n default_value_str = default_value\n\n name_to_type[attr.name] = get_attr_type_with_default(\n attr.type, default_value_str)\n else:\n name_to_type[attr.name] = get_attr_type_optional(attr.type)\n return name_to_type\n\n\ndef get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):\n cpp_name_to_idx_literal = \"{\" + \", \".join([\n \"{{\\\"{}\\\", {}}}\".format(*name_to_idx)\n for name_to_idx in const_operands_name_to_idx\n ]) + \"}\"\n\n s += indent + \"let extraClassDeclaration = [{\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::map<std::string, size_t> promotableConstOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(cpp_name_to_idx_literal)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n indent = dec_indent(indent)\n s += indent + \"}];\\n\"\n\n return s\n\n\ndef gen_op_def(schema):\n indent = inc_indent()\n s = 'def ONNX{0}Op:ONNX_Op<\"{0}\",\\n'.format(schema.name)\n\n # Generate decl for op traits.\n traits = [\"NoSideEffect\"]\n if schema.name in OpsWithShapeInference:\n traits.append(\"DeclareOpInterfaceMethods<ShapeInferenceOpInterface>\")\n if schema.name in OpsWithPromotableConstOperands.keys():\n traits.append(\"OpInterface<\\\"PromotableConstOperandsOpInterface\\\">\")\n s += inc_indent(indent) + '[{}]> {{\\n'.format(join_args(traits))\n\n # Generate decl for canonicalizer.\n indent = inc_indent(indent)\n if schema.name in OpsWithCanonicalizer:\n s += indent + 'let hasCanonicalizer = 1;\\n'\n\n # Generate decl for summary.\n s += indent + 'let summary = \"ONNX {} operation\";\\n'.format(schema.name)\n\n # Generate description.\n s += indent + 'let description = [{\\n'\n if schema.doc:\n lines = schema.doc.lstrip().splitlines()\n for line in lines:\n escaped_line = line.replace('\"', '\\\\\"')\\\n .replace('}]', '\\\\}\\\\]')\n s += indent + '\"{}\"\\n'.format(escaped_line)\n s += indent + '}];\\n'\n\n # Generate ins (consisting of operands and attributes).\n ins = get_operands_or_results(schema, is_input=True)\n ins.update(get_attrs(schema))\n ins_strs = [\"{1}:${0}\".format(*i) for i in ins.items()]\n s += indent + 'let arguments = (ins {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(ins_strs))\n\n # Generate outs (operation results).\n outs = get_operands_or_results(schema, is_input=False)\n outs_strs = [\"{1}:${0}\".format(*i) for i in outs.items()]\n s += indent + 'let results = (outs {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(outs_strs))\n\n # add custom builders\n # use element type of the first operand to construct an UnrankedTensorType for the output.\n if schema.name in custom_builder_ops_list:\n if len(ins) == 0:\n raise RuntimeWarning(\n \"warning: not generate custom build methods for \" +\n schema.name + \" since it does not have operands.\")\n else:\n s += indent + 'let builders = [\\n'\n # Custom builders with operands and attributes having a seperate parameter.\n # E.g. 
OpBuilder<\"Builder *builder, OperationState &state, Value X, Value, Y, Attribute A\", [{}]>\n indent = inc_indent(indent)\n s += indent + 'OpBuilder<\"Builder *builder, OperationState &state'\n operands_dict = get_operands_or_results(schema, is_input=True)\n for name, ty in operands_dict.items():\n s += ', {} {}'.format(tblgen_operand_type_to_cpp_type(ty),\n name)\n for name, ty in get_attrs(schema).items():\n s += ', {} {}'.format(tblgen_attr_type_to_cpp_type(ty), name)\n s += '\", [{\\n'\n indent = inc_indent(indent)\n\n # Get output type from first operand's type.\n first_operand_name = list(ins.items())[0][0]\n s += indent + 'auto elementType = {}.getType().cast<TensorType>().getElementType();\\n'.format(\n first_operand_name)\n s += indent + 'build(builder, state, UnrankedTensorType::get(elementType)'\n for name, _ in ins.items():\n s += ', ' + name\n s += ');\\n'\n indent = dec_indent(indent)\n s += indent + '}]>,\\n'\n\n # Custom builders with all operands and attributes having aggregate parameters.\n # E.g. OpBuilder<\"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes\", [{}]>'\n s += indent + 'OpBuilder<\"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes\", [{\\n'\n indent = inc_indent(indent)\n s += indent + 'auto elementType = operands[0].getType().cast<TensorType>().getElementType();\\n'\n s += indent + 'std::vector<mlir::Type> outputTypes;\\n'\n s += indent + 'outputTypes.emplace_back(UnrankedTensorType::get(elementType));\\n'\n s += indent + 'build(builder, state, outputTypes, operands, attributes);\\n'\n indent = dec_indent(indent)\n s += indent + '}]>'\n\n s += '\\n' + indent + '];\\n'\n\n if schema.name in OpsWithPromotableConstOperands:\n s = get_promotable_const_operands_func(\n s, indent, OpsWithPromotableConstOperands[schema.name])\n s += '}\\n\\n'\n return s\n\n\n\"\"\"\nspecial cases:\n* Split: attr split default value: sizeof(output1) namely 1\n* Conv: attr dilations default value is {num_dim of first input - 2, 1}\n* Conv: attr kernel_shape type is ints\n* Transpose: attr perm default value is {} empty int list\n\"\"\"\n\n\ndef gen_op_importer(schema, file):\n indent = inc_indent()\n s = indent + 'if (opName == \"' + schema.name + '\")\\n'\n\n expected_num_operands = len(schema.inputs)\n expected_num_results = len(schema.outputs)\n for input in schema.inputs:\n if OpSchema.FormalParameterOption.Variadic == input.option:\n expected_num_operands = -1\n for output in schema.outputs:\n if OpSchema.FormalParameterOption.Variadic == output.option:\n expected_num_results = -1\n\n handler_func = special_op_handler.get(\n schema.name, \"buildOperation<mlir::ONNX{}Op>\".format(schema.name))\n\n # Special handlers currently require expected num operands/results to be specified.\n # TODO: remove special handlers.\n args = [\"node\"]\n if expected_num_operands != -1 or expected_num_results != -1 or \"buildOperation\" not in handler_func:\n args.append(\n \"/* expected_num_operands = */ {}\".format(expected_num_operands))\n args.append(\n '/* expected_num_results = */ {}'.format(expected_num_results))\n s += inc_indent(indent) + \"return {}({});\\n\".format(\n handler_func, \", \".join(args))\n\n file.write(s)\n\n\ndef build_operator_schemas():\n # domain -> support level -> name -> [schema]\n index = defaultdict(lambda: defaultdict(lambda: defaultdict(\n list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]\n for schema in defs.get_all_schemas_with_history():\n 
index[schema.domain][int(\n schema.support_level)][schema.name].append(schema)\n\n # Preprocess the Operator Schemas\n # [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]\n operator_schemas = list(\n ) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]\n exsting_ops = set() # type: Set[Text]\n for domain, _supportmap in sorted(index.items()):\n if not should_render_domain(domain):\n continue\n\n processed_supportmap = list()\n for _support, _namemap in sorted(_supportmap.items()):\n processed_namemap = list()\n for n, unsorted_versions in sorted(_namemap.items()):\n versions = sorted(unsorted_versions,\n key=lambda s: s.since_version)\n schema = versions[-1]\n if schema.name in exsting_ops:\n continue\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n processed_supportmap.append((_support, processed_namemap))\n operator_schemas.append((domain, processed_supportmap))\n return operator_schemas\n\n\ndef main(args): # type: (Type[Args]) -> None\n curr_utc_time = datetime.datetime.now(\n datetime.timezone.utc).strftime(\"%m/%d/%Y, %H:%M:%S\")\n autogen_warning = (\n '//********************************************************\\n'\n '// Do not modify this file directly.\\n'\n '// This file is automatically generated via script.\\n'\n '// Details can be found in docs/readonnxdefs.md .\\n'\n '//********************************************************\\n\\n')\n autogen_warning = autogen_warning.format(curr_utc_time)\n\n op_def = args.op_def\n op_def.write(autogen_warning)\n\n op_importer = args.op_importer\n op_importer.write(autogen_warning)\n\n for domain, supportmap in build_operator_schemas():\n for _, namemap in supportmap:\n for op_type, schema, versions in namemap:\n gen_op_importer(schema, op_importer)\n r = gen_op_def(schema)\n op_def.write(r)\n\nif __name__ == '__main__':\n curr_dir = os.path.dirname(os.path.realpath(__file__))\n\n class Args(object):\n if args.dry_run_onnx_ops:\n op_def = StringIO()\n else:\n op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')\n op_def = io.open(op_def_file_path, 'w', newline='')\n\n if args.dry_run_op_build_table:\n op_importer = StringIO()\n else:\n op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')\n op_importer = io.open(op_importer_file_path, 'w', newline='')\n main(Args)\n\n if args.dry_run_onnx_ops:\n sys.stdout.write(Args.op_def.getvalue())\n if args.dry_run_op_build_table:\n sys.stdout.write(Args.op_importer.getvalue())\n\n" ]
[ [ "numpy.round", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexhunterlang/natural_bm
[ "b2a1cb15694f4f3a80a3a1cc6f8423892563806d", "b2a1cb15694f4f3a80a3a1cc6f8423892563806d", "b2a1cb15694f4f3a80a3a1cc6f8423892563806d" ]
[ "natural_bm/datasets/fast.py", "tests/natural_bm/test_datasets.py", "natural_bm/models.py" ]
[ "\"\"\"Simplified version of MNIST that is useful for demos and testing \"\"\"\n\n#%%\nimport numpy as np\n\ntry:\n import PIL.Image as Image\nexcept ImportError:\n import Image\n\nfrom natural_bm.datasets.common import Dataset, sample_data, threshold_data, convert2uint8\nfrom natural_bm.datasets import mnist\n\n\n#%%\nclass Fast(Dataset):\n def __init__(self, datatype):\n super().__init__('fast', datatype)\n\n def _create_probability(self):\n # Start from the MNIST probabilities\n prob = mnist.MNIST('probability')\n mnist_dataset = prob.dataset_dict\n\n def shrink_data(data, lbl, n_sample):\n # only keep 0's and 1's\n # subsample to 14 by 14\n # then just drop first 2, last 2 rows/cols since mainly zero\n\n new_data = np.zeros((2*n_sample, 10**2), dtype='float32')\n new_lbl = np.concatenate((np.zeros((n_sample, )),\n np.ones((n_sample, )))).astype('int32')\n\n index0 = np.where(lbl == 0)[0][0:n_sample]\n index1 = np.where(lbl == 1)[0][0:n_sample]\n index = np.concatenate((index0, index1))\n\n for i in range(new_data.shape[0]):\n img = Image.fromarray(data[index[i]].reshape((28, 28)))\n img_down = img.resize((14, 14))\n temp = np.asarray(img_down)\n temp = temp[:, 2:-2]\n temp = temp[2:-2]\n new_data[i] = temp.flatten()\n\n return new_data, new_lbl\n\n dataset = {}\n for dset in ['train', 'valid', 'test']:\n if dset == 'train':\n num_samples = 500\n else:\n num_samples = 50\n data, lbl = shrink_data(mnist_dataset[dset+'.data'],\n mnist_dataset[dset+'.lbl'],\n num_samples)\n dataset[dset+'.data'] = data\n dataset[dset+'.lbl'] = lbl\n\n # save the dataset\n np.savez_compressed(self.savename, **dataset)\n\n def _create_sampled(self):\n # Start from the probabilities\n prob = Fast('probability')\n datasets = prob.dataset_dict\n\n # do the sampling\n datasets = sample_data(datasets)\n\n # reduce precision, only need uint8\n datasets = convert2uint8(datasets)\n\n # Save the dataset\n np.savez_compressed(self.savename, **datasets)\n\n def _create_threshold(self):\n # Start from the probabilities\n prob = Fast('probability')\n datasets = prob.dataset_dict\n\n # threshold the data\n datasets = threshold_data(datasets)\n\n # reduce precision, only need uint8\n datasets = convert2uint8(datasets)\n\n # Save the dataset\n np.savez_compressed(self.savename, **datasets)\n", "#%%\nimport os\nimport numpy as np\nimport pytest\nfrom natural_bm.datasets.common import threshold_data\nfrom natural_bm.datasets import mnist, svhn, fast\nimport natural_bm.backend as B\n\n\n#%%\ndef test_treshold_data():\n\n datasets = {'train.data': 0.6*np.ones((100, 10))}\n datasets = threshold_data(datasets, threshold=None)\n assert np.all(datasets['train.data'] == 1.0)\n\n datasets = {'train.data': 0.6*np.ones((100, 10))}\n datasets = threshold_data(datasets, threshold=0.7)\n assert np.all(datasets['train.data'] == 0.0)\n\n datasets = {'train.data': 0.6*np.ones((100, 10))}\n threshold = np.concatenate((0.7*np.ones((5,)), 0.5*np.ones((5,))))\n datasets = threshold_data(datasets, threshold=threshold)\n verify = np.concatenate((np.zeros((100, 5)), np.ones((100, 5))), axis=1)\n assert np.all(datasets['train.data'] == verify)\n\n\n#%% \ndef test_mnist():\n\n name = 'mnist'\n datatype_ls = ['probability', 'sampled', 'threshold']\n\n # delete files if they exist\n filepath = os.path.dirname(os.path.abspath(__file__))\n folder = os.path.abspath(os.path.join(filepath, '..', '..', 'data'))\n print(folder)\n for datatype in datatype_ls:\n filename = os.path.join(folder, name + '_' + datatype + '.npz')\n try:\n os.remove(filename)\n 
except OSError:\n pass\n\n # this checks on creating and loading datasets\n for datatype in datatype_ls:\n data = mnist.MNIST(datatype)\n\n # this checks on loading existing\n for datatype in datatype_ls:\n data = mnist.MNIST(datatype)\n\n\n#%% \ndef test_fast():\n\n name = 'fast'\n datatype_ls = ['probability', 'sampled', 'threshold']\n\n # delete files if they exist\n filepath = os.path.dirname(os.path.abspath(__file__))\n folder = os.path.abspath(os.path.join(filepath, '..', '..', 'data'))\n for datatype in datatype_ls:\n filename = os.path.join(folder, name + '_' + datatype + '.npz')\n try:\n os.remove(filename)\n except OSError:\n pass\n\n train_samples = 1000\n other_samples = 100\n\n # this checks on creating and loading datasets\n for datatype in datatype_ls:\n data = fast.Fast(datatype)\n assert B.eval(data.train.data).shape[0] == train_samples\n assert B.eval(data.valid.data).shape[0] == other_samples\n assert B.eval(data.test.data).shape[0] == other_samples\n assert B.eval(data.train.lbl).shape[0] == train_samples\n assert B.eval(data.valid.lbl).shape[0] == other_samples\n assert B.eval(data.test.lbl).shape[0] == other_samples\n\n # this checks on loading existing\n for datatype in datatype_ls:\n data = fast.Fast(datatype)\n assert B.eval(data.train.data).shape[0] == train_samples\n assert B.eval(data.valid.data).shape[0] == other_samples\n assert B.eval(data.test.data).shape[0] == other_samples\n assert B.eval(data.train.lbl).shape[0] == train_samples\n assert B.eval(data.valid.lbl).shape[0] == other_samples\n assert B.eval(data.test.lbl).shape[0] == other_samples\n\n\n#%%\ndef longtest_svhn(__file__):\n \"\"\"\n This test is internet dependent and requires a large downloand.\n Since it is slow, I did not include it in auto pytesting.\n \"\"\"\n \n name = 'svhn'\n datatype_ls = ['probability', 'threshold']\n\n # delete files if they exist\n filepath = os.path.dirname(os.path.abspath(__file__))\n folder = os.path.abspath(os.path.join(filepath, '..', '..', 'data'))\n for datatype in datatype_ls:\n filename = os.path.join(folder, name + '_' + datatype + '.npz')\n try:\n os.remove(filename)\n except OSError:\n pass\n\n # this checks on creating and loading datasets\n for datatype in datatype_ls:\n data = svhn.SVHN(datatype)\n\n # this checks on loading existing\n for datatype in datatype_ls:\n data = svhn.SVHN(datatype)\n\n\n#%%\nif __name__ == '__main__':\n # This test will take a couple of minutes depending on your internet speed\n # longtest_svhn(__file__)\n\n pytest.main([__file__])\n", "\"\"\"A model organizes the training of a neural network.\n\nThe general structure, and especial the fit method, are similar to the keras\nModel class.\n\"\"\"\n\n#%%\nimport copy\nimport numpy as np\nimport time\nimport warnings\n\nimport natural_bm.backend as B\nimport natural_bm.callbacks as cbks\nfrom natural_bm.utils import merge_OrderedDicts\nfrom natural_bm.callbacks import CSVLogger\n\n#%%\ndef check_batches(size, batch_size): \n \"\"\"Checks batches on the first epoch to see if any data is missed \"\"\"\n if np.mod(size, batch_size) > 0:\n warn = 'Batch size does not evenly divide into data. Remainders are ignored.'\n warnings.warn(warn)\n\n\n#%%\ndef make_batches(size, batch_size, epoch=None):\n \"\"\"Returns a list of batch indices (tuples of indices). 
\"\"\"\n\n if epoch in [None, 0]:\n check_batches(size, batch_size)\n\n nb_batch = int(np.floor(size / float(batch_size)))\n batches = [(i * batch_size, min(size, (i + 1) * batch_size))\n for i in range(0, nb_batch)]\n\n return batches\n\n\n#%%\nclass Model:\n \"\"\"Class that handles the training of a neural network \"\"\"\n def __init__(self, nnet, optimizer, trainer):\n\n self.nnet = nnet\n self.optimizer = optimizer\n self.trainer = trainer\n\n self.inputs = B.placeholder(shape=(None, self.nnet.layer_size_list[0]), name='x')\n self.loss_fn = trainer.loss_fn()\n loss = self.loss_fn(self.inputs)\n for part in self.nnet.parts:\n for pl in part.losses:\n loss += pl \n self.loss = loss\n\n self.trainable_weights = self.nnet.trainable_weights\n self._updates = self.trainer.updates\n\n @property\n def _train_updates(self):\n training_updates = self.optimizer.get_updates(self.trainable_weights, self.loss)\n updates = merge_OrderedDicts(self._updates, training_updates)\n return updates\n\n def _make_function(self, index, data, updates, name):\n givens = {self.inputs: data[index]}\n fn = B.function([index],\n self.loss,\n updates=updates,\n givens=givens,\n name=name)\n\n return fn\n\n def _make_train_function(self):\n self.train_function = self._make_function(self.train_index,\n self.train_data,\n self._train_updates,\n 'train_function')\n\n def _make_validation_function(self):\n self.validation_function = self._make_function(self.valid_index,\n self.validation_data,\n self._updates,\n 'valid_function')\n\n def _make_test_function(self):\n self.test_function = self._make_function(self.test_index,\n self.test_data,\n self._updates,\n 'test_function')\n\n def _fit_loop(self,\n f,\n out_labels=None,\n batch_size=100,\n n_epoch=100,\n callbacks=None,\n val_f=None,\n shuffle=True,\n callback_metrics=None,\n initial_epoch=0):\n \"\"\"Abstract fit function for f.\n Assume that f returns a list, labeled by out_labels.\n \n # Arguments\n f: Backend function returning a list of tensors\n out_labels: list of strings, display names of\n the outputs of `f`\n batch_size: integer batch size\n n_epoch: number of times to iterate over the data\n callbacks: list of callbacks to be called during training\n val_f: Backend function to call for validation\n shuffle: whether to shuffle the data at the beginning of each epoch\n callback_metrics: list of strings, the display names of the metrics\n passed to the callbacks. 
They should be the\n concatenation of list the display names of the outputs of\n `f` and the list of display names of the outputs of `f_val`.\n initial_epoch: epoch at which to start training\n (useful for resuming a previous training run)\n # Returns\n `History` object.\n \"\"\"\n\n time_start = time.time()\n\n do_validation = False\n n_valid_sample = 0\n if val_f:\n do_validation = True\n n_valid_sample = B.eval(self.validation_data.shape[0])\n\n index_array = np.arange(self.n_train_sample, dtype='int32')\n\n self.history = cbks.History()\n # CSVLogger needs to be second to last callback\n # otherwise AIS results are not recorded \n callbacks = callbacks or []\n index_csv = None\n for i, cb in enumerate(callbacks):\n if isinstance(cb, CSVLogger):\n index_csv = i\n if index_csv is not None:\n cb_csv = callbacks.pop(index_csv)\n callbacks.append(cb_csv)\n callbacks = [cbks.BaseLogger()] + callbacks + [self.history]\n callbacks = cbks.CallbackList(callbacks)\n out_labels = out_labels or []\n callbacks.set_model(self)\n callbacks.set_params({\n 'batch_size': batch_size,\n 'n_epoch': n_epoch,\n 'n_sample': self.n_train_sample,\n 'do_validation': do_validation,\n 'metrics': callback_metrics or [],\n })\n\n callbacks.on_train_begin()\n\n self.stop_training = False\n\n for epoch in range(initial_epoch, n_epoch):\n callbacks.on_epoch_begin(epoch)\n\n if shuffle:\n np.random.shuffle(index_array)\n\n batches = make_batches(self.n_train_sample, batch_size, epoch)\n epoch_logs = {}\n for batch_index, (batch_start, batch_end) in enumerate(batches):\n batch_ids = index_array[batch_start:batch_end]\n batch_logs = {}\n batch_logs['batch'] = batch_index\n batch_logs['size'] = len(batch_ids)\n\n callbacks.on_batch_begin(batch_index, batch_logs)\n\n # actual training\n outs = f(batch_ids)\n if not isinstance(outs, list):\n outs = [outs]\n for l, o in zip(out_labels, outs):\n batch_logs[l] = o\n\n callbacks.on_batch_end(batch_index, batch_logs)\n\n if batch_index == len(batches) - 1: # last batch\n # validation\n if do_validation:\n val_outs = self._valid_loop(val_f, n_valid_sample,\n batch_size=batch_size)\n if not isinstance(val_outs, list):\n val_outs = [val_outs]\n # same labels assumed\n for l, o in zip(out_labels, val_outs):\n epoch_logs['val_' + l] = o\n\n callbacks.on_epoch_end(epoch, epoch_logs)\n\n if self.stop_training:\n break\n\n # Tracks the timing of everything except train_end\n # Skips train_end otherwise timing can't be included in summary callback\n fit_total_time = time.time() - time_start\n fit_callback_time = callbacks.cb_time\n self.history.fit_total_time = fit_total_time\n self.history.fit_callback_time = fit_callback_time\n self.history.fit_train_time = fit_total_time - fit_callback_time\n \n callbacks.on_train_end()\n\n return self.history\n\n def _valid_loop(self, f, n_sample, batch_size=100):\n \"\"\"Abstract method to loop over some data in batches.\n \n # Arguments\n f: Backend function returning a list of tensors.\n n_sample: integer of number of samples in data.\n batch_size: integer batch size.\n \n # Returns\n Scalar loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics).\n \"\"\"\n\n outs = []\n batches = make_batches(n_sample, batch_size)\n index_array = np.arange(n_sample, dtype='int32')\n for batch_index, (batch_start, batch_end) in enumerate(batches):\n batch_ids = index_array[batch_start:batch_end]\n batch_outs = f(batch_ids)\n if isinstance(batch_outs, list):\n if batch_index == 0:\n for 
batch_out in enumerate(batch_outs):\n outs.append(0.)\n for i, batch_out in enumerate(batch_outs):\n outs[i] += batch_out * len(batch_ids)\n else:\n if batch_index == 0:\n outs.append(0.)\n outs[0] += batch_outs * len(batch_ids)\n\n for i, out in enumerate(outs):\n outs[i] /= n_sample\n if len(outs) == 1:\n return outs[0]\n return outs\n\n def fit(self,\n x,\n batch_size=100,\n n_epoch=10,\n callbacks=None,\n validation_data=None,\n shuffle=True,\n initial_epoch=0):\n \"\"\"Trains the model for a fixed number of epochs (iterations on a dataset).\n \n # Arguments\n x: Theano shared array of training data\n batch_size: integer. Number of samples per gradient update.\n n_epoch: integer, the number of times to iterate\n over the training data arrays.\n callbacks: list of callbacks to be called during training.\n validation_data: Theano shared array of data on which to evaluate\n the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data.\n shuffle: boolean, whether to shuffle the training data\n before each epoch.\n initial_epoch: epoch at which to start training\n (useful for resuming a previous training run)\n \n # Returns\n A `History` instance. Its `history` attribute contains\n all information collected during training.\n \"\"\"\n self.train_data = x\n self.n_train_sample = B.eval(x.shape[0])\n self.validation_data = validation_data\n\n # makes the generic indices to access data\n self.train_index = B.placeholder(shape=(batch_size,),\n dtype=B.intx(), name='train_index')\n\n # makes the training functions\n self._make_train_function()\n f = self.train_function\n\n # preps for validation\n out_labels = ['cost']\n if validation_data:\n self.valid_index = B.placeholder(shape=(batch_size,),\n dtype=B.intx(), name='valid_index')\n callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]\n self._make_validation_function()\n val_f = self.validation_function\n else:\n callback_metrics = copy.copy(out_labels)\n val_f = None\n\n # delegate logic to _fit_loop\n return self._fit_loop(f, out_labels=out_labels,\n batch_size=batch_size, n_epoch=n_epoch,\n callbacks=callbacks,\n val_f=val_f, shuffle=shuffle,\n callback_metrics=callback_metrics,\n initial_epoch=initial_epoch)\n\n def train_on_batch(self, x):\n \"\"\"Runs a single gradient update on a single batch of data.\n # Arguments\n x: Numpy array of training data,\n or list of Numpy arrays if the model has multiple inputs.\n If all inputs in the model are named,\n you can also pass a dictionary\n mapping input names to Numpy arrays.\n # Returns\n Scalar training loss\n (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics).\n \"\"\"\n\n # makes the generic indices to access data\n batch_size = B.eval(x.shape)[0]\n self.train_index = B.placeholder(shape=(batch_size,),\n dtype=B.intx(), name='train_index')\n self.train_data = x\n index = np.arange(batch_size)\n\n self._make_train_function()\n outputs = self.train_function(index)\n\n return outputs\n\n def predict_on_batch(self, x):\n \"\"\"Runs a single gradient update on a single batch of data.\n # Arguments\n x: Numpy array of training data,\n or list of Numpy arrays if the model has multiple inputs.\n If all inputs in the model are named,\n you can also pass a dictionary\n mapping input names to Numpy arrays.\n # Returns\n Scalar training loss\n (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics).\n 
\"\"\"\n\n # makes the generic indices to access data\n batch_size = B.eval(x.shape)[0]\n self.test_index = B.placeholder(shape=(batch_size,),\n dtype=B.intx(), name='test_index')\n self.test_data = x\n index = np.arange(batch_size)\n\n self._make_test_function()\n outputs = self.test_function(index)\n\n return outputs\n" ]
[ [ "numpy.asarray", "numpy.ones", "numpy.concatenate", "numpy.savez_compressed", "numpy.where", "numpy.zeros" ], [ "numpy.all", "numpy.zeros", "numpy.ones" ], [ "numpy.mod", "numpy.arange", "numpy.random.shuffle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
emilleishida/resspect_metric
[ "92f0b5d9de9cd6a031ec67fd76f8d302be0efef8" ]
[ "posteriors/fiducial/get_cosmo_posteriors.py" ]
[ "case = 'fiducial'\n\nimport pandas as pd\nimport numpy as np\nimport pystan\nimport os\nfrom resspect.salt3_utils import get_distances\nimport pickle\nimport time\nfrom shutil import copyfile\n\n\n\nfit_lightcurves = False\nrestart_master = True\n\n# number of bins for SALT2mu\nnbins = 70\n\n# rather to re-write fitres file\nreplace_z = True\nadd_lowz = True\nbias = True\n\n###########################################################################################\n# translate ids ###################################\n###########################################################################################\nSNANA_types = {90:11, 62:{1:3, 2:13}, 42:{1:2, 2:12, 3:14},\n 67:41, 52:43, 64:51, 95:60, 994:61, 992:62,\n 993:63, 15:64, 88:70, 92:80, 65:81, 16:83,\n 53:84, 991:90, 6:{1:91, 2:93}}\n\ntypes_names = {90: 'Ia', 67: '91bg', 52:'Iax', 42:'II', 62:'Ibc', \n 95: 'SLSN', 15:'TDE', 64:'KN', 88:'AGN', 92:'RRL', 65:'M-dwarf',\n 16:'EB',53:'Mira', 6:'MicroL', 991:'MicroLB', 992:'ILOT', \n 993:'CART', 994:'PISN',995:'MLString'}\n\n\n# read plasticc test metadata\ntest_zenodo_meta = '/media/RESSPECT/data/PLAsTiCC/PLAsTiCC_zenodo/plasticc_test_metadata.csv'\ntest_metadata = pd.read_csv(test_zenodo_meta)\n\n# read sample for this case\nfname = '/media/RESSPECT/data/PLAsTiCC/for_metrics/' + case + '_samp.csv'\ndata = pd.read_csv(fname)\n\ndata_new = {}\ndata_new['id'] = data['id'].values\ndata_new['redshift'] = data['redshift'].values\ndata_new['type'] = [types_names[item] for item in data['code'].values]\ndata_new['code'] = []\ndata_new['orig_sample'] = ['test' for i in range(data.shape[0])]\ndata_new['queryable'] = [True for i in range(data.shape[0])]\ndata_new['code_zenodo'] = data['code'].values\n\nfor i in range(data.shape[0]): \n sncode = data.iloc[i]['code']\n if sncode not in [62, 42, 6]:\n data_new['code'].append(SNANA_types[sncode])\n if SNANA_types[sncode] == 60:\n print('sncode = ', sncode, ' new code=', SNANA_types[sncode])\n else:\n flag = test_metadata['object_id'].values == data.iloc[i]['id']\n submodel = test_metadata[flag]['true_submodel'].values[0]\n data_new['code'].append(SNANA_types[sncode][submodel])\n \ndata_out = pd.DataFrame(data_new)\ndata_out.to_csv('results/' + case + '_photoids_plasticc.dat', index=False)\n\n###################################################################################\n###################################################################################\n\n\nres = {}\n\nif fit_lightcurves:\n \n start_time = time.time()\n \n print('********* Fitting light curves ******************')\n\n fname = 'results/' + case + '_photoids_plasticc.dat'\n \n meta = pd.read_csv(fname, index_col=False)\n codes = np.unique(meta['code'].values)\n\n res = get_distances(fname,\n data_prefix='LSST_DDF',\n data_folder='/media/RESSPECT/data/PLAsTiCC/SNANA', \n select_modelnum=None,\n salt2mu_prefix='test_salt2mu_res',\n maxsnnum=50000,\n select_orig_sample=['test'],\n salt3_outfile='salt3pipeinput.txt',\n data_prefix_has_sntype=False,\n master_fitres_name='results/master_fitres.fitres', \n append_master_fitres=True,\n restart_master_fitres=restart_master)\n \n res['distances'].to_csv('results/mu_photoIa_plasticc_' + case + '.dat', index=False)\n res['cosmopars'].to_csv('results/cosmo_photoIa_plasticc_' + case + '.dat', index=False)\n \n \n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n# SALT2mu input file name\nsalt2mu_fname = 'SALT2mu.input'\n\n\nif replace_z:\n if add_lowz:\n if bias:\n # path to lowz fitres\n fitres_lowz_fname = 
'/media/RESSPECT/data/temp_lowz_sim/lowz_only_fittres.fitres'\n \n else:\n raise ValueError('Low-z without bias not implemented yet.')\n \n fitres_lowz = pd.read_csv(fitres_lowz_fname, index_col=False, comment=\"#\", \n skip_blank_lines=True, delim_whitespace=True)\n \n fitres_lowz['zHD'] = fitres_lowz['SIM_ZCMB']\n\n # path to main fitres\n fitres_main_fname = 'results/master_fitres.fitres'\n \n # read fitres\n fitres_main = pd.read_csv(fitres_main_fname, index_col=False, comment=\"#\", \n skip_blank_lines=True, delim_whitespace=True)\n\n if add_lowz:\n # join samples considering only common columns\n frames = [fitres_lowz, fitres_main]\n fitres = pd.concat(frames, ignore_index=True)\n else:\n fitres = fitres_main \n \n # update redshift value\n fitres['zHD'] = fitres['SIM_ZCMB']\n\n # replace nans with number so SNANA recognizes the columns\n fitres.fillna(value=-99, inplace=True)\n\n # save combined fitres to file\n if add_lowz:\n if bias:\n fitres.to_csv('results/master_fitres_new_lowz_withbias.fitres', sep=\" \", index=False)\n else:\n fitres.to_csv('results/master_fitres_new_lowz_nobias.fitres', sep=\" \", index=False)\n else:\n fitres.to_csv('results/master_fitres_new.fitres', sep=\" \", index=False)\n \nsamples_dir = '/media/RESSPECT/data/PLAsTiCC/for_metrics/posteriors/' + case + '/'\nif not os.path.isdir(samples_dir):\n os.makedirs(samples_dir)\n\n# change parameters for SALT2mu\nop = open(salt2mu_fname, 'r')\nlin = op.readlines()\nop.close()\n\nlin[0] = 'bins=' + str(nbins) + '\\n'\n\n\nif add_lowz:\n if bias:\n lin[-3] = 'prefix=results/test_salt2mu_lowz_withbias_' + case + '\\n'\n lin[-4] = 'file=results/master_fitres_new_lowz_withbias.fitres' + '\\n'\n fitres_comb_fname = 'results/test_salt2mu_lowz_withbias_' + case + '.fitres'\n stan_input_fname = 'results/stan_input_salt2mu_lowz_withbias_' + case + '.csv'\n else:\n lin[-3] = 'prefix=results/test_salt2mu_lowz_nobias_' + case + '\\n'\n lin[-4] = 'file=results/master_fitres_new_lowz_nobias.fitres' + '\\n'\n fitres_comb_fname = 'results/test_salt2mu_lowz_nobias_' + case + '.fitres'\n stan_input_fname = 'results/stan_input_salt2mu_lowz_npbias_' + case + '.csv'\nelse:\n lin[-3] = 'prefix=results/test_salt2mu_' + case + '\\n'\n lin[-4] = 'file=results/master_fitres_new.fitres' + '\\n'\n fitres_comb_fname = 'results/test_salt2mu_' + case + '.fitres'\n stan_input_fname = 'results/stan_input_salt2mu_' + case + '.csv'\n\nop2 = open(salt2mu_fname, 'w')\nfor line in lin:\n op2.write(line)\nop2.close()\n\n# get distances from SALT2MU\nos.system('SALT2mu.exe ' + salt2mu_fname)\n\n# read data for Bayesian model\nfitres_comb = pd.read_csv(fitres_comb_fname, index_col=False, comment=\"#\", skip_blank_lines=True, \n delim_whitespace=True)\n\n# set initial conditions\nz0 = 0\nE0 = 0\nc = 3e5\nH0 = 70\n\n# remove duplicated redshift\nfitres_final = fitres_comb.drop_duplicates(subset=['SIM_ZCMB'], keep='first')\n\n# order data according to redshift \nindx = np.argsort(fitres_final['SIM_ZCMB'].values)\n\n# create input data\nstan_input = {}\nstan_input['nobs'] = fitres_final.shape[0]\nstan_input['z'] = fitres_final['SIM_ZCMB'].values[indx]\nstan_input['mu'] = fitres_final['MU'].values[indx]\nstan_input['muerr'] = fitres_final['MUERR'].values[indx]\nstan_input['z0'] = z0\nstan_input['H0'] = H0\nstan_input['c'] = c\nstan_input['E0'] = np.array([E0])\n\n# save only stan input to file\nstan_input2 = {}\nstan_input2['z'] = stan_input['z']\nstan_input2['mu'] = stan_input['mu']\nstan_input2['muerr'] = stan_input['muerr']\n\nstan_input_tofile = 
pd.DataFrame(stan_input2)\n\nstan_input_tofile[['z', 'mu', 'muerr']].to_csv(stan_input_fname, index=False)\n\nstan_model=\"\"\"\nfunctions {\n /** \n * ODE for the inverse Hubble parameter. \n * System State E is 1 dimensional. \n * The system has 2 parameters theta = (om, w)\n * \n * where \n * \n * om: dark matter energy density \n * w: dark energy equation of state parameter\n *\n * The system redshift derivative is \n * \n * d.E[1] / d.z = \n * 1.0/sqrt(om * pow(1+z,3) + (1-om) * (1+z)^(3 * (1+w)))\n * \n * @param z redshift at which derivatives are evaluated. \n * @param E system state at which derivatives are evaluated. \n * @param params parameters for system. \n * @param x_r real constants for system (empty). \n * @param x_i integer constants for system (empty). \n */ \n real[] Ez(real z,\n real[] H,\n real[] params,\n real[] x_r,\n int[] x_i) {\n real dEdz[1];\n dEdz[1] = 1.0/sqrt(params[1]*(1+z)^3\n +(1-params[1])*(1+z)^(3*(1+params[2])));\n return dEdz;\n } \n}\ndata {\n int<lower=1> nobs; // number of data points\n real E0[1]; // integral(1/H) at z=0 \n real z0; // initial redshift, 0\n real c; // speed of light\n real H0; // hubble parameter\n real mu[nobs]; // distance modulus\n vector[nobs] muerr; // error in distance modulus\n real<lower=0> z[nobs]; // redshift\n}\ntransformed data {\n real x_r[0]; // required by ODE (empty)\n int x_i[0]; \n}\nparameters{\n real<lower=0, upper=1> om; // dark matter energy density\n real<lower=-2, upper=0> w; // dark energy equation of state parameter\n}\ntransformed parameters{\n real DC[nobs,1]; // co-moving distance \n real pars[2]; // ODE input = (om, w)\n real dl[nobs]; // luminosity distance\n real DH; // Hubble distance = c/H0\n \n \n DH = (c/H0);\n pars[1] = om;\n pars[2] = w;\n \n // Integral of 1/E(z) \n DC = integrate_ode_rk45(Ez, E0, z0, z, pars, x_r, x_i);\n for (i in 1:nobs) {\n dl[i] = 25 + 5 * log10(DH * (1 + z[i]) * DC[i, 1]);\n }\n}\nmodel{\n // priors and likelihood\n om ~ normal(0.3, 0.1);\n w ~ normal(-1, 0.2);\n\n mu ~ normal(dl, muerr);\n}\ngenerated quantities {\n vector[nobs] log_lik;\n vector[nobs] mu_hat;\n \n for (j in 1:nobs) {\n log_lik[j] = normal_lpdf(mu[j] | dl[j], muerr[j]);\n mu_hat[j] = normal_rng(dl[j], muerr[j]);\n }\n}\n\"\"\"\n\nmodel = pystan.StanModel(model_code=stan_model)\n\nfit = model.sampling(data=stan_input, iter=16000, chains=3, warmup=10000, control={'adapt_delta':0.99})\n\n# print summary\nres = fit.stansummary(pars=[\"om\", \"w\"])\ncheck = str(pystan.check_hmc_diagnostics(fit))\nprint(res)\nprint( ' ******* ')\nprint(check)\n\n\nif add_lowz and bias:\n summ_fname = samples_dir + 'stan_summary_' + case + '_lowz_withbias.dat'\n summ_fname2 = 'results/stan_summary_' + case + '_lowz_withbias.dat'\n chains_fname = samples_dir + '/chains_' + case + '_lowz_withbias.pkl'\n trace_fname = samples_dir + '/trace_plot_' + case + '_lowz_withbias.png'\n trace_fname2 = 'results/trace_plot_' + case + '_lowz_withbias.png'\nelif add_lowz and not bias:\n summ_fname = samples_dir + 'stan_summary_' + case + '_lowz_nobias.dat'\n summ_fname2 = 'results/stan_summary_' + case + '_lowz_nobias.dat'\n chains_fname = samples_dir + '/chains_' + case + '_lowz_nobias.pkl'\n trace_fname = samples_dir + '/trace_plot_' + case + '_lowz_nobias.png'\n trace_fname2 = 'results/trace_plot_' + case + '_lowz_nobias.png'\nelse:\n summ_fname = samples_dir + 'stan_summary_' + case + '.dat'\n summ_fname2 = 'results/stan_summary_' + case + '.dat'\n chains_fname = samples_dir + '/chains_' + case + '.pkl'\n trace_fname = samples_dir + 
'/trace_plot_' + case + '.png'\n trace_fname2 = 'results/trace_plot_' + case + '.png'\n\nop2 = open(summ_fname, 'w')\nop2.write(res)\nop2.write('\\n ************* \\n')\nop2.write(check)\nop2.close()\n\nsamples = fit.extract(permuted=True)\n\npickle.dump(samples, open(chains_fname, \"wb\"))\n\npystan.check_hmc_diagnostics(fit)\n\n# plot chains\nimport arviz\nimport matplotlib.pyplot as plt\n\narviz.plot_trace(fit, ['om', 'w'])\nplt.savefig(trace_fname)\n\ncopyfile(trace_fname, trace_fname2)\ncopyfile(summ_fname, summ_fname2)" ]
[ [ "pandas.concat", "pandas.read_csv", "numpy.unique", "matplotlib.pyplot.savefig", "pandas.DataFrame", "numpy.argsort", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
bmaranville/orsopy
[ "74083afdce8f8f1ab3866c7f1f5209942c8734db", "74083afdce8f8f1ab3866c7f1f5209942c8734db" ]
[ "tests/test_slddb/test_dbcreation.py", "orsopy/slddb/element_table/element.py" ]
[ "import sys\nimport unittest\n\nfrom numpy import ndarray, testing\n\nfrom orsopy.slddb import SLDDB, dbconfig, element_table\n\n\nclass TestCreateDB(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.db = SLDDB(\":memory:\")\n cls.db.create_database()\n\n @classmethod\n def tearDownClass(cls):\n del cls.db\n\n def test_tables(self):\n c = self.db.db.cursor()\n c.execute(\"SELECT name FROM sqlite_master WHERE type='table'\")\n items = c.fetchall()\n for i, tbl in enumerate([dbconfig.DB_MATERIALS_NAME]):\n with self.subTest(msg=tbl, i=i):\n self.assertTrue((tbl,) in items)\n\n def test_element_search(self):\n with self.subTest(\"database search\", i=0):\n s1 = element_table.get_element(\"Si\")\n s2 = element_table.get_element(14)\n with self.subTest(\"equality\", i=0):\n self.assertEqual(s1.Z, s2.Z)\n self.assertEqual(s1.symbol, s2.symbol)\n self.assertEqual(s1.mass, s2.mass)\n self.assertEqual(s1.b, s2.b)\n testing.assert_array_equal(s1._xdata, s2._xdata)\n\n def test_add_field(self):\n global dbconfig\n # call without changes\n self.db.update_fields()\n\n # call with appending column\n dbconfig.DB_MATERIALS_FIELDS.append(\"testadd\")\n dbconfig.DB_MATERIALS_CONVERTERS.append(dbconfig.DB_MATERIALS_CONVERTERS[-1])\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS.append(dbconfig.DB_MATERIALS_FIELD_DEFAULTS[-1])\n dbconfig.db_lookup = dict(\n [\n (field, (i, converter, default))\n for i, (field, converter, default) in enumerate(\n zip(\n dbconfig.DB_MATERIALS_FIELDS,\n dbconfig.DB_MATERIALS_CONVERTERS,\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS,\n )\n )\n ]\n )\n\n self.db.update_fields()\n\n # call with inserted column\n dbconfig.DB_MATERIALS_FIELDS.insert(5, \"testadd2\")\n dbconfig.DB_MATERIALS_CONVERTERS.insert(5, dbconfig.DB_MATERIALS_CONVERTERS[-1])\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS.insert(5, dbconfig.DB_MATERIALS_FIELD_DEFAULTS[-1])\n dbconfig.db_lookup = dict(\n [\n (field, (i, converter, default))\n for i, (field, converter, default) in enumerate(\n zip(\n dbconfig.DB_MATERIALS_FIELDS,\n dbconfig.DB_MATERIALS_CONVERTERS,\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS,\n )\n )\n ]\n )\n self.db.update_fields()\n\n # reset database\n dbconfig.DB_MATERIALS_FIELDS.pop(-1)\n dbconfig.DB_MATERIALS_FIELDS.pop(5)\n dbconfig.DB_MATERIALS_CONVERTERS.pop(-1)\n dbconfig.DB_MATERIALS_CONVERTERS.pop(5)\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS.pop(-1)\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS.pop(5)\n dbconfig.db_lookup = dict(\n [\n (field, (i, converter, default))\n for i, (field, converter, default) in enumerate(\n zip(\n dbconfig.DB_MATERIALS_FIELDS,\n dbconfig.DB_MATERIALS_CONVERTERS,\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS,\n )\n )\n ]\n )\n self.db = SLDDB(\":memory:\")\n self.db.create_database()\n\n def test_backup(self):\n if sys.version_info.minor > 6:\n self.db.backup(\":memory:\")\n", "\"\"\"\nDefining the Element class that is used to hold all needed data for one element/isotope.\n\"\"\"\n\nimport os\n\nfrom numpy import array, load\n\nfrom .masses import ATOMIC_WEIGHTS, ELEMENT_CHARGES, ELEMENT_FULLNAMES\nfrom .nabs_geant4 import DATA_DIR as NABS_DATA_DIR\nfrom .nabs_geant4 import NEUTRON_ABSORPTIONS\nfrom .nlengths_pt import NEUTRON_SCATTERING_LENGTHS\nfrom .xray_nist import XRAY_SCATTERING_FACTORS\n\nBASE_PATH = os.path.abspath(os.path.dirname(__file__))\n\nELEMENT_NAMES = dict([(value, key) for key, value in ELEMENT_CHARGES.items()])\n\nELEMENT_FULLNAMES[\"D\"] = \"deuterium\"\nELEMENT_FULLNAMES[\"Hx\"] = \"exchangeable hydrogen\"\n\nfor data in [ATOMIC_WEIGHTS, 
NEUTRON_SCATTERING_LENGTHS]:\n data[\"D\"] = data[(1, 2)]\n data[\"Hx\"] = data[(1, 1)]\nfor data in [ELEMENT_CHARGES, XRAY_SCATTERING_FACTORS]:\n data[\"Hx\"] = data[\"H\"]\n\n\nclass Element:\n N = None\n\n def __init__(self, symbol=None, Z=None):\n # get element from database\n N = None\n self.symbol = symbol\n if Z is None and symbol is None:\n raise ValueError(\"Provide either Symbol or Z\")\n elif Z is None:\n if \"[\" in symbol:\n self.symbol, N = symbol.rstrip(\"]\").split(\"[\", 1)\n N = int(N)\n key = (ELEMENT_CHARGES[self.symbol], N)\n else:\n key = symbol\n self.Z = ELEMENT_CHARGES[self.symbol]\n else:\n self.Z = Z\n self.symbol = ELEMENT_NAMES[Z]\n key = self.symbol\n\n self.N = N\n self.name = ELEMENT_FULLNAMES[self.symbol]\n self.mass = ATOMIC_WEIGHTS[key]\n try:\n self.b = NEUTRON_SCATTERING_LENGTHS[key]\n except KeyError:\n raise ValueError(f\"No neutorn scattering data for {key}\")\n if key in NEUTRON_ABSORPTIONS:\n self._ndata = load(os.path.join(BASE_PATH, NABS_DATA_DIR, NEUTRON_ABSORPTIONS[key]))[\"arr_0\"]\n else:\n self._ndata = None\n\n try:\n self._xdata = array(XRAY_SCATTERING_FACTORS[self.symbol])\n except KeyError:\n self._xdata = None\n\n def f_of_E(self, Ei):\n if self._xdata is None:\n return float(\"nan\")\n E, fp, fpp = self._xdata\n fltr = E >= Ei\n if not fltr.any():\n return 0.0 - 0j\n else:\n # linear interpolation between two nearest points\n E1 = E[fltr][0]\n try:\n E2 = E[fltr][1]\n except IndexError:\n return fp[fltr][0] - 1j * fpp[fltr][0]\n else:\n f1 = fp[fltr][0] - 1j * fpp[fltr][0]\n f2 = fp[fltr][1] - 1j * fpp[fltr][1]\n return ((E2 - Ei) * f1 + (Ei - E1) * f2) / (E2 - E1)\n\n def b_of_L(self, Li):\n if self._ndata is None:\n return self.b\n L, b_abs = self._ndata\n if Li > L[-1]:\n return self.b.real - 1j * b_abs[-1]\n if Li < L[0]:\n return self.b.real - 1j * b_abs[0]\n fltr = L >= Li\n # linear interpolation between two nearest points\n L1 = L[fltr][0]\n try:\n L2 = L[fltr][1]\n except IndexError:\n return self.b.real - 1j * b_abs[fltr][0]\n else:\n b_abs1 = b_abs[fltr][0]\n b_abs2 = b_abs[fltr][1]\n return self.b.real - 1j * ((L2 - Li) * b_abs1 + (Li - L1) * b_abs2) / (L2 - L1)\n\n @property\n def E(self):\n return self._xdata[0]\n\n @property\n def f(self):\n return self._xdata[1] - 1j * self._xdata[2]\n\n @property\n def fp(self):\n return self._xdata[1]\n\n @property\n def fpp(self):\n return self._xdata[2]\n\n @property\n def has_ndata(self):\n return self._ndata is not None\n\n @property\n def Lamda(self):\n # return neutron wavelength values for energy dependant absorption\n if self.has_ndata:\n return self._ndata[0]\n else:\n return array([0.05, 50.0])\n\n @property\n def b_abs(self):\n return self._ndata[1]\n\n @property\n def b_lambda(self):\n if self.has_ndata:\n return self.b.real - 1j * self._ndata[1]\n else:\n return array([self.b, self.b])\n\n def __str__(self):\n if self.N is None:\n return self.symbol\n else:\n return \"%s[%s]\" % (self.symbol, self.N)\n\n def __repr__(self):\n if self.N is None:\n symb = self.symbol\n else:\n symb = \"%s[%s]\" % (self.symbol, self.N)\n return 'Element(symbol=\"%s\")' % symb\n\n def __eq__(self, other):\n if type(self) == type(other):\n return self.N == other.N and self.Z == other.Z and self.symbol == other.symbol\n else:\n return object.__eq__(self, other)\n\n def __hash__(self):\n return hash((self.N, self.Z, self.symbol))\n" ]
[ [ "numpy.testing.assert_array_equal" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Katsute/Baruch-CIS-3120-Assignments
[ "2cb470a7e3b7bf2d49da520fdff079f832624c06" ]
[ "classwork/05_01_2021/plt.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\n\nx = [5, 2, 9, 4, 7]\ny = [10, 5, 8, 4, 2]\n\nplt.plot(x, y)\nplt.show()\n\nplt.bar(x, y)\nplt.show()\n\nplt.hist(x)\nplt.show()\n\ndf = pd.DataFrame({'x': x, 'y': y})\ndf.plot('x', 'y', kind=\"scatter\")\nplt.show()\n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.bar", "matplotlib.pyplot.show", "matplotlib.pyplot.hist" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
AnglinaBhambra/OpenSarToolkit
[ "b2d6562a77eea86b4c236cc14f81f73ff4e75c17" ]
[ "ost/helpers/vector.py" ]
[ "import os\nimport sys\nimport json\nfrom functools import partial\n\nimport osr\nimport ogr\nimport pyproj\nimport geopandas as gpd\n\nfrom shapely.ops import transform\nfrom shapely.wkt import loads\nfrom shapely.geometry import Point, Polygon, mapping, shape\nfrom fiona import collection\nfrom fiona.crs import from_epsg\n\n\ndef get_epsg(prjfile):\n '''Get the epsg code from a projection file of a shapefile\n\n Args:\n prjfile: a .prj file of a shapefile\n\n Returns:\n str: EPSG code\n\n '''\n\n prj_file = open(prjfile, 'r')\n prj_txt = prj_file.read()\n srs = osr.SpatialReference()\n srs.ImportFromESRI([prj_txt])\n srs.AutoIdentifyEPSG()\n # return EPSG code\n return srs.GetAuthorityCode(None)\n\n\ndef get_proj4(prjfile):\n '''Get the proj4 string from a projection file of a shapefile\n\n Args:\n prjfile: a .prj file of a shapefile\n\n Returns:\n str: PROJ4 code\n\n '''\n\n prj_file = open(prjfile, 'r')\n prj_string = prj_file.read()\n\n # Lambert error\n if '\\\"Lambert_Conformal_Conic\\\"' in prj_string:\n\n print(' ERROR: It seems you used an ESRI generated shapefile'\n ' with Lambert Conformal Conic projection. ')\n print(' This one is not compatible with Open Standard OGR/GDAL'\n ' tools used here. ')\n print(' Reproject your shapefile to a standard Lat/Long projection'\n ' and try again')\n sys.exit(1)\n\n srs = osr.SpatialReference()\n srs.ImportFromESRI([prj_string])\n return srs.ExportToProj4()\n\n\ndef epsg_to_wkt_projection(epsg_code):\n\n spatial_ref = osr.SpatialReference()\n spatial_ref.ImportFromEPSG(epsg_code)\n\n return spatial_ref.ExportToWkt()\n\n\ndef reproject_geometry(geom, inproj4, out_epsg):\n '''Reproject a wkt geometry based on EPSG code\n\n Args:\n geom (ogr-geom): an ogr geom object\n inproj4 (str): a proj4 string\n out_epsg (str): the EPSG code to which the geometry should be transformed\n\n Returns:\n geom (ogr-geometry object): the transformed geometry\n\n '''\n\n geom = ogr.CreateGeometryFromWkt(geom)\n # input SpatialReference\n spatial_ref_in = osr.SpatialReference()\n spatial_ref_in.ImportFromProj4(inproj4)\n\n # output SpatialReference\n spatial_ref_out = osr.SpatialReference()\n spatial_ref_out.ImportFromEPSG(int(out_epsg))\n\n # create the CoordinateTransformation\n coord_transform = osr.CoordinateTransformation(spatial_ref_in,\n spatial_ref_out)\n try:\n geom.Transform(coord_transform)\n except Exception:\n print(' ERROR: Not able to transform the geometry')\n sys.exit()\n\n return geom\n\n\ndef geodesic_point_buffer(lat, lon, meters, envelope=False):\n\n # get WGS 84 proj\n proj_wgs84 = pyproj.Proj(init='epsg:4326')\n\n # Azimuthal equidistant projection\n aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0'\n project = partial(\n pyproj.transform,\n pyproj.Proj(aeqd_proj.format(lat=lat, lon=lon)),\n proj_wgs84)\n\n buf = Point(0, 0).buffer(meters) # distance in metres\n\n if envelope is True:\n geom = Polygon(transform(project, buf).exterior.coords[:]).envelope\n else:\n geom = Polygon(transform(project, buf).exterior.coords[:])\n\n return geom.to_wkt()\n\n\ndef latlon_to_wkt(lat, lon, buffer_degree=None, buffer_meter=None, envelope=False):\n '''A helper function to create a WKT representation of a Lat/Lon pair\n\n This function takes lat and lon values and returns the WKT Point\n representation by default.\n\n A buffer can be set in degrees or metres, which returns a WKT POLYGON. 
If envelope\n is set to True, the buffer is squared to the extent of the buffer radius.\n\n Args:\n lat (str): Latitude (deg) of a point\n lon (str): Longitude (deg) of a point\n buffer_degree (float): optional buffer in degrees around the point\n buffer_meter (float): optional buffer in metres around the point\n envelope (bool): gives a square instead of a circular buffer\n (only applies if a buffer is set)\n\n Returns:\n wkt (str): WKT string\n\n '''\n\n if buffer_degree is None and buffer_meter is None:\n aoi_wkt = 'POINT ({} {})'.format(lon, lat)\n\n elif buffer_degree:\n aoi_geom = loads('POINT ({} {})'.format(lon, lat)).buffer(buffer_degree)\n if envelope:\n aoi_geom = aoi_geom.envelope\n\n aoi_wkt = aoi_geom.to_wkt()\n\n elif buffer_meter:\n aoi_wkt = geodesic_point_buffer(lat, lon, buffer_meter, envelope)\n\n return aoi_wkt\n\n\ndef wkt_manipulations(wkt, buffer=None, convex=False, envelope=False):\n\n geom = ogr.CreateGeometryFromWkt(wkt)\n\n if buffer:\n geom = geom.Buffer(buffer)\n\n if convex:\n geom = geom.ConvexHull()\n\n if envelope:\n geom = geom.GetEnvelope()\n geom = ogr.CreateGeometryFromWkt(\n 'POLYGON (({} {}, {} {}, {} {}, {} {}, {} {}, {} {}))'.format(\n geom[1], geom[3], geom[0], geom[3], geom[0], geom[2],\n geom[1], geom[2], geom[1], geom[3], geom[1], geom[3]))\n\n return geom.ExportToWkt()\n\n\ndef shp_to_wkt(shapefile, buffer=None, convex=False, envelope=False):\n '''A helper function to translate a shapefile into WKT\n '''\n\n # get filepaths and proj4 string\n shpfile = os.path.abspath(shapefile)\n prjfile = shpfile[:-4] + '.prj'\n proj4 = get_proj4(prjfile)\n\n lyr_name = os.path.basename(shapefile)[:-4]\n shp = ogr.Open(os.path.abspath(shapefile))\n lyr = shp.GetLayerByName(lyr_name)\n geom = ogr.Geometry(ogr.wkbGeometryCollection)\n\n for feat in lyr:\n geom.AddGeometry(feat.GetGeometryRef())\n wkt = geom.ExportToWkt()\n\n if proj4 != '+proj=longlat +datum=WGS84 +no_defs':\n print(' INFO: Reprojecting AOI file to Lat/Long (WGS84)')\n wkt = reproject_geometry(wkt, proj4, 4326).ExportToWkt()\n\n # do manipulations if needed\n wkt = wkt_manipulations(wkt, buffer=buffer, convex=convex,\n envelope=envelope)\n\n return wkt\n\n\ndef kml_to_wkt(kmlfile):\n\n shp = ogr.Open(os.path.abspath(kmlfile))\n # take the first (and for a simple KML usually the only) layer\n lyr = shp.GetLayer(0)\n for feat in lyr:\n geom = feat.GetGeometryRef()\n wkt = str(geom)\n\n return wkt\n\n\ndef latlon_to_shp(lon, lat, shapefile):\n\n shapefile = str(shapefile)\n\n schema = {'geometry': 'Point',\n 'properties': {'id': 'str'}}\n\n wkt = loads('POINT ({} {})'.format(lon, lat))\n\n with collection(shapefile, \"w\",\n crs=from_epsg(4326),\n driver=\"ESRI Shapefile\",\n schema=schema) as output:\n\n output.write({'geometry': mapping(wkt),\n 'properties': {'id': '1'}})\n\n\ndef shp_to_gdf(shapefile):\n\n gdf = gpd.GeoDataFrame.from_file(shapefile)\n\n prjfile = shapefile[:-4] + '.prj'\n proj4 = get_proj4(prjfile)\n\n if proj4 != '+proj=longlat +datum=WGS84 +no_defs':\n print(' INFO: reprojecting AOI layer to WGS84.')\n # reproject\n gdf.crs = (proj4)\n gdf = gdf.to_crs({'init': 'epsg:4326'})\n\n return gdf\n\n\ndef wkt_to_gdf(wkt):\n\n geometry = loads(wkt)\n # point wkt\n if geometry.geom_type == 'Point':\n data = {'id': ['1'],\n 'geometry': loads(wkt).buffer(0.05).envelope}\n gdf = gpd.GeoDataFrame(data)\n\n # polygon wkt\n elif geometry.geom_type == 'Polygon':\n data = {'id': ['1'],\n 'geometry': loads(wkt)}\n gdf = gpd.GeoDataFrame(data)\n\n # geometry collection of a single multipolygon\n elif geometry.geom_type == 'GeometryCollection' and len(geometry) == 1 and 'MULTIPOLYGON' in str(geometry):\n\n data = {'id': ['1'],\n 
'geometry': geometry}\n gdf = gpd.GeoDataFrame(data, crs = {'init': 'epsg:4326', 'no_defs': True})\n \n ids, feats =[], []\n for i, feat in enumerate(gdf.geometry.values[0]):\n ids.append(i)\n feats.append(feat)\n\n gdf = gpd.GeoDataFrame({'id': ids,\n 'geometry': feats}, \n geometry='geometry', \n crs = gdf.crs\n )\n \n # geometry collection of single polygon\n elif geometry.geom_type == 'GeometryCollection' and len(geometry) == 1:\n \n data = {'id': ['1'],\n 'geometry': geometry}\n gdf = gpd.GeoDataFrame(data, crs = {'init': 'epsg:4326', 'no_defs': True})\n\n # everything else (hopefully)\n else:\n\n i, ids, geoms = 1, [], []\n for geom in geometry:\n ids.append(i)\n geoms.append(geom)\n i += 1\n\n gdf = gpd.GeoDataFrame({'id': ids,\n 'geometry': geoms},\n crs = {'init': 'epsg:4326', 'no_defs': True}\n )\n \n return gdf\n\n\ndef wkt_to_shp(wkt, outfile):\n\n gdf = wkt_to_gdf(wkt)\n gdf.to_file(outfile)\n\n\ndef gdf_to_json_geometry(gdf):\n \"\"\"Function to parse features from GeoDataFrame in such a manner \n that rasterio wants them\"\"\"\n# \n# try:\n# gdf.geometry.values[0].type\n# features = [json.loads(gdf.to_json())['features'][0]['geometry']]\n# except AttributeError:\n# ids, feats =[], []\n# for i, feat in enumerate(gdf.geometry.values[0]):\n# ids.append(i)\n# feats.append(feat)\n#\n# gdf = gpd.GeoDataFrame({'id': ids,\n# 'geometry': feats}, \n# geometry='geometry', \n# crs = gdf.crs\n# )\n geojson = json.loads(gdf.to_json())\n return [feature['geometry'] for feature in geojson['features'] \n if feature['geometry']]\n\n\ndef inventory_to_shp(inventory_df, outfile):\n\n # change datetime datatypes\n inventory_df['acquisitiondate'] = inventory_df[\n 'acquisitiondate'].astype(str)\n inventory_df['ingestiondate'] = inventory_df['ingestiondate'].astype(str)\n inventory_df['beginposition'] = inventory_df['beginposition'].astype(str)\n inventory_df['endposition'] = inventory_df['endposition'].astype(str)\n\n # write to shapefile\n inventory_df.to_file(outfile)\n\n\ndef exterior(infile, outfile, buffer=None):\n\n gdf = gpd.read_file(infile, crs={'init': 'EPSG:4326'})\n gdf.geometry = gdf.geometry.apply(lambda row: Polygon(row.exterior))\n gdf_clean = gdf[gdf.geometry.area >= 1.0e-6]\n gdf_clean.geometry = gdf_clean.geometry.buffer(-0.0018)\n #if buffer:\n # gdf.geometry = gdf.geometry.apply(\n # lambda row: Polygon(row.buffer(-0.0018)))\n gdf_clean.to_file(outfile)\n\n\ndef difference(infile1, infile2, outfile):\n\n gdf1 = gpd.read_file(infile1)\n gdf2 = gpd.read_file(infile2)\n\n gdf3 = gpd.overlay(gdf1, gdf2, how='symmetric_difference')\n\n gdf3.to_file(outfile)\n\n\ndef buffer_shape(infile, outfile, buffer=None):\n\n with collection(infile, \"r\") as in_shape:\n # schema = in_shape.schema.copy()\n schema = {'geometry': 'Polygon', 'properties': {'id': 'int'}}\n crs = in_shape.crs\n with collection(\n outfile, \"w\", \"ESRI Shapefile\", schema, crs=crs) as output:\n\n for i, point in enumerate(in_shape):\n output.write({\n 'properties': {\n 'id': i\n },\n 'geometry': mapping(\n shape(point['geometry']).buffer(buffer))\n })\n\n\ndef plot_inventory(aoi, inventory_df, transparency=0.05, annotate = False):\n\n import matplotlib.pyplot as plt\n\n # load world borders for background\n world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\n\n # import aoi as gdf\n aoi_gdf = wkt_to_gdf(aoi)\n\n # get bounds of AOI\n bounds = inventory_df.geometry.bounds\n\n # get world map as base\n base = world.plot(color='lightgrey', edgecolor='white')\n\n # plot aoi\n aoi_gdf.plot(ax=base, 
color='None', edgecolor='black')\n\n # plot footprints\n inventory_df.plot(ax=base, alpha=transparency)\n\n # set bounds\n plt.xlim([bounds.minx.min()-2, bounds.maxx.max()+2])\n plt.ylim([bounds.miny.min()-2, bounds.maxy.max()+2])\n plt.grid(color='grey', linestyle='-', linewidth=0.2)\n if annotate:\n import math\n for idx, row in inventory_df.iterrows():\n # print([row['geometry'].bounds[0],row['geometry'].bounds[3]])\n coord = [row['geometry'].centroid.x, row['geometry'].centroid.y]\n x1, y2, x2, y1 = row['geometry'].bounds\n angle = math.degrees(math.atan2((y2 - y1), (x2 - x1)))\n # rint(angle)\n plt.annotate(s=row['bid'], xy=coord, rotation=angle + 5, size=10, color='red', horizontalalignment='center')\n" ]
[ [ "matplotlib.pyplot.annotate", "matplotlib.pyplot.grid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
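The vector.py snapshot above relies on the pyproj 1.x calls pyproj.Proj(init=...) and pyproj.transform, which are deprecated in pyproj >= 2. Below is a minimal sketch of the same geodesic point-buffer idea using the modern Transformer API; the AEQD projection string is taken from the record, while the function name and everything else are illustrative assumptions, not part of the archived file.

import pyproj
from shapely.geometry import Point
from shapely.ops import transform

def geodesic_point_buffer_v2(lat, lon, meters):
    # buffer in metres in an azimuthal equidistant plane centred on the
    # point, then project the ring back to plain lat/lon (EPSG:4326)
    aeqd = pyproj.CRS.from_proj4(
        f"+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0")
    to_wgs84 = pyproj.Transformer.from_crs(aeqd, "EPSG:4326", always_xy=True)
    return transform(to_wgs84.transform, Point(0, 0).buffer(meters)).wkt

The always_xy=True flag keeps the (lon, lat) axis order that shapely expects, which is the usual pitfall when porting pyproj 1.x code.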
tawatts1/chess
[ "cb2917ec689bb8db1dc2436ed2ef6463319876a7" ]
[ "analyze_results.py" ]
[ "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n \ndef get_data(fname):\n out = []\n with open(fname, 'r') as f:\n for line in f:\n datum = []\n for entry in line.split(','):\n datum.append(float(entry))\n out.append(datum)\n return np.array(out)\n\nfiles = os.listdir(\"game_results\")\n\n\n\nfor file in files:\n if file[0] != '.':\n print(file)\n data = get_data(f\"game_results/{file}\")\n fig, (ax1, ax2) = plt.subplots(1,2)\n fig.suptitle(file)\n mean = np.mean(data[:,0])\n print(np.shape(data))\n # deviation for 95 pct confidence interval:\n dev = 1.96*np.std(data[:,0])/ np.sqrt( np.shape(data)[0] )\n c0,c1 = mean-dev, mean+dev\n \n ax1.hist(data[:,0])\n ax1.set_title(\"White performance\")\n #ax1.figtext(.5,.01,f\"{file} and such and such\")\n ax2.hist(data[:,1])\n ax2.set_title(\"Game length\")\n #plt.figtext(.5,.01,f\"{file} and such and such\")\n plt.figtext(.5,.03,f\"The mean of white's performance is {mean:.3f}, with CI ({c0:.3f}, {c1:.3f}). \", wrap=True, ha=\"center\")\n plt.savefig(\"images/\" + file+\".png\", dpi = 300)\n #plt.show()\n\n " ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figtext", "numpy.std", "numpy.mean", "numpy.shape", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
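analyze_results.py above hardcodes the z-value 1.96 for a 95% normal-approximation interval on the mean of white's performance. A small sketch with the confidence level as a parameter instead; the helper name and the use of scipy are assumptions, not part of the archived script.

import numpy as np
from scipy import stats

def mean_ci(samples, confidence=0.95):
    # normal-approximation confidence interval for the sample mean,
    # matching the script's np.std(...) / sqrt(n) standard error
    sem = np.std(samples) / np.sqrt(len(samples))
    return stats.norm.interval(confidence, loc=np.mean(samples), scale=sem)

# e.g. mean_ci(data[:, 0]) reproduces the script's (c0, c1) at 0.95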
jasonrobwebster/sampling-importance-resampling-example
[ "250e54815f73ccf071a4dad8d62a2bd7ec38c0c2" ]
[ "linear.py" ]
[ "import numpy as np\nfrom scipy.special import softmax\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n data_size = 100\n true_grad = 3\n true_intercept = 1\n true_sig = 1\n\n x = np.linspace(0, 10, data_size)\n # y = m x + c\n y_obs = true_grad * x + true_intercept + np.random.normal(loc=0, scale=true_sig, size=data_size)\n\n M = 2000000\n m = M // 20 # M/m is usually around 20\n print(f'Generating {M} initial samples, and {m} re-samples')\n\n # sample M params from initial prior\n grad_prior = np.random.uniform(-10, 10, M) # m ~ U(-10, 10)\n intercept_prior = np.random.uniform(-10, 10, M) # c ~ U(-10, 10)\n sig_prior = np.random.uniform(0.1, 20, M) # sig ~ U(0.1, 20)\n\n # calculate importance weights, assuming that we model y ~ N(mu, sig)\n exponent = 1 / (2 * sig_prior ** 2) \\\n * np.sum([(y_obs[i] - (grad_prior * x[i] + intercept_prior)) ** 2 for i in range(data_size)], axis=0)\n\n log_weights = - data_size * np.log(sig_prior * np.sqrt(2 * np.pi)) - exponent\n\n weights = softmax(log_weights)\n\n # resample params using the above weights to get the posterior;\n # drawing one shared index set for all parameters keeps the joint\n # structure (e.g. the correlation between m and c) intact\n resample_idx = np.random.choice(M, m, p=weights)\n grad_posterior = grad_prior[resample_idx]\n intercept_posterior = intercept_prior[resample_idx]\n sig_posterior = sig_prior[resample_idx]\n\n # report summary stats\n print(f'True gradient: {true_grad}')\n print(f'True intercept: {true_intercept}')\n print(f'True sigma: {true_sig}')\n print(f'Gradient posterior: mean={np.mean(grad_posterior):.3} - sd={np.std(grad_posterior):.3}')\n print(f'Intercept posterior: mean={np.mean(intercept_posterior):.3} - sd={np.std(intercept_posterior):.3}')\n print(f'Sigma posterior: mean={np.mean(sig_posterior):.3} - sd={np.std(sig_posterior):.3}')\n\n # plot the new samples\n fig, axes = plt.subplots(1, 4, figsize=(12, 3))\n\n axes[0].set_title('Data')\n axes[1].set_title('Gradient Posterior')\n axes[2].set_title('Intercept Posterior')\n axes[3].set_title('Sigma Posterior')\n\n axes[0].plot(x, y_obs, 'x')\n sns.distplot(grad_posterior, ax=axes[1])\n sns.distplot(intercept_posterior, ax=axes[2])\n sns.distplot(sig_posterior, ax=axes[3])\n plt.show()\n\n fig, ax = plt.subplots(figsize=(5, 5))\n ax.set_xlabel('Gradient')\n ax.set_ylabel('Intercept')\n ax.set_title('Joint distribution p(m, c)')\n sns.kdeplot(grad_posterior, intercept_posterior, shade=True, ax=ax)\n plt.show()\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.random.choice", "matplotlib.pyplot.subplots", "numpy.random.normal", "numpy.std", "numpy.mean", "numpy.random.uniform", "matplotlib.pyplot.show", "scipy.special.softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.2", "1.7", "1.3", "1.8" ], "tensorflow": [] } ]
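The resampling step in linear.py draws one shared set of indices for all three parameters; resampling each marginal independently would leave the marginals intact but destroy the correlations that the final p(m, c) plot is meant to show. A toy check of the difference (illustrative, not from the archived file):

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=100_000)
b = a + 0.1 * rng.normal(size=100_000)   # b strongly correlated with a
w = np.full(a.size, 1.0 / a.size)        # uniform weights, just for the demo

idx = rng.choice(a.size, 10_000, p=w)    # shared indices: joint resampling
print(np.corrcoef(a[idx], b[idx])[0, 1])              # ~0.995, kept
print(np.corrcoef(rng.choice(a, 10_000, p=w),         # independent draws
                  rng.choice(b, 10_000, p=w))[0, 1])  # ~0.0, lost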
RichardLeeK/CNM
[ "a3c15cb0a0373d6ad03c5a815a7e020f90ab8522" ]
[ "Plateau/preprocess_image.py" ]
[ "import numpy as np\n\nfrom env import Env\n\ndef fill(image,x_idx,y_idx,bound,value):\n # paint a single pixel, skipping out-of-bounds indices and pixels\n # that already hold a value >= bound\n if (x_idx<0) or (x_idx>=image.shape[0]):\n return image\n elif (y_idx<0) or (y_idx>=image.shape[1]):\n return image\n elif image[x_idx][y_idx]>=bound:\n return image\n else:\n image[x_idx][y_idx]=value\n return image\n\ndef fill_edge(image,x_idx,y_idx,value,bound,dist=1):\n # paint the 8-neighbourhood at distance dist around (x_idx, y_idx)\n fill(image,x_idx-dist,y_idx,bound,value)\n fill(image,x_idx-dist,y_idx-dist,bound,value)\n fill(image,x_idx-dist,y_idx+dist,bound,value)\n\n fill(image,x_idx+dist,y_idx,bound,value)\n fill(image,x_idx+dist,y_idx-dist,bound,value)\n fill(image,x_idx+dist,y_idx+dist,bound,value)\n\n fill(image,x_idx,y_idx-dist,bound,value)\n fill(image,x_idx,y_idx+dist,bound,value)\n\ndef transform_img(data,window=900,y_range=110,step=60):\n # rasterise the ICP trace in data[1] into one (y_range x window)\n # image per sliding-window step\n icps=np.int64(data[1])\n # icps=np.array([icp for icp in icps if 0<icp<=y_range])\n image_set=[]\n start_time=0\n while start_time<(len(icps)-window):\n image=np.zeros((window,y_range), dtype=np.uint8)\n for time_idx in range(0,window):\n time=start_time+time_idx\n y_idx=icps[time]-1\n if y_idx<y_range:\n image[time_idx][y_idx]=255\n fill_edge(image,time_idx,y_idx,value=128,bound=255,dist=1)\n image_set.append(image.T)\n start_time=start_time+step\n return np.array(image_set)\n\ndef transform_imgdict(dataset,window=900,y_range=110,step=60):\n # apply transform_img to every record in the dataset\n imgdict=dict()\n for i in range(len(dataset)):\n imgset=transform_img(dataset[i],window=window,y_range=y_range,step=step)\n imgdict[i]=imgset\n return imgdict" ]
[ [ "numpy.int64", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
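preprocess_image.py above carries no usage documentation. A minimal usage sketch under stated assumptions: the synthetic trace is invented for illustration, and transform_img only requires data[1] to be a 1-D sequence of ICP values below y_range.

import numpy as np

# fake ICP trace in mmHg; the archived code reads the trace from data[1]
icp_trace = 20 + 5 * np.sin(np.linspace(0, 20, 3600))
data = (None, icp_trace)

images = transform_img(data, window=900, y_range=110, step=60)
print(images.shape)  # (45, 110, 900): one transposed image per window start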
NVlabs/sionna
[ "527d0f7866b379afffad34a6bef7ed3bf6f33ad2", "488e6c3ff6ff2b3313d0ca0f94e4247b8dd6ff35", "527d0f7866b379afffad34a6bef7ed3bf6f33ad2" ]
[ "test/test_conv_encoding.py", "sionna/signal/upsampling.py", "test/test_3gpp_channel_lsp.py" ]
[ "#\n# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\ntry:\n import sionna\nexcept ImportError as e:\n import sys\n sys.path.append(\"../\")\nimport unittest\nimport numpy as np\nimport tensorflow as tf\ngpus = tf.config.list_physical_devices('GPU')\nprint('Number of GPUs available :', len(gpus))\nif gpus:\n gpu_num = 0 # Number of the GPU to be used\n try:\n tf.config.set_visible_devices(gpus[gpu_num], 'GPU')\n print('Only GPU number', gpu_num, 'used.')\n tf.config.experimental.set_memory_growth(gpus[gpu_num], True)\n except RuntimeError as e:\n print(e)\nfrom sionna.fec.conv import ConvEncoder\nfrom sionna.utils import BinarySource\n\nclass TestConvEncoding(unittest.TestCase):\n\n def test_output_dim(self):\n r\"\"\"Test with allzero codeword that output dims are correct (=n) and output also equals all-zero.\"\"\"\n\n bs = 10\n coderates = [1/2, 1/3]\n ks = [10, 20, 50, 100]\n\n for rate in coderates:\n for k in ks:\n n = int(k/rate) # calculate coderate\n enc = ConvEncoder(rate=rate, constraint_length=5)\n u = np.zeros([bs, k])\n c = enc(u).numpy()\n self.assertTrue(c.shape[-1]==n)\n # also check that all-zero input yields all-zero output\n c_hat = np.zeros([bs, n])\n self.assertTrue(np.array_equal(c, c_hat))\n\n # test that output dim can change (in eager mode)\n k = k+1 # increase length\n n = int(k/rate) # calculate coderate\n u = np.zeros([bs, k])\n c = enc(u).numpy()\n self.assertTrue(c.shape[-1]==n)\n # also check that all-zero input yields all-zero output\n c_hat = np.zeros([bs, n])\n self.assertTrue(np.array_equal(c, c_hat))\n\n def test_invalid_inputs(self):\n r\"\"\"Test with invalid rate values and invalid constraint lengths as input.\n Only rates [1/2, 1/3] and constraint lengths [3, 4, 5, 6, 7, 8] are accepted currently.\"\"\"\n rate_invalid = [0.2, 0.45, 0.01]\n rate_valid = [1/3, 1/2]\n\n constraint_length_invalid = [2, 9, 0]\n constraint_length_valid = [3, 4, 5, 6, 7, 8]\n for rate in rate_valid:\n for mu in constraint_length_invalid:\n with self.assertRaises(AssertionError):\n enc = ConvEncoder(rate=rate, constraint_length=mu)\n\n for rate in rate_invalid:\n for mu in constraint_length_valid:\n with self.assertRaises(AssertionError):\n enc = ConvEncoder(rate=rate, constraint_length= mu)\n \n gmat = [['101', '111', '000'], ['000', '010', '011']]\n with self.assertRaises(AssertionError):\n enc = ConvEncoder(gen_poly=gmat)\n \n def test_polynomial_input(self):\n r\"\"\"Test that different formats of input polynomials are accepted and raises exceptions when the generator polynomials fail assertions.\"\"\"\n\n bs = 10\n k = 100\n rate = 1/2\n n = int(k/rate) # calculate coderate\n u = np.zeros([bs, k])\n\n g1 = ['101', '111']\n g2 = np.array(g1)\n\n g = [g1, g2]\n for gen_poly in g:\n enc = ConvEncoder(gen_poly=gen_poly)\n c = enc(u).numpy()\n self.assertTrue(c.shape[-1]==n)\n # also check that all-zero input yields all-zero output\n c_hat = np.zeros([bs, n])\n self.assertTrue(np.array_equal(c, c_hat))\n\n def util_check_assertion_err(gen_poly_, msg_):\n with self.assertRaises(AssertionError) as exception_context:\n enc = ConvEncoder(gen_poly=gen_poly_)\n self.assertEqual(str(exception_context.exception), msg_)\n\n gs = [\n ['1001', '111'],\n ['1001', 111],\n ('1211', '1101')]\n msg_s = [\n \"Each polynomial must be of same length.\",\n \"Each polynomial must be a string.\",\n \"Each Polynomial must be a string of 0/1 s.\"\n ]\n for idx, g in enumerate(gs):\n 
util_check_assertion_err(g,msg_s[idx])\n\n def test_keras(self):\n \"\"\"Test that Keras model can be compiled (+supports dynamic shapes).\"\"\"\n bs = 10\n k = 100\n\n source = BinarySource()\n inputs = tf.keras.Input(shape=(k), dtype=tf.float32)\n x = ConvEncoder(rate=0.5, constraint_length=4)(inputs)\n model = tf.keras.Model(inputs=inputs, outputs=x)\n\n b = source([bs, k])\n model(b)\n # call twice to see that bs can change\n b2 = source([bs+1, k])\n model(b2)\n \n model.summary()\n\n source = BinarySource()\n enc = ConvEncoder(rate=0.5, constraint_length=8)\n u = source([1, 32])\n x = enc(u)\n print(x.shape)\n u = source([2, 30])\n x = enc(u)\n print(x.shape)\n\n def test_multi_dimensional(self):\n \"\"\"Test against arbitrary shapes\n \"\"\"\n k = 120\n n = 240 # rate must be 1/2 or 1/3\n\n source = BinarySource()\n enc = ConvEncoder(rate=k/n, constraint_length=5)\n\n b = source([100, k])\n b_res = tf.reshape(b, [4, 5, 5, k])\n\n # encode 2D Tensor\n c = enc(b).numpy()\n # encode 4D Tensor\n c_res = enc(b_res).numpy()\n\n # test that shape was preserved\n self.assertTrue(c_res.shape[:-1]==b_res.shape[:-1])\n\n\n # and reshape to 2D shape\n c_res = tf.reshape(c_res, [100,n])\n # both version should yield same result\n self.assertTrue(np.array_equal(c, c_res))\n\n def test_ref_implementation(self):\n r\"\"\"Test against pre-encoded codewords from reference implementation.\n \"\"\"\n ref_path = 'codes/conv/'\n gs = [\n ['101', '111'],\n ['1101', '1111'],\n ['101', '111', '111'],\n ['101', '111', '111', '111']]\n gen_strs = [\n 'conv_rate_half_57_', \n 'conv_rate_half_6474_',\n 'conv_rate_onethird_577_',\n 'conv_rate_onefourth_5777_'] \n rs=[1/2, 1/2, 1/3, 1/4] \n mus = [3, 4, 3, 3]\n for idx, gen_poly in enumerate(gs):\n enc = ConvEncoder(gen_poly=gen_poly)\n gen_str = gen_strs[idx]\n u = np.load(ref_path + gen_str + 'ref_u.npy')\n cref = np.load(ref_path + gen_str + 'ref_x.npy')\n c = enc(u).numpy()\n self.assertTrue(np.array_equal(c, cref))\n \n if idx in [0, 2]:\n enc = ConvEncoder(rate=rs[idx], constraint_length=mus[idx]) \n c = enc(u).numpy()\n self.assertTrue(np.array_equal(c, cref)) \n \n def test_batch(self):\n \"\"\"Test that all samples in batch yield same output (for same input).\n \"\"\"\n bs = 100\n k = 120\n\n source = BinarySource()\n enc = ConvEncoder(rate=0.5, constraint_length=7)\n\n b = source([1, 15, k])\n b_rep = tf.tile(b, [bs, 1, 1])\n\n # and run tf version (to be tested)\n c = enc(b_rep).numpy()\n\n for i in range(bs):\n self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))\n\n def test_dtypes_flexible(self):\n \"\"\"Test that encoder supports variable dtypes and\n yields same result.\"\"\"\n\n dt_supported = (tf.float16, tf.float32, tf.float64, tf.int8,\n tf.int32, tf.int64, tf.uint8, tf.uint16, tf.uint32)\n\n bs = 10\n k = 32\n\n source = BinarySource()\n\n enc_ref = ConvEncoder(rate=0.5,\n constraint_length=7,\n output_dtype=tf.float32)\n\n u = source([bs, k])\n c_ref = enc_ref(u)\n\n for dt in dt_supported:\n enc = ConvEncoder(rate=0.5,\n constraint_length=7,\n output_dtype=dt)\n u_dt = tf.cast(u, dt)\n c = enc(u_dt)\n\n c_32 = tf.cast(c, tf.float32)\n\n self.assertTrue(np.array_equal(c_ref.numpy(), c_32.numpy()))\n\n def test_tf_fun(self):\n \"\"\"Test that tf.function decorator works and XLA is supported\"\"\"\n\n @tf.function\n def run_graph(u):\n return enc(u)\n\n @tf.function(jit_compile=True)\n def run_graph_xla(u):\n return enc(u)\n\n bs = 10\n k = 100\n\n source = BinarySource()\n enc = ConvEncoder(rate=0.5, constraint_length=7)\n\n # test that for 
arbitrary input only 0,1 values are outputed\n u = source([bs, k])\n x = run_graph(u).numpy()\n\n # execute the graph twice\n x = run_graph(u).numpy()\n\n # and change batch_size\n u = source([bs+1, k])\n x = run_graph(u).numpy()\n\n #check XLA\n x = run_graph_xla(u).numpy()\n u = source([bs, k])\n x = run_graph_xla(u).numpy()\n\n", "#\n# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n\"\"\"Layers implementing upsampling\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.experimental.numpy import swapaxes\nfrom sionna.utils.tensors import flatten_last_dims\n\nclass Upsampling(Layer):\n \"\"\"Upsampling(samples_per_symbol, axis=-1, **kwargs)\n\n Upsamples a tensor along a specified axis by inserting zeros\n between samples.\n\n Parameters\n ----------\n samples_per_symbol: int\n The upsampling factor. If ``samples_per_symbol`` is equal to `n`,\n then the upsampled axis will be `n`-times longer.\n\n axis: int\n The dimension to be up-sampled. Must not be the first dimension.\n\n Input\n -----\n x : [...,n,...], tf.DType\n The tensor to be upsampled. `n` is the size of the `axis` dimension.\n\n Output\n ------\n y : [...,n*samples_per_symbol,...], same dtype as ``x``\n The upsampled tensor.\n \"\"\"\n def __init__(self, samples_per_symbol, axis=-1, **kwargs):\n super().__init__(**kwargs)\n self._samples_per_symbol = samples_per_symbol\n self._axis = axis\n\n def build(self, input_shape):\n paddings = []\n for _ in range(len(input_shape)):\n paddings.append([0, 0])\n paddings.append([0, self._samples_per_symbol-1])\n self._paddings = paddings\n\n def call(self, inputs):\n x = swapaxes(inputs, self._axis, -1)\n x = tf.expand_dims(x, -1)\n x = tf.pad(x,\n self._paddings,\n constant_values=tf.cast(0, dtype=x.dtype))\n x = flatten_last_dims(x, 2)\n x = swapaxes(x, -1, self._axis)\n return x\n", "#\n# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\ntry:\n import sionna\nexcept ImportError as e:\n import sys\n sys.path.append(\"../\")\nimport tensorflow as tf\ngpus = tf.config.list_physical_devices('GPU')\nprint('Number of GPUs available :', len(gpus))\nif gpus:\n gpu_num = 0 # Number of the GPU to be used\n try:\n tf.config.set_visible_devices(gpus[gpu_num], 'GPU')\n print('Only GPU number', gpu_num, 'used.')\n tf.config.experimental.set_memory_growth(gpus[gpu_num], True)\n except RuntimeError as e:\n print(e)\n\nimport unittest\nimport numpy as np\nimport sionna\nfrom channel_test_utils import *\nfrom scipy.stats import kstest, norm\n\n\nclass TestLSP(unittest.TestCase):\n r\"\"\"Test the distribution, cross-correlation, and spatial correlation of\n 3GPP channel models' LSPs\n \"\"\"\n\n # Carrier frequency\n CARRIER_FREQUENCY = 3.5e9 # Hz\n\n # Heigh of UTs\n H_UT = 1.5\n\n # Heigh of BSs\n H_BS = 35.0\n\n # Batch size for generating samples of LSPs and pathlosses\n BATCH_SIZE = 500000\n\n # More than one UT is required for testing the spatial and cross-correlation\n # of LSPs\n NB_UT = 5\n\n # The LSPs follow either a Gaussian or a truncated Gaussian\n # distribution. A Kolmogorov-Smirnov (KS) test is used to check that the\n # LSP follow the appropriate distribution. 
This is the threshold below\n # which the KS statistic `D` should be for passing the test.\n MAX_ERR_KS = 1e-2\n\n # # Maximum allowed deviation for cross-correlation of LSP parameters\n MAX_ERR_CROSS_CORR = 3e-2\n\n # # Maximum allowed deviation for spatial correlation of LSP parameters\n MAX_ERR_SPAT_CORR = 3e-2\n\n # LoS probability\n MAX_ERR_LOS_PROB = 1e-2\n\n # ZOD Offset maximum relative error\n MAX_ERR_ZOD_OFFSET = 1e-2\n\n # Maximum allowed deviation for pathloss\n MAX_ERR_PATHLOSS_MEAN = 1.0\n MAX_ERR_PATHLOSS_STD = 1e-1\n\n def limited_normal(self, batch_size, minval, maxval, mu, std):\n r\"\"\"\n Return a limited normal distribution. This is different from a truncated\n normal distribution, as the samples exceed ``minval`` and ``maxval`` are\n clipped.\n\n More precisely, ``x`` is generated as follows:\n 1. Sample ``y`` of shape [``batch_size``] from a Gaussian distribution N(mu,std)\n 2. x = max(min(x, maxval), minval)\n \"\"\"\n x = np.random.normal(size=[batch_size])\n x = np.maximum(x, minval)\n x = np.minimum(x, maxval)\n x = std*x+mu\n return x\n\n def setUpClass():\n r\"\"\"Sample LSPs and pathlosses from all channel models for testing\"\"\"\n\n # Forcing the seed to make the tests deterministic\n tf.random.set_seed(42)\n np.random.seed(42)\n\n nb_bs = 1\n fc = TestLSP.CARRIER_FREQUENCY\n h_ut = TestLSP.H_UT\n h_bs = TestLSP.H_BS\n batch_size = TestLSP.BATCH_SIZE\n nb_ut = TestLSP.NB_UT\n\n # UT and BS arrays have no impact on LSP\n # However, these are needed to instantiate the model\n bs_array = sionna.channel.tr38901.PanelArray(num_rows_per_panel=2,\n num_cols_per_panel=2,\n polarization='dual',\n polarization_type='VH',\n antenna_pattern='38.901',\n carrier_frequency=fc,\n dtype=tf.complex128)\n ut_array = sionna.channel.tr38901.PanelArray(num_rows_per_panel=1,\n num_cols_per_panel=1,\n polarization='dual',\n polarization_type='VH',\n antenna_pattern='38.901',\n carrier_frequency=fc,\n dtype=tf.complex128)\n\n # The following quantities have no impact on LSP\n # However,these are needed to instantiate the model\n ut_orientations = tf.zeros([batch_size, nb_ut], dtype=tf.float64)\n bs_orientations = tf.zeros([batch_size, nb_ut], dtype=tf.float64)\n ut_velocities = tf.zeros([batch_size, nb_ut], dtype=tf.float64)\n\n # LSPs, ZoD offset, pathlosses\n TestLSP.lsp_samples = {}\n TestLSP.zod_offset = {}\n TestLSP.pathlosses = {}\n TestLSP.los_prob = {}\n\n ut_loc = generate_random_loc(batch_size, nb_ut, (100,2000),\n (100,2000), (h_ut, h_ut),\n share_loc=True, dtype=tf.float64)\n bs_loc = generate_random_loc(batch_size, nb_bs, (0,100),\n (0,100), (h_bs, h_bs),\n share_loc=True, dtype=tf.float64)\n\n ####### RMa\n TestLSP.lsp_samples['rma'] = {}\n TestLSP.zod_offset['rma'] = {}\n TestLSP.pathlosses['rma'] = {}\n scenario = sionna.channel.tr38901.RMaScenario( fc,\n ut_array,\n bs_array,\n \"uplink\",\n dtype=tf.complex128)\n lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)\n\n # LoS\n in_state = generate_random_bool(batch_size, nb_ut, 0.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state, True)\n lsp_sampler.topology_updated_callback()\n TestLSP.lsp_samples['rma']['los'] = lsp_sampler()\n TestLSP.zod_offset['rma']['los'] = scenario.zod_offset\n TestLSP.pathlosses['rma']['los'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n # NLoS\n in_state = generate_random_bool(batch_size, nb_ut, 0.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state, False)\n 
lsp_sampler.topology_updated_callback()\n TestLSP.lsp_samples['rma']['nlos'] = lsp_sampler()\n TestLSP.zod_offset['rma']['nlos'] = scenario.zod_offset\n TestLSP.pathlosses['rma']['nlos'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n # Indoor\n in_state = generate_random_bool(batch_size, nb_ut, 1.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state)\n lsp_sampler.topology_updated_callback()\n TestLSP.lsp_samples['rma']['o2i'] = lsp_sampler()\n TestLSP.zod_offset['rma']['o2i'] = scenario.zod_offset\n TestLSP.pathlosses['rma']['o2i'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n TestLSP.los_prob['rma'] = scenario.los_probability.numpy()\n\n TestLSP.rma_w = scenario.average_street_width\n TestLSP.rma_h = scenario.average_building_height\n\n ####### UMi\n TestLSP.lsp_samples['umi'] = {}\n TestLSP.zod_offset['umi'] = {}\n TestLSP.pathlosses['umi'] = {}\n scenario = sionna.channel.tr38901.UMiScenario( fc,\n 'low',\n ut_array,\n bs_array,\n \"uplink\",\n dtype=tf.complex128)\n lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)\n\n # LoS\n in_state = generate_random_bool(batch_size, nb_ut, 0.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state, True)\n lsp_sampler.topology_updated_callback()\n TestLSP.lsp_samples['umi']['los'] = lsp_sampler()\n TestLSP.zod_offset['umi']['los'] = scenario.zod_offset\n TestLSP.pathlosses['umi']['los'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n # NLoS\n in_state = generate_random_bool(batch_size, nb_ut, 0.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state, False)\n lsp_sampler.topology_updated_callback()\n TestLSP.lsp_samples['umi']['nlos'] = lsp_sampler()\n TestLSP.zod_offset['umi']['nlos'] = scenario.zod_offset\n TestLSP.pathlosses['umi']['nlos'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n # Indoor\n in_state = generate_random_bool(batch_size, nb_ut, 1.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state)\n lsp_sampler.topology_updated_callback()\n TestLSP.lsp_samples['umi']['o2i'] = lsp_sampler()\n TestLSP.zod_offset['umi']['o2i'] = scenario.zod_offset\n TestLSP.pathlosses['umi']['o2i-low'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n TestLSP.los_prob['umi'] = scenario.los_probability.numpy()\n\n ####### UMa\n TestLSP.lsp_samples['uma'] = {}\n TestLSP.zod_offset['uma'] = {}\n TestLSP.pathlosses['uma'] = {}\n scenario = sionna.channel.tr38901.UMaScenario( fc,\n 'low',\n ut_array,\n bs_array,\n \"uplink\",\n dtype=tf.complex128)\n lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)\n\n # LoS\n in_state = generate_random_bool(batch_size, nb_ut, 0.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state, True)\n lsp_sampler.topology_updated_callback()\n TestLSP.lsp_samples['uma']['los'] = lsp_sampler()\n TestLSP.zod_offset['uma']['los'] = scenario.zod_offset\n TestLSP.pathlosses['uma']['los'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n # NLoS\n in_state = generate_random_bool(batch_size, nb_ut, 0.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state, False)\n lsp_sampler.topology_updated_callback()\n TestLSP.lsp_samples['uma']['nlos'] = lsp_sampler()\n TestLSP.zod_offset['uma']['nlos'] = scenario.zod_offset\n TestLSP.pathlosses['uma']['nlos'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n # Indoor\n in_state = generate_random_bool(batch_size, nb_ut, 1.0)\n 
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state)\n lsp_sampler.topology_updated_callback()\n TestLSP.lsp_samples['uma']['o2i'] = lsp_sampler()\n TestLSP.zod_offset['uma']['o2i'] = scenario.zod_offset\n TestLSP.pathlosses['uma']['o2i-low'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n TestLSP.los_prob['uma'] = scenario.los_probability.numpy()\n\n # Sample pathlosses with high O2I loss model. Only with UMi and UMa\n ####### UMi-High\n scenario = sionna.channel.tr38901.UMiScenario( fc,\n 'high',\n ut_array,\n bs_array,\n \"uplink\",\n dtype=tf.complex128)\n lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)\n in_state = generate_random_bool(batch_size, nb_ut, 1.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state)\n lsp_sampler.topology_updated_callback()\n TestLSP.pathlosses['umi']['o2i-high'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n ####### UMa-high\n scenario = sionna.channel.tr38901.UMaScenario( fc,\n 'high',\n ut_array,\n bs_array,\n \"uplink\",\n dtype=tf.complex128)\n lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)\n in_state = generate_random_bool(batch_size, nb_ut, 1.0)\n scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,\n ut_velocities, in_state)\n lsp_sampler.topology_updated_callback()\n TestLSP.pathlosses['uma']['o2i-high'] = lsp_sampler.sample_pathloss()[:,0,:]\n\n # The following values do not depend on the scenario\n TestLSP.d_2d = scenario.distance_2d.numpy()\n TestLSP.d_2d_ut = scenario.matrix_ut_distance_2d.numpy()\n TestLSP.d_2d_out = scenario.distance_2d_out.numpy()\n TestLSP.d_3d = scenario.distance_3d[0,0,:].numpy()\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_ds_dist(self, model, submodel):\n \"\"\"Test the distribution of LSP DS\"\"\"\n samples = TestLSP.lsp_samples[model][submodel].ds[:,0,0].numpy()\n samples = np.log10(samples)\n mu, std = log10DS(model, submodel, TestLSP.CARRIER_FREQUENCY)\n D,_ = kstest(samples, norm.cdf, args=(mu, std))\n self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_asa_dist(self, model, submodel):\n \"\"\"Test the distribution of LSP ASA\"\"\"\n samples = TestLSP.lsp_samples[model][submodel].asa[:,0,0].numpy()\n samples = np.log10(samples)\n mu, std = log10ASA(model, submodel, TestLSP.CARRIER_FREQUENCY)\n a = -np.inf\n b = (np.log10(104)-mu)/std\n samples_ref = self.limited_normal(TestLSP.BATCH_SIZE, a, b, mu, std)\n # KS-test does not work great with discontinuties.\n # Therefore, we test only the contunuous part of the CDF, and also test\n # that the maximum value allowed is not exceeded\n maxval = np.max(samples)\n samples = samples[samples < np.log10(104)]\n samples_ref = samples_ref[samples_ref < np.log10(104)]\n D,_ = kstest(samples, samples_ref)\n self.assertLessEqual(maxval, np.log10(104), f\"{model}:{submodel}\")\n self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_asd_dist(self, model, submodel):\n \"\"\"Test the distribution of LSP ASD\"\"\"\n samples = TestLSP.lsp_samples[model][submodel].asd.numpy()\n samples = np.log10(samples)\n mu, std = log10ASD(model, submodel, TestLSP.CARRIER_FREQUENCY)\n a = -np.inf\n b = (np.log10(104)-mu)/std\n samples_ref = self.limited_normal(TestLSP.BATCH_SIZE, a, b, mu, std)\n # KS-test does not work great with 
discontinuties.\n # Therefore, we test only the contunuous part of the CDF, and also test\n # that the maximum value allowed is not exceeded\n maxval = np.max(samples)\n samples = samples[samples < np.log10(104)]\n samples_ref = samples_ref[samples_ref < np.log10(104)]\n D,_ = kstest(samples, samples_ref)\n self.assertLessEqual(maxval, np.log10(104), f\"{model}:{submodel}\")\n self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_zsa_dist(self, model, submodel):\n \"\"\"Test the distribution of LSP ZSA\"\"\"\n samples = TestLSP.lsp_samples[model][submodel].zsa[:,0,0].numpy()\n samples = np.log10(samples)\n mu, std = log10ZSA(model, submodel, TestLSP.CARRIER_FREQUENCY)\n a = -np.inf\n b = (np.log10(52)-mu)/std\n samples_ref = self.limited_normal(TestLSP.BATCH_SIZE, a, b, mu, std)\n # KS-test does not work great with discontinuties.\n # Therefore, we test only the contunuous part of the CDF, and also test\n # that the maximum value allowed is not exceeded\n maxval = np.max(samples)\n samples = samples[samples < np.log10(52)]\n samples_ref = samples_ref[samples_ref < np.log10(52)]\n D,_ = kstest(samples, samples_ref)\n self.assertLessEqual(maxval, np.log10(52), f\"{model}:{submodel}\")\n self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_zsd_dist(self, model, submodel):\n \"\"\"Test the distribution of LSP ZSD\"\"\"\n d_2d = TestLSP.d_2d[0,0,0]\n samples = TestLSP.lsp_samples[model][submodel].zsd[:,0,0].numpy()\n samples = np.log10(samples)\n mu, std = log10ZSD(model, submodel, d_2d, TestLSP.CARRIER_FREQUENCY,\n TestLSP.H_BS, TestLSP.H_UT)\n a = -np.inf\n b = (np.log10(52)-mu)/std\n samples_ref = self.limited_normal(TestLSP.BATCH_SIZE, a, b, mu, std)\n # KS-test does not work great with discontinuties.\n # Therefore, we test only the contunuous part of the CDF, and also test\n # that the maximum value allowed is not exceeded\n maxval = np.max(samples)\n samples = samples[samples < np.log10(52)]\n samples_ref = samples_ref[samples_ref < np.log10(52)]\n D,_ = kstest(samples, samples_ref)\n self.assertLessEqual(maxval, np.log10(52), f\"{model}:{submodel}\")\n self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_sf_dist(self, model, submodel):\n \"\"\"Test the distribution of LSP SF\"\"\"\n d_2d = TestLSP.d_2d[0,0,0]\n samples = TestLSP.lsp_samples[model][submodel].sf[:,0,0].numpy()\n samples = 10.0*np.log10(samples)\n mu, std = log10SF_dB(model, submodel, d_2d, TestLSP.CARRIER_FREQUENCY,\n TestLSP.H_BS, TestLSP.H_UT)\n D,_ = kstest(samples, norm.cdf, args=(mu, std))\n self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los',))\n def test_k_dist(self, model, submodel):\n \"\"\"Test the distribution of LSP K\"\"\"\n samples = TestLSP.lsp_samples[model][submodel].k_factor[:,0,0].numpy()\n samples = 10.0*np.log10(samples)\n mu, std = log10K_dB(model, submodel)\n D,_ = kstest(samples, norm.cdf, args=(mu, std))\n self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_cross_correlation(self, model, submodel):\n \"\"\"Test the LSP cross correlation\"\"\"\n lsp_list = []\n ds_samples = 
TestLSP.lsp_samples[model][submodel].ds[:,0,0].numpy()\n ds_samples = np.log10(ds_samples)\n lsp_list.append(ds_samples)\n asd_samples = TestLSP.lsp_samples[model][submodel].asd[:,0,0].numpy()\n asd_samples = np.log10(asd_samples)\n lsp_list.append(asd_samples)\n asa_samples = TestLSP.lsp_samples[model][submodel].asa[:,0,0].numpy()\n asa_samples = np.log10(asa_samples)\n lsp_list.append(asa_samples)\n sf_samples = TestLSP.lsp_samples[model][submodel].sf[:,0,0].numpy()\n sf_samples = np.log10(sf_samples)\n lsp_list.append(sf_samples)\n if submodel == 'los':\n k_samples = TestLSP.lsp_samples[model][submodel].k_factor[:,0,0]\n k_samples = np.log10(k_samples.numpy())\n lsp_list.append(k_samples)\n zsa_samples = TestLSP.lsp_samples[model][submodel].zsa[:,0,0].numpy()\n zsa_samples = np.log10(zsa_samples)\n lsp_list.append(zsa_samples)\n zsd_samples = TestLSP.lsp_samples[model][submodel].zsd[:,0,0].numpy()\n zsd_samples = np.log10(zsd_samples)\n lsp_list.append(zsd_samples)\n lsp_list = np.stack(lsp_list, axis=-1)\n cross_corr_measured = np.corrcoef(lsp_list.T)\n abs_err = np.abs(cross_corr(model, submodel) - cross_corr_measured)\n max_err = np.max(abs_err)\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_CROSS_CORR,\n f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_spatial_correlation(self, model, submodel):\n \"\"\"Test the spatial correlation of LSPs\"\"\"\n d_2d_ut = TestLSP.d_2d_ut[0,0]\n #### LoS\n ds_samples = TestLSP.lsp_samples[model][submodel].ds[:,0,:]\n ds_samples = np.log10(ds_samples.numpy())\n asd_samples = TestLSP.lsp_samples[model][submodel].asd[:,0,:]\n asd_samples = np.log10(asd_samples.numpy())\n asa_samples = TestLSP.lsp_samples[model][submodel].asa[:,0,:]\n asa_samples = np.log10(asa_samples.numpy())\n sf_samples = TestLSP.lsp_samples[model][submodel].sf[:,0,:]\n sf_samples = np.log10(sf_samples.numpy())\n if submodel == 'los':\n k_samples = TestLSP.lsp_samples[model][submodel].k_factor[:,0,:]\n k_samples = np.log10(k_samples.numpy())\n zsa_samples = TestLSP.lsp_samples[model][submodel].zsa[:,0,:]\n zsa_samples = np.log10(zsa_samples.numpy())\n zsd_samples = TestLSP.lsp_samples[model][submodel].zsd[:,0,:]\n zsd_samples = np.log10(zsd_samples.numpy())\n #\n C_ds_measured = np.corrcoef(ds_samples.T)[0]\n C_asd_measured = np.corrcoef(asd_samples.T)[0]\n C_asa_measured = np.corrcoef(asa_samples.T)[0]\n C_sf_measured = np.corrcoef(sf_samples.T)[0]\n if submodel == 'los':\n C_k_measured = np.corrcoef(k_samples.T)[0]\n C_zsa_measured = np.corrcoef(zsa_samples.T)[0]\n C_zsd_measured = np.corrcoef(zsd_samples.T)[0]\n #\n C_ds = np.exp(-d_2d_ut/corr_dist_ds(model, submodel))\n C_asd = np.exp(-d_2d_ut/corr_dist_asd(model, submodel))\n C_asa = np.exp(-d_2d_ut/corr_dist_asa(model, submodel))\n C_sf = np.exp(-d_2d_ut/corr_dist_sf(model, submodel))\n if submodel == 'los':\n C_k = np.exp(-d_2d_ut/corr_dist_k(model, submodel))\n C_zsa = np.exp(-d_2d_ut/corr_dist_zsa(model, submodel))\n C_zsd = np.exp(-d_2d_ut/corr_dist_zsd(model, submodel))\n #\n ds_max_err = np.max(np.abs(C_ds_measured - C_ds))\n self.assertLessEqual(ds_max_err, TestLSP.MAX_ERR_SPAT_CORR,\n f\"{model}:{submodel}\")\n asd_max_err = np.max(np.abs(C_asd_measured - C_asd))\n self.assertLessEqual(asd_max_err, TestLSP.MAX_ERR_SPAT_CORR,\n f\"{model}:{submodel}\")\n asa_max_err = np.max(np.abs(C_asa_measured - C_asa))\n self.assertLessEqual(asa_max_err, TestLSP.MAX_ERR_SPAT_CORR,\n f\"{model}:{submodel}\")\n sf_max_err = np.max(np.abs(C_sf_measured - C_sf))\n 
self.assertLessEqual(sf_max_err, TestLSP.MAX_ERR_SPAT_CORR,\n f\"{model}:{submodel}\")\n if submodel == 'los':\n k_max_err = np.max(np.abs(C_k_measured - C_k))\n self.assertLessEqual(k_max_err, TestLSP.MAX_ERR_SPAT_CORR,\n f\"{model}:{submodel}\")\n zsa_max_err = np.max(np.abs(C_zsa_measured - C_zsa))\n self.assertLessEqual(zsa_max_err, TestLSP.MAX_ERR_SPAT_CORR,\n f\"{model}:{submodel}\")\n zsd_max_err = np.max(np.abs(C_zsd_measured - C_zsd))\n self.assertLessEqual(zsd_max_err, TestLSP.MAX_ERR_SPAT_CORR,\n f\"{model}:{submodel}\")\n\n # Submodel is not needed for LoS probability\n @channel_test_on_models(('rma', 'umi', 'uma'), ('foo',))\n def test_los_probability(self, model, submodel):\n \"\"\"Test LoS probability\"\"\"\n d_2d_out = TestLSP.d_2d_out\n h_ut = TestLSP.H_UT\n #\n los_prob_ref = los_probability(model, d_2d_out, h_ut)\n los_prob = TestLSP.los_prob[model]\n #\n max_err = np.max(np.abs(los_prob_ref-los_prob))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_LOS_PROB,\n f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_zod_offset(self, model, submodel):\n \"\"\"Test ZOD offset\"\"\"\n d_2d = self.d_2d\n fc = TestLSP.CARRIER_FREQUENCY\n h_ut = TestLSP.H_UT\n samples = self.zod_offset[model][submodel]\n samples_ref = zod_offset(model, submodel, fc, d_2d, h_ut)\n max_err = np.max(np.abs(samples-samples_ref))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_ZOD_OFFSET,\n f\"{model}:{submodel}\")\n\n @channel_test_on_models(('rma','umi', 'uma'), ('los', 'nlos', 'o2i'))\n def test_pathloss(self, model, submodel):\n \"\"\"Test the pathloss\"\"\"\n fc = TestLSP.CARRIER_FREQUENCY\n h_ut = TestLSP.H_UT\n h_bs = TestLSP.H_BS\n if model == 'rma':\n samples = TestLSP.pathlosses[model][submodel]\n mean_samples = tf.reduce_mean(samples, axis=0).numpy()\n std_samples = tf.math.reduce_std(samples, axis=0).numpy()\n #\n d_2ds = TestLSP.d_2d[0,0]\n d_3ds = TestLSP.d_3d\n w = TestLSP.rma_w\n h = TestLSP.rma_h\n samples_ref = np.array([pathloss(model, submodel, d_2d, d_3d, fc,\\\n h_bs, h_ut, h, w) for d_2d, d_3d in zip(d_2ds, d_3ds)])\n #\n max_err = np.max(np.abs(mean_samples-samples_ref))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_MEAN,\n f\"{model}:{submodel}\")\n max_err = np.max(np.abs(std_samples-pathloss_std(model, submodel)))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_STD,\n f\"{model}:{submodel}\")\n elif model == 'umi':\n if submodel == 'o2i':\n for loss_model in ('low', 'high'):\n samples = TestLSP.pathlosses[model][submodel+'-'+loss_model]\n mean_samples = tf.reduce_mean(samples, axis=0).numpy()\n std_samples = tf.math.reduce_std(samples, axis=0).numpy()\n #\n d_2ds = TestLSP.d_2d[0,0]\n d_3ds = TestLSP.d_3d\n samples_ref = np.array([pathloss(model, submodel, d_2d, d_3d,\\\n fc, h_bs, h_ut, loss_model) for d_2d, d_3d in zip(d_2ds, d_3ds)])\n #\n max_err = np.max(np.abs(mean_samples-samples_ref))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_MEAN,\n f\"{model}:{submodel}\")\n max_err = np.max(np.abs(std_samples-pathloss_std(model, submodel, loss_model)))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_STD,\n f\"{model}:{submodel}\")\n else:\n samples = TestLSP.pathlosses[model][submodel]\n mean_samples = tf.reduce_mean(samples, axis=0).numpy()\n std_samples = tf.math.reduce_std(samples, axis=0).numpy()\n #\n d_2ds = TestLSP.d_2d[0,0]\n d_3ds = TestLSP.d_3d\n samples_ref = np.array([pathloss(model, submodel, d_2d, d_3d,\\\n fc, h_bs, h_ut) for d_2d, d_3d in zip(d_2ds, d_3ds)])\n #\n 
max_err = np.max(np.abs(mean_samples-samples_ref))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_MEAN,\n f\"{model}:{submodel}\")\n max_err = np.max(np.abs(std_samples-pathloss_std(model, submodel)))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_STD,\n f\"{model}:{submodel}\")\n elif model == 'uma':\n if submodel == 'o2i':\n for loss_model in ('low', 'high'):\n samples = TestLSP.pathlosses[model][submodel+'-'+loss_model]\n mean_samples = tf.reduce_mean(samples, axis=0).numpy()\n std_samples = tf.math.reduce_std(samples, axis=0).numpy()\n #\n d_2ds = TestLSP.d_2d[0,0]\n d_3ds = TestLSP.d_3d\n samples_ref = np.array([pathloss(model, submodel, d_2d, d_3d,\\\n fc, h_bs, h_ut, loss_model) for d_2d, d_3d in zip(d_2ds, d_3ds)])\n #\n max_err = np.max(np.abs(mean_samples-samples_ref))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_MEAN,\n f\"{model}:{submodel}\")\n max_err = np.max(np.abs(std_samples-pathloss_std(model, submodel, loss_model)))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_STD,\n f\"{model}:{submodel}\")\n else:\n samples = TestLSP.pathlosses[model][submodel]\n mean_samples = tf.reduce_mean(samples, axis=0).numpy()\n std_samples = tf.math.reduce_std(samples, axis=0).numpy()\n #\n d_2ds = TestLSP.d_2d[0,0]\n d_3ds = TestLSP.d_3d\n samples_ref = np.array([pathloss(model, submodel, d_2d, d_3d,\\\n fc, h_bs, h_ut) for d_2d, d_3d in zip(d_2ds, d_3ds)])\n #\n max_err = np.max(np.abs(mean_samples-samples_ref))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_MEAN,\n f\"{model}:{submodel}\")\n max_err = np.max(np.abs(std_samples-pathloss_std(model, submodel)))\n self.assertLessEqual(max_err, TestLSP.MAX_ERR_PATHLOSS_STD,\n f\"{model}:{submodel}\")\n" ]
[ [ "tensorflow.keras.Input", "numpy.array_equal", "tensorflow.config.experimental.set_memory_growth", "tensorflow.reshape", "tensorflow.cast", "tensorflow.keras.Model", "tensorflow.function", "tensorflow.config.list_physical_devices", "numpy.load", "numpy.array", "numpy.zeros", "tensorflow.tile", "tensorflow.config.set_visible_devices" ], [ "tensorflow.experimental.numpy.swapaxes", "tensorflow.cast", "tensorflow.expand_dims" ], [ "scipy.stats.kstest", "numpy.maximum", "numpy.minimum", "numpy.random.seed", "tensorflow.config.experimental.set_memory_growth", "tensorflow.zeros", "numpy.abs", "tensorflow.reduce_mean", "numpy.stack", "tensorflow.math.reduce_std", "numpy.max", "numpy.random.normal", "numpy.log10", "tensorflow.config.list_physical_devices", "numpy.corrcoef", "tensorflow.random.set_seed", "tensorflow.config.set_visible_devices" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
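The Upsampling layer in sionna/signal/upsampling.py above implements zero-insertion with an expand/pad/flatten trick. A standalone TensorFlow sketch of the same trick, distilled for illustration (assumes TF 2.x and a statically known rank; not Sionna's public API):

import tensorflow as tf

def upsample_last_axis(x, samples_per_symbol):
    # append samples_per_symbol - 1 zeros after every sample on the last axis
    x = tf.expand_dims(x, -1)                              # [..., n, 1]
    paddings = [[0, 0]] * (x.shape.rank - 1) + [[0, samples_per_symbol - 1]]
    x = tf.pad(x, paddings)                                # [..., n, sps]
    return tf.reshape(x, tf.concat([tf.shape(x)[:-2], [-1]], axis=0))

print(upsample_last_axis(tf.constant([[1., 2., 3.]]), 2))
# tf.Tensor([[1. 0. 2. 0. 3. 0.]], shape=(1, 6), dtype=float32)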
hphphp123321/DeepKE
[ "f7efd3fc87d3bf88783a41efc3c09dca7a986013" ]
[ "example/ner/few-shot/run.py" ]
[ "import os\n\nimport hydra\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]='1'\nimport logging\nimport sys\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"../\")))\n\nfrom hydra import utils\nfrom torch.utils.data import DataLoader\nfrom deepke.name_entity_re.few_shot.models.model import PromptBartModel, PromptGeneratorModel\nfrom deepke.name_entity_re.few_shot.module.datasets import ConllNERProcessor, ConllNERDataset\nfrom deepke.name_entity_re.few_shot.module.train import Trainer\nfrom deepke.name_entity_re.few_shot.module.metrics import Seq2SeqSpanMetric\nfrom deepke.name_entity_re.few_shot.utils.util import get_loss, set_seed\nfrom deepke.name_entity_re.few_shot.module.mapping_type import mit_movie_mapping, mit_restaurant_mapping, atis_mapping\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\nfrom tensorboardX import SummaryWriter\nwriter = SummaryWriter(log_dir='logs')\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nDATASET_CLASS = {\n 'conll2003': ConllNERDataset,\n 'mit-movie': ConllNERDataset,\n 'mit-restaurant': ConllNERDataset,\n 'atis': ConllNERDataset\n}\n\nDATA_PROCESS = {\n 'conll2003': ConllNERProcessor,\n 'mit-movie': ConllNERProcessor,\n 'mit-restaurant': ConllNERProcessor,\n 'atis': ConllNERProcessor\n}\n\nDATA_PATH = {\n 'conll2003': {'train': 'data/conll2003/train.txt',\n 'dev': 'data/conll2003/dev.txt',\n 'test': 'data/conll2003/test.txt'},\n 'mit-movie': {'train': 'data/mit-movie/20-shot-train.txt',\n 'dev': 'data/mit-movie/test.txt'},\n 'mit-restaurant': {'train': 'data/mit-restaurant/10-shot-train.txt',\n 'dev': 'data/mit-restaurant/test.txt'},\n 'atis': {'train': 'data/atis/20-shot-train.txt',\n 'dev': 'data/atis/test.txt'}\n}\n\nMAPPING = {\n 'conll2003': {'loc': '<<location>>',\n 'per': '<<person>>',\n 'org': '<<organization>>',\n 'misc': '<<others>>'},\n 'mit-movie': mit_movie_mapping,\n 'mit-restaurant': mit_restaurant_mapping,\n 'atis': atis_mapping\n}\n\[email protected](config_path=\"conf/config.yaml\")\ndef main(cfg):\n cwd = utils.get_original_cwd()\n cfg.cwd = cwd\n print(cfg)\n \n data_path = DATA_PATH[cfg.dataset_name]\n for mode, path in data_path.items():\n data_path[mode] = os.path.join(cfg.cwd, path)\n dataset_class, data_process = DATASET_CLASS[cfg.dataset_name], DATA_PROCESS[cfg.dataset_name]\n mapping = MAPPING[cfg.dataset_name]\n\n set_seed(cfg.seed) # set seed, default is 1\n if cfg.save_path is not None: # make save_path dir\n cfg.save_path = os.path.join(cfg.save_path, cfg.dataset_name+\"_\"+str(cfg.batch_size)+\"_\"+str(cfg.learning_rate)+cfg.notes)\n if not os.path.exists(cfg.save_path):\n os.makedirs(cfg.save_path, exist_ok=True)\n \n process = data_process(data_path=data_path, mapping=mapping, bart_name=cfg.bart_name, learn_weights=cfg.learn_weights)\n train_dataset = dataset_class(data_processor=process, mode='train')\n train_dataloader = DataLoader(train_dataset, collate_fn=train_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)\n \n dev_dataset = dataset_class(data_processor=process, mode='dev')\n dev_dataloader = DataLoader(dev_dataset, collate_fn=dev_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)\n\n label_ids = list(process.mapping2id.values())\n\n prompt_model = PromptBartModel(tokenizer=process.tokenizer, label_ids=label_ids, args=cfg)\n model = 
PromptGeneratorModel(prompt_model=prompt_model, bos_token_id=0,\n eos_token_id=1,\n max_length=cfg.tgt_max_len, max_len_a=cfg.src_seq_ratio,num_beams=cfg.num_beams, do_sample=False,\n repetition_penalty=1, length_penalty=cfg.length_penalty, pad_token_id=1,\n restricter=None)\n metrics = Seq2SeqSpanMetric(eos_token_id=1, num_labels=len(label_ids), target_type='word')\n loss = get_loss\n\n trainer = Trainer(train_data=train_dataloader, dev_data=dev_dataloader, test_data=None, model=model, args=cfg, logger=logger, loss=loss,\n metrics=metrics, writer=writer)\n trainer.train()\n\n writer.close()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
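run.py above hands each DataLoader a dataset-specific collate_fn. A generic sketch of that pattern (an assumed illustration, not DeepKE's actual collator): pad each batch of variable-length token-id lists to its longest member.

import torch
from torch.utils.data import DataLoader

def pad_collate(batch):
    # pad every sequence in the batch to the batch's longest sequence
    max_len = max(len(ids) for ids in batch)
    return torch.tensor([ids + [0] * (max_len - len(ids)) for ids in batch])

loader = DataLoader([[1, 2, 3], [4, 5], [6]], batch_size=2, collate_fn=pad_collate)
for batch in loader:
    print(batch.shape)  # torch.Size([2, 3]) then torch.Size([1, 1])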