import json
import discord
from discord.ext import commands
from utils import get_color
import datetime
class Modlogs(commands.Cog):
def __init__(self, bot):
self.bot = bot
with open("./bot_config/logging/modlogs_channels.json", "r") as modlogsFile:
self.modlogsFile = json.load(modlogsFile)
@commands.command(name="messagelogschannel",
aliases=["seteditedlogschannel", "setdeletedlogschannel", "setlogschannel", "setlogchannel"],
description="Sets the channel in which edited/deleted message logs are sent.")
async def set_modlogs_channel(self, ctx, channel: discord.TextChannel):
channel_id = channel.id
self.modlogsFile[str(ctx.guild.id)] = int(channel_id)
with open("./bot_config/logging/modlogs_channels.json", "w") as modlogsFile:
json.dump(self.modlogsFile, modlogsFile, indent=4)
await ctx.send(f"Edited/Deleted logs channel set as {channel.mention} succesfully.")
@commands.Cog.listener()
async def on_message_edit(self, before, after):
message_channel_id = self.modlogsFile.get(str(before.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(id=int(message_channel_id))
if message_channel is None:
return
message_link = f"https://discord.com/channels/{before.guild.id}/{before.channel.id}/{before.id}"
embed = discord.Embed(title=f"Message edited in {before.channel.name}",
color=get_color.get_color(before.author), timestamp=after.created_at)
embed.add_field(name="Before", value=before.content, inline=False)
embed.add_field(name="After", value=after.content, inline=False)
embed.add_field(
name="Link", value=f"__[Message]({message_link})__", inline=False)
embed.set_footer(text=f"Author • {before.author} | Edited")
embed.set_thumbnail(url=before.author.avatar_url)
        # The edit timestamp already appears on the right of the embed, so it is not repeated in the footer.
        try:
            await message_channel.send(embed=embed)
        except discord.HTTPException:
            # Embed-only messages have no text content, so the empty field value makes Discord reject the embed.
            pass
# from mahasvan#0001 ape botman.py
@commands.Cog.listener()
async def on_message_delete(self, message):
embed = discord.Embed(title=f"Message deleted in {message.channel.name}",
color=get_color.get_color(message.author), timestamp=message.created_at)
embed.add_field(name="Content", value=message.content, inline=False)
embed.set_footer(text=f"Author • {message.author} | Created", icon_url=message.author.avatar_url)
        # The creation timestamp already appears on the right of the embed, so it is not repeated in the footer.
        message_channel_id = self.modlogsFile.get(str(message.guild.id))
        if message_channel_id is None:
            return
        message_channel = self.bot.get_channel(id=int(message_channel_id))
if message_channel is None:
return
await message_channel.send(embed=embed)
@commands.Cog.listener()
async def on_bulk_message_delete(self, messages):
if self.modlogsFile.get(str(messages[0].guild.id)) is None:
return
with open(f"./bot_config/tempText/{messages[0].guild.id}.txt", "w") as temp_textfile:
for x in messages:
line1 = f"From: {x.author} | in: {x.channel.name} | Created at: {x.created_at}\n"
temp_textfile.write(line1)
temp_textfile.write(f"{x.content}\n\n")
file = discord.File(f"./bot_config/tempText/{messages[0].guild.id}.txt")
message_channel = self.bot.get_channel(id=int(self.modlogsFile.get(str(messages[0].guild.id))))
if message_channel is None:
return
await message_channel.send(file=file, content=f"{len(messages)} messages deleted. "
f"Sending information as text file.")
# member update event
@commands.Cog.listener()
async def on_member_update(self, before, after):
message_channel_id = self.modlogsFile.get(str(before.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(id=int(message_channel_id))
if message_channel is None:
return
# nickname change
        if before.nick != after.nick:
embed = discord.Embed(title=f"{before}'s nickname has been updated", description=f"ID: {before.id}",
color=get_color.get_color(after), timestamp=before.created_at)
embed.add_field(
name="Before", value=before.display_name, inline=False)
embed.add_field(
name="After", value=after.display_name, inline=False)
embed.set_thumbnail(url=after.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
# role change
        if before.roles != after.roles:
embed = discord.Embed(title=f"{before}'s roles have been updated", description=f"ID: {before.id}",
color=after.color, timestamp=before.created_at)
before_roles_str, after_roles_str = "", ""
for x in before.roles[::-1]:
before_roles_str += f"{x.mention} "
for x in after.roles[::-1]:
after_roles_str += f"{x.mention} "
embed.add_field(
name="Before", value=before_roles_str, inline=False)
embed.add_field(name="After", value=after_roles_str, inline=False)
embed.set_thumbnail(url=after.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
# from mahasvan#0001 ape botman.py
# ban event
@commands.Cog.listener()
    async def on_member_ban(self, guild, member: discord.Member):
        message_channel_id = self.modlogsFile.get(str(guild.id))
        if message_channel_id is None:
            return
        message_channel = self.bot.get_channel(id=int(message_channel_id))
        if message_channel is None:
            return
        embed = discord.Embed(title="**Member Banned**",
                              color=member.color, timestamp=datetime.datetime.utcnow())
        embed.set_thumbnail(url=member.avatar_url)
        # The ban event does not include the responsible moderator; that would require an audit log lookup.
        embed.add_field(name=f"{member} was banned from the server",
                        value="Check the audit log for the responsible moderator.")
        embed.set_footer(text=f"UUID: {member.id}")
await message_channel.send(embed=embed)
# unban event
@commands.Cog.listener()
    async def on_member_unban(self, guild, member: discord.User):
        message_channel_id = self.modlogsFile.get(str(guild.id))
        if message_channel_id is None:
            return
        message_channel = self.bot.get_channel(id=int(message_channel_id))
        if message_channel is None:
            return
        embed = discord.Embed(title=f"{member} has been unbanned", description=f"ID: {member.id}",
                              color=discord.Color.random())
embed.set_thumbnail(url=member.avatar_url)
await message_channel.send(embed=embed)
# join event
@commands.Cog.listener()
async def on_member_join(self, member):
message_channel_id = self.modlogsFile.get(str(member.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(id=int(message_channel_id))
if message_channel is None:
return
embed = discord.Embed(title=f"Member {member} joined the the server.", color=member.color,
timestamp=datetime.datetime.utcnow(),
description=f"**Their account was created at:** {member.created_at}")
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text=f"UUID: {member.id}")
await message_channel.send(embed=embed)
# leave event
@commands.Cog.listener()
async def on_member_remove(self, member):
message_channel_id = self.modlogsFile.get(str(member.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(id=int(message_channel_id))
if message_channel is None:
return
        embed = discord.Embed(title=f"Member {member} left the server.", color=member.color,
                              timestamp=datetime.datetime.utcnow(),
                              description=f"**Their account was created at:** {member.created_at}")
        embed.add_field(name="Their roles:",
                        value=" ".join(role.mention for role in member.roles))
embed.set_footer(text=f"UUID: {member.id}")
embed.set_thumbnail(url=member.avatar_url)
await message_channel.send(embed=embed)
def setup(bot):
bot.add_cog(Modlogs(bot))
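# Illustrative sketch, not part of the original cog: the JSON file loaded in
# Modlogs.__init__ is assumed to map guild IDs (as strings) to the integer ID of
# the logging channel, e.g. {"123456789012345678": 234567890123456789}. Since a
# missing file would raise FileNotFoundError, a small helper like this (names are
# assumptions) could seed an empty mapping before the cog loads.
import os

def ensure_modlogs_config(path="./bot_config/logging/modlogs_channels.json"):
    """Create an empty guild-to-channel mapping file if it does not exist yet."""
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if not os.path.exists(path):
        with open(path, "w") as fp:
            json.dump({}, fp, indent=4)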
# ===================== next file (python) =====================
import pytest
from django.contrib.auth import get_user_model
from django.urls import reverse
from tahoe_idp.tests.magiclink_fixtures import user # NOQA: F401
User = get_user_model()
@pytest.mark.django_db
def test_studio_login_must_be_authenticated(client, settings): # NOQA: F811
url = reverse('studio_login')
response = client.get(url)
assert response.status_code == 302
assert response.url.startswith(settings.LOGIN_URL)
@pytest.mark.django_db
def test_studio_login(settings, client, user): # NOQA: F811
url = reverse('studio_login')
client.login(username=user.username, password='password')
response = client.get(url)
assert response.status_code == 302
assert response.url.startswith('http://{studio_domain}'.format(studio_domain=settings.MAGICLINK_STUDIO_DOMAIN))
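# Illustrative sketch, not part of the original suite: pytest-django's `settings`
# fixture can override the studio domain, assuming the view reads
# MAGICLINK_STUDIO_DOMAIN at request time (as the test above suggests). The
# domain below is a placeholder.
@pytest.mark.django_db
def test_studio_login_redirects_to_configured_domain(settings, client, user):  # NOQA: F811
    settings.MAGICLINK_STUDIO_DOMAIN = 'studio.example.com'
    client.login(username=user.username, password='password')
    response = client.get(reverse('studio_login'))
    assert response.status_code == 302
    assert response.url.startswith('http://studio.example.com')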
# ===================== next file (python) =====================
UI_INTERACTIONS = {
'learn-more': {
'interaction_type': 'click',
'element_location': 'ct_menu_tree',
'element_name': 'ct_learn_more_btn',
'icon_name': 'document',
'color': 'yellow',
'cta_text': 'Learn More'
},
'advanced-metrics': {
'interaction_type': 'click',
'element_location': 'ct_menu_tree',
'element_name': 'ct_web_metrics_btn',
'icon_name': 'paw',
'color': 'blue',
'cta_text': 'See advanced metrics'
},
'open-dashboard': {
'interaction_type': 'click',
'element_location': 'ct_menu_tree',
'element_name': 'ct_summary_btn',
'icon_name': 'guage',
'color': 'purple',
'cta_text': 'View summary'
},
'toggle-status-metrics': {
'interaction_type': 'click',
'element_location': 'ct_menu_tree',
'element_name': 'ct_toggle_status_bar_metrics_btn',
'icon_name': 'slash-eye',
'color': 'blue',
'cta_text': 'Hide status bar metrics'
},
'submit-feedback': {
'interaction_type': 'click',
'element_location': 'ct_menu_tree',
'element_name': 'ct_submit_feedback_btn',
'icon_name': 'text-bubble',
'color': 'green',
'cta_text': 'Submit feedback'
},
'google-signup': {
'interaction_type': 'click',
'element_location': 'ct_menu_tree',
'element_name': 'ct_sign_up_google_btn',
'icon_name': 'google',
'color': '',
'cta_text': 'Sign up with Google'
},
'github-signup': {
'interaction_type': 'click',
'element_location': 'ct_menu_tree',
'element_name': 'ct_sign_up_github_btn',
'icon_name': 'github',
'color': 'white',
'cta_text': 'Sign up with Github'
},
'email-signup': {
'interaction_type': 'click',
'element_location': 'ct_menu_tree',
'element_name': 'ct_sign_up_email_btn',
'icon_name': 'envelope',
'color': 'gray',
'cta_text': 'Sign up with email'
},
'code-time': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_codetime_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Code time'
},
'active-code-time': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_active_codetime_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Active code time'
},
'lines-added': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_lines_added_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Lines added'
},
'lines-removed': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_lines_removed_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Lines removed'
},
'keystrokes': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_keystrokes_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Keystrokes'
},
'files-changed': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_files_changed_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Files changed today'
},
'top-kpm-files': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_top_files_by_kpm_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Top files by KPM'
},
'top-keystrokes-files': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_top_files_by_keystrokes_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Top files by keystrokes'
},
'top-codetime-files': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_top_files_by_codetime_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Top files by code time'
},
'open-changes': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_open_changes_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Open changes'
},
'committed-today': {
'interaction_type': 'click',
'element_location': 'ct_metrics_tree',
'element_name': 'ct_committed_today_toggle_node',
'icon_name': '',
'color': 'blue',
'cta_text': 'Committed today'
},
'contributors-repo-link': {
'interaction_type': 'click',
'element_location': 'ct_contributors_tree',
'element_name': 'ct_contributor_repo_identifier_btn',
'icon_name': '',
'color': 'blue',
'cta_text': 'redacted'
},
'view-dashboard': {
'interaction_type': 'keyboard',
'element_location': 'ct_command_palette',
'element_name': 'ct_summary_cmd',
'icon_name': '',
'color': '',
'cta_text': 'View Dashboard'
},
'toggle-status-bar-metrics': {
'interaction_type': 'keyboard',
'element_location': 'ct_command_palette',
'element_name': 'ct_toggle_status_bar_metrics_cmd',
'icon_name': '',
'color': '',
'cta_text': 'Show/Hide Status Bar Metrics'
},
'view-web-dashboard': {
'interaction_type': 'keyboard',
'element_location': 'ct_command_palette',
'element_name': 'ct_web_metrics_cmd',
'icon_name': '',
'color': '',
'cta_text': ''
},
'show-tree-view': {
'interaction_type': 'keyboard',
'element_location': 'ct_command_palette',
'element_name': 'ct_show_tree_view_cmd',
'icon_name': '',
'color': '',
'cta_text': 'Code Time: Show Tree View'
},
'pause-telemetry': {
'interaction_type': 'keyboard',
'element_location': 'ct_command_palette',
'element_name': 'ct_pause_telemetry_cmd',
'icon_name': '',
'color': '',
'cta_text': 'Code Time: Pause'
},
'enable-telemetry': {
'interaction_type': 'keyboard',
'element_location': 'ct_command_palette',
'element_name': 'ct_enable_telemetry_cmd',
'icon_name': '',
'color': '',
'cta_text': 'Code Time: Enable'
}
}
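# Illustrative helper, not part of the original module: look up one of the
# interaction definitions above and merge it into a tracking payload. The extra
# keyword arguments (e.g. plugin_version) are assumptions.
def build_interaction_event(key, **extra):
    """Return a copy of the UI_INTERACTIONS entry for `key`, merged with extras."""
    return {**UI_INTERACTIONS[key], **extra}

# e.g. build_interaction_event('open-dashboard', plugin_version='2.0.0')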
# ===================== next file (python) =====================
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for topic bridging table."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
from googlecloudsdk.api_lib.functions import transforms
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class TopicBridge(base.Group):
"""Manage topic bridging table for the Edge device.
Manage topic bridging table for the Edge device. A topic bridging table is
similar to routing table. Each row of the table defines a rule. A rule will
route topic from one domain (Edge or cloud) to another domain, and it can
also route messages from a topic to another topic.
"""
# ===================== next file (python) =====================
from django.views.generic import (
ListView,
CreateView,
UpdateView,
DeleteView,
DetailView,
)
from django.urls import reverse_lazy
from .models import Todo
class ListTodosView(ListView):
model = Todo
class DetailTodoView(DetailView):
model = Todo
class CreateTodoView(CreateView):
model = Todo
fields = ["title", "description", "priority"]
def get_success_url(self):
return reverse_lazy("todos:list")
class UpdateTodoView(UpdateView):
model = Todo
fields = ["title", "description", "priority"]
def get_success_url(self):
return reverse_lazy("todos:list")
class DeleteTodoView(DeleteView):
model = Todo
def get_success_url(self):
return reverse_lazy("todos:list")
# ===================== next file (python) =====================
import sys
import bpy
import threading
from .signal import Signal
from .utils import find_first_view3d
class AnimationController:
'''Provides an interface to Blender's animation system with fine-grained callbacks.
To play nice with Blender, blendtorch provides a callback based class for interacting
with the Blender's animation and rendering system. The usual way to interact with
this class is through an object of AnimationController. Depending on the required
callbacks, one or more signals are connected to Python functions.
`AnimationController.play` starts the animation loop.
By default `AnimationController.play` is non-blocking and therefore requires a
non background instance of Blender. In case `--background` is required,
`AnimationController.play` also supports blocking animation loop variant. In blocking
execution, offscreen rendering works but may crash Blender once the loop is exited (2.83.2),
and is therefore not recommended when image data is required.
`AnimationController` exposes the following signals
- pre_play() invoked before playing starts
- pre_animation() invoked before first frame of animation range is processed
- pre_frame() invoked before a frame begins
- post_frame() invoked after a frame is finished
- post_animation() invoked after the last animation frame has completed
- post_play() invoked after playing ends
'''
def __init__(self):
'''Create a new instance.'''
self.pre_animation = Signal()
self.pre_frame = Signal()
self.post_frame = Signal()
self.post_animation = Signal()
self.pre_play = Signal()
self.post_play = Signal()
self._plyctx = None
class _PlayContext:
        '''Internal bookkeeping of animation variables.'''
def __init__(self, frame_range, num_episodes, use_animation, use_offline_render):
self.frame_range = frame_range
self.use_animation = use_animation
self.use_offline_render = use_offline_render
self.episode = 0
self.num_episodes = num_episodes
self.pending_post_frame = False
self.draw_handler = None
self.draw_space = None
self.last_post_frame = 0
def skip_post_frame(self, current_frame):
return (
not self.pending_post_frame or
self.last_post_frame == current_frame or
(
self.use_animation and
self.use_offline_render and
bpy.context.space_data != self.draw_space
)
)
@property
def frameid(self):
'''Returns the current frame id.'''
return bpy.context.scene.frame_current
def play(self, frame_range=None, num_episodes=-1, use_animation=True, use_offline_render=True, use_physics=True):
'''Start the animation loop.
Params
------
frame_range: tuple
Start and end of frame range to play. Note that start and end are inclusive.
num_episodes: int
The number of loops to play. -1 loops forever.
use_animation: bool
Whether to use Blender's non-blocking animation system or use a blocking variant.
            By default True. When True, the Blender UI can refresh and stay responsive, and the
            animation runs at the target FPS. When False, the Blender UI does not refresh and the
            animation runs as fast as it can.
use_offline_render: bool
Whether offline rendering should be supported. By default True. When True, calls to
`OffscreenRenderer` are safe inside the `post_frame` callback.
use_physics: bool
Whether physics should be enabled. Default is True. When True, sets the simulation range
to match the animation frame range.
'''
assert self._plyctx is None, 'Animation already running'
self._plyctx = AnimationController._PlayContext(
frame_range=AnimationController.setup_frame_range(frame_range, physics=use_physics),
num_episodes=(num_episodes if num_episodes >= 0 else sys.maxsize),
use_animation=use_animation,
use_offline_render=use_offline_render
)
if use_animation:
self._play_animation()
else:
self._play_manual()
@staticmethod
def setup_frame_range(frame_range, physics=True):
'''Setup the animation and physics frame range.
Params
------
frame_range: tuple
Start and end (inclusive) frame range to be animated.
Can be None, in which case the scenes frame range is used.
physics: bool
Whether or not to apply the frame range settings to the rigid body
simulation.
Returns
-------
frame_range: tuple
the updated frame range.
'''
if frame_range is None:
frame_range = (bpy.context.scene.frame_start, bpy.context.scene.frame_end)
bpy.context.scene.frame_start = frame_range[0]
bpy.context.scene.frame_end = frame_range[1]
if physics and bpy.context.scene.rigidbody_world:
bpy.context.scene.rigidbody_world.point_cache.frame_start = frame_range[0]
bpy.context.scene.rigidbody_world.point_cache.frame_end = frame_range[1]
return frame_range
def _play_animation(self):
'''Setup and start Blender animation loop.'''
self.pre_play.invoke()
bpy.app.handlers.frame_change_pre.append(self._on_pre_frame)
if self._plyctx.use_offline_render:
            # To be safe, we need to draw from `POST_PIXEL`, not `frame_change_post`.
            # However, `POST_PIXEL` might be called more than once per frame. We therefore
            # set and release `pending_post_frame` to match things up.
_, self._plyctx.draw_space, _ = find_first_view3d()
self._plyctx.draw_handler = bpy.types.SpaceView3D.draw_handler_add(self._on_post_frame, (), 'WINDOW', 'POST_PIXEL')
else:
bpy.app.handlers.frame_change_post.append(self._on_post_frame)
# Set to first frame.
bpy.context.scene.frame_set(self._plyctx.frame_range[0])
        # The following does not block. Note, in --background mode this does nothing.
bpy.ops.screen.animation_play()
def _play_manual(self):
'''Setup and start blocking animation loop.'''
self.pre_play.invoke()
bpy.app.handlers.frame_change_pre.append(self._on_pre_frame)
bpy.app.handlers.frame_change_post.append(self._on_post_frame)
while self._plyctx.episode < self._plyctx.num_episodes:
bpy.context.scene.frame_set(self._plyctx.frame_range[0])
while self.frameid < self._plyctx.frame_range[1]:
bpy.context.scene.frame_set(self.frameid+1)
                if self._plyctx is None:  # The above frame_set might have called _cancel,
                    return                # which in turn deletes _plyctx
def rewind(self):
'''Request resetting the animation to first frame.'''
if self._plyctx is not None:
self._set_frame(self._plyctx.frame_range[0])
def _set_frame(self, frame_index):
'''Step to a specific frame.'''
bpy.context.scene.frame_set(frame_index)
def _on_pre_frame(self, scene, *args):
'''Handle pre-frame events internally.'''
pre_first = (self.frameid == self._plyctx.frame_range[0])
if pre_first:
self.pre_animation.invoke()
self.pre_frame.invoke()
# The following guards us from multiple calls to `_on_post_frame`
# when we hooked into `POST_PIXEL`
self._plyctx.pending_post_frame = True
def _on_post_frame(self, *args):
'''Handle post-frame events internally.'''
if self._plyctx.skip_post_frame(self.frameid):
return
self._plyctx.pending_post_frame = False
self._plyctx.last_post_frame = self.frameid
self.post_frame.invoke()
post_last = (self.frameid == self._plyctx.frame_range[1])
if post_last:
self.post_animation.invoke()
self._plyctx.episode += 1
if self._plyctx.episode == self._plyctx.num_episodes:
self._cancel()
def _cancel(self):
'''Stop the animation.'''
bpy.app.handlers.frame_change_pre.remove(self._on_pre_frame)
        if self._plyctx.draw_handler is not None:
bpy.types.SpaceView3D.draw_handler_remove(self._plyctx.draw_handler, 'WINDOW')
self._plyctx.draw_handler = None
else:
bpy.app.handlers.frame_change_post.remove(self._on_post_frame)
bpy.ops.screen.animation_cancel(restore_frame=False)
self.post_play.invoke()
del self._plyctx
self._plyctx = None
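# Illustrative usage sketch, not part of this module. It assumes this file is
# importable (the import path below is a guess) and that `Signal.add` registers
# a plain callable as a callback.
#
#     from blendtorch.btb.animation import AnimationController
#
#     anim = AnimationController()
#     anim.pre_frame.add(lambda: print('starting frame', anim.frameid))
#     anim.post_frame.add(lambda: print('finished frame', anim.frameid))
#     # Blocking variant, suitable for `blender --background`:
#     anim.play(frame_range=(1, 100), num_episodes=2, use_animation=False)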
# ===================== next file (python) =====================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 11:01:58 2021
@author: root
"""
import sklearn
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
import torch.nn.functional as F
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torch
from utils_two_moons import evaluate_model, brier_score, expectation_calibration_error
from utils_two_moons import NeuralNet, MCDropout, EnsembleNeuralNet
from utils_two_moons import mixup_log_loss
from training_loops import train_model_dropout
from utils_two_moons import MyData
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
def get_device():
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = 'cpu'
return device
device = get_device()
################# 1.CREATE THE DATASETS #################
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
batch_sample = 1000
X,Y = datasets.make_moons(n_samples=batch_sample, shuffle=True, noise=.1, random_state=None)
X_test,Y_test = datasets.make_moons(n_samples=batch_sample, shuffle=True, noise=.1, random_state=None)
plt.scatter(X[:, 0], X[:, 1], c=Y)
# Scale in x and y directions
aug_x = (1.5 - 0.5) * np.random.rand() + 0.5
aug_y = (2.5 - 1.5) * np.random.rand() + 1.5
aug = np.array([aug_x, aug_y])
X_scale = X * aug
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=cm_bright)
plt.scatter(X_scale[:, 0], X_scale[:, 1], marker='+',c=Y, cmap=cm_bright, alpha=0.4)
## rotation of -35 degrees
theta = (np.pi/180)* -35
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
X_rot = np.dot(X,rotation_matrix)
plt.scatter(X[:, 0], X[:, 1], c=Y,cmap=cm_bright)
plt.scatter(X_rot[:, 0], X_rot[:, 1], marker='+', c=Y, cmap=cm_bright, alpha=0.4)
# We create the same dataset with more noise
X_noise,Y_noise = datasets.make_moons(n_samples=batch_sample, shuffle=True, noise=.3, random_state=None)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=cm_bright)
plt.scatter(X_noise[:, 0], X_noise[:, 1], marker='+', c=Y_noise, cmap=cm_bright, alpha=0.4)
train_dataset = MyData(data=X,labels=Y)
test_dataset = MyData(data=X_test,labels=Y_test)
scale_dataset = MyData(X_scale, Y)
rot_dataset = MyData(X_rot, Y)
noise_dataset = MyData(X_noise, Y_noise)
trainLoader = DataLoader(train_dataset, batch_size=batch_sample)
testLoader = DataLoader(test_dataset, batch_size=batch_sample)
scaleLoader = DataLoader(scale_dataset, batch_size=batch_sample)
rotLoader = DataLoader(rot_dataset, batch_size=batch_sample)
noiseLoader = DataLoader(noise_dataset, batch_size=batch_sample)
################# 2.TRAINING #################
# Simple Neural Network
base_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double()
optimizer = torch.optim.Adam(base_nn.parameters(), lr=0.01)
MC_sample=1
crit = nn.CrossEntropyLoss()
n_epochs = 500
_, training_loss = train_model_dropout(base_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2)
# Neural Network with MC Dropout
vi_nn = MCDropout(input_dim=2, hidden_dim=10, output_dim=2).double()
optimizer = torch.optim.Adam(vi_nn.parameters(), lr=0.01)
MC_sample=50
crit = nn.CrossEntropyLoss()
n_epochs = 500
_, training_loss = train_model_dropout(vi_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2)
# project the 64-dimensional data to a lower dimension
def estimate_input_density(data):
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=2, whiten=False)
data = pca.fit_transform(data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
return kde, pca
kde, pca = estimate_input_density(X)
## Train an ensemble of NN
def train_ensemble(N, n_epochs, trainLoader):
ensembles = []
for i in range(N):
base_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double()
optimizer = torch.optim.Adam(base_nn.parameters(), lr=0.01)
MC_sample=1
crit = nn.CrossEntropyLoss()
_, training_loss = train_model_dropout(base_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2)
ensembles.append(base_nn)
return ensembles
ensemble = train_ensemble(5, 500, trainLoader)
ensemble_nn = EnsembleNeuralNet(ensemble)
## Train with mixup
# Simple Neural Network
mu_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double()
optimizer = torch.optim.Adam(mu_nn.parameters(), lr=0.01)
MC_sample=1
crit = mixup_log_loss
n_epochs = 500
_, training_loss = train_model_dropout(mu_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2, mixup=True)
## Train Fast Gradient Sign Method
# Simple Neural Network
fgsm_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double()
optimizer = torch.optim.Adam(fgsm_nn.parameters(), lr=0.01)
MC_sample=1
crit = nn.CrossEntropyLoss()
n_epochs = 500
_, training_loss = train_model_dropout(fgsm_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2, mixup=False, fgsm=True)
#plt.plot(training_loss)
# Train using the density
#base_density_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double()
#optimizer = torch.optim.Adam(base_density_nn.parameters(), lr=0.01)
#MC_sample=1
#crit = nn.CrossEntropyLoss()
#n_epochs = 500
#_, training_loss = train_model_dropout(base_density_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2, kde=kde, pca=pca)
################# 3.EVALUATION BASED ON ACCURACY #################
from uncertainty import sample_lowest_entropy, sample_highest_density, sample_lowest_entropy_highest_density
retained = [50, 60, 70, 80, 90, 100]
def model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, data, label, MC_sample, no_classes):
"""
    This function retains the data with the lowest entropy and the highest density
    at 6 different retention levels and puts each subset into a loader.
    The accuracy at each level is also computed, so the accuracies can be used to
    plot how accuracy changes as more data is retained, while the loaders give
    access to the data selected by the combined entropy/density criterion.
"""
loader50 = sample_lowest_entropy_highest_density(.5, model, kde, pca, data, label, MC_sample, no_classes)
loader60 = sample_lowest_entropy_highest_density(.6, model, kde, pca, data, label, MC_sample, no_classes)
loader70 = sample_lowest_entropy_highest_density(.7, model, kde, pca, data, label, MC_sample, no_classes)
loader80 = sample_lowest_entropy_highest_density(.8, model, kde, pca, data, label, MC_sample, no_classes)
loader90 = sample_lowest_entropy_highest_density(.9, model, kde, pca, data, label, MC_sample, no_classes)
loader100 = sample_lowest_entropy_highest_density(1., model, kde, pca, data, label, MC_sample, no_classes)
acc_50 = evaluate_model(model, loader50, MC_sample, no_classes=2)
acc_60 = evaluate_model(model, loader60, MC_sample, no_classes=2)
acc_70 = evaluate_model(model, loader70, MC_sample, no_classes=2)
acc_80 = evaluate_model(model, loader80, MC_sample, no_classes=2)
acc_90 = evaluate_model(model, loader90, MC_sample, no_classes=2)
acc_100 = evaluate_model(model, loader100, MC_sample, no_classes=2)
acc = [acc_50, acc_60, acc_70, acc_80, acc_90, acc_100]
loaders = [loader50, loader60, loader70, loader80, loader90, loader100]
return acc, loaders
def model_accuracy_over_high_density_data_retained(model,kde, pca, data, label, MC_sample, no_classes):
"""
    This function retains the data with the highest density at 6 different
    retention levels and puts each subset into a loader.
    The accuracy at each level is also computed, so the accuracies can be used to
    plot how accuracy changes as more data is retained, while the loaders give
    access to the data selected by the high-density criterion.
"""
loader50 = sample_highest_density(0.5, kde, pca, data, label)
loader60 = sample_highest_density(0.6, kde, pca, data, label)
loader70 = sample_highest_density(0.7, kde, pca, data, label)
loader80 = sample_highest_density(0.8, kde, pca, data, label)
loader90 = sample_highest_density(0.9, kde, pca, data, label)
    loader100 = sample_highest_density(1., kde, pca, data, label)
acc_50 = evaluate_model(model, loader50, MC_sample, no_classes=2)
acc_60 = evaluate_model(model, loader60, MC_sample, no_classes=2)
acc_70 = evaluate_model(model, loader70, MC_sample, no_classes=2)
acc_80 = evaluate_model(model, loader80, MC_sample, no_classes=2)
acc_90 = evaluate_model(model, loader90, MC_sample, no_classes=2)
acc_100 = evaluate_model(model, loader100, MC_sample, no_classes=2)
acc = [acc_50, acc_60, acc_70, acc_80, acc_90, acc_100]
loaders = [loader50, loader60, loader70, loader80, loader90, loader100]
return acc, loaders
def model_accuracy_over_low_entropy_data_retained(model, data, label, MC_sample, no_classes):
"""
    This function retains the data with the lowest entropy at 6 different
    retention levels and puts each subset into a loader.
    The accuracy at each level is computed and returned along with the associated
    loaders, so the accuracies can be used to plot how accuracy changes as more
    data is retained, while the loaders give access to the data selected by the
    low-entropy criterion.
"""
loader50 = sample_lowest_entropy(0.5, model, data, label, MC_sample, no_classes)
loader60 = sample_lowest_entropy(0.6, model, data, label, MC_sample, no_classes)
loader70 = sample_lowest_entropy(0.7, model, data, label, MC_sample, no_classes)
loader80 = sample_lowest_entropy(0.8, model, data, label, MC_sample, no_classes)
loader90 = sample_lowest_entropy(0.9, model, data, label, MC_sample, no_classes)
loader100 = sample_lowest_entropy(1., model, data, label, MC_sample, no_classes)
acc_50 = evaluate_model(model, loader50, MC_sample, no_classes=2)
acc_60 = evaluate_model(model, loader60, MC_sample, no_classes=2)
acc_70 = evaluate_model(model, loader70, MC_sample, no_classes=2)
acc_80 = evaluate_model(model, loader80, MC_sample, no_classes=2)
acc_90 = evaluate_model(model, loader90, MC_sample, no_classes=2)
acc_100 = evaluate_model(model, loader100, MC_sample, no_classes=2)
acc = [acc_50, acc_60, acc_70, acc_80, acc_90, acc_100]
loaders = [loader50, loader60, loader70, loader80, loader90, loader100]
return acc, loaders
### Comparing sampling methods against each other
def aggregate_accuracy_perturbation_retained_data(model, kde, pca, datasets, labels, MC_sample, no_classes):
X_test, X_scale, X_rot, X_noise = datasets
Y_test, Y, Y_noise = labels
test_ende_acc, test_ende_loaders = model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, X_test, Y_test, MC_sample=1, no_classes=2)
test_en_acc, test_en_loaders = model_accuracy_over_low_entropy_data_retained(model, X_test, Y_test, MC_sample=1, no_classes=2)
test_de_acc, test_de_loaders = model_accuracy_over_high_density_data_retained(model,kde, pca, X_test, Y_test, MC_sample=1, no_classes=2)
scale_ende_acc, scale_ende_loaders = model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, X_scale, Y, MC_sample=1, no_classes=2)
scale_en_acc, scale_en_loaders = model_accuracy_over_low_entropy_data_retained(model, X_scale, Y, MC_sample=1, no_classes=2)
scale_de_acc, scale_de_loaders = model_accuracy_over_high_density_data_retained(model,kde, pca, X_scale, Y, MC_sample=1, no_classes=2)
noise_ende_acc, noise_ende_loaders = model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, X_noise, Y_noise, MC_sample=1, no_classes=2)
noise_en_acc, noise_en_loaders = model_accuracy_over_low_entropy_data_retained(model, X_noise, Y_noise, MC_sample=1, no_classes=2)
noise_de_acc, noise_de_loaders = model_accuracy_over_high_density_data_retained(model,kde, pca, X_noise, Y_noise, MC_sample=1, no_classes=2)
rot_ende_acc, rot_ende_loaders = model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, X_rot, Y, MC_sample=1, no_classes=2)
rot_en_acc, rot_en_loaders = model_accuracy_over_low_entropy_data_retained(model, X_rot, Y, MC_sample=1, no_classes=2)
rot_de_acc, rot_de_loaders = model_accuracy_over_high_density_data_retained(model,kde, pca, X_rot, Y, MC_sample=1, no_classes=2)
aggregate_ende = np.concatenate([test_ende_acc, scale_ende_acc, noise_ende_acc, rot_ende_acc], 1)
aggregate_en = np.concatenate([test_en_acc, scale_en_acc, noise_en_acc, rot_en_acc], 1)
aggregate_de = np.concatenate([test_de_acc, scale_de_acc, noise_de_acc, rot_de_acc], 1)
loaders_ende = [test_ende_loaders, scale_ende_loaders, noise_ende_loaders, rot_ende_loaders]
loaders_en = [test_en_loaders, scale_en_loaders, noise_en_loaders, rot_en_loaders]
loaders_de = [test_de_loaders, scale_de_loaders, noise_de_loaders, rot_de_loaders]
return (aggregate_ende, aggregate_en, aggregate_de), (loaders_ende, loaders_en, loaders_de)
datasets = [X_test, X_scale, X_rot, X_noise]
labels = [Y_test, Y, Y_noise]
(base_ende, base_en, base_de), base_loaders = aggregate_accuracy_perturbation_retained_data(base_nn, kde, pca, datasets, labels, 1, 2)
(vi_ende, vi_en, vi_de), vi_loaders = aggregate_accuracy_perturbation_retained_data(vi_nn, kde, pca, datasets, labels, 50, 2)
(en_ende, en_en, en_de), en_loaders = aggregate_accuracy_perturbation_retained_data(ensemble_nn, kde, pca, datasets, labels, 1, 2)
(mu_ende, mu_en, mu_de), mu_loaders = aggregate_accuracy_perturbation_retained_data(mu_nn, kde, pca, datasets, labels, 1, 2)
(ad_ende, ad_en, ad_de), ad_loaders = aggregate_accuracy_perturbation_retained_data(fgsm_nn, kde, pca, datasets, labels, 1, 2)
fig, ax = plt.subplots(1,5, figsize=(22,4))
ax[0].set_ylabel("Aggregate over perturbations")
ax[0].plot(base_ende.mean(1), label="Entropy-Density")
ax[0].plot(base_en.mean(1), label="Entropy")
ax[0].plot(base_de.mean(1), label="Density")
ax[0].legend()
ax[0].set_title("Softmax")
ax[1].plot(vi_ende.mean(1), label="Entropy-Density")
ax[1].plot(vi_en.mean(1), label="Entropy")
ax[1].plot(vi_de.mean(1), label="Density")
ax[1].legend()
ax[1].set_title("Dropout")
ax[2].plot(en_ende.mean(1), label="Entropy-Density")
ax[2].plot(en_en.mean(1), label="Entropy")
ax[2].plot(en_de.mean(1), label="Density")
ax[2].legend()
ax[2].set_title("Ensemble")
ax[3].plot(mu_ende.mean(1), label="Entropy-Density")
ax[3].plot(mu_en.mean(1), label="Entropy")
ax[3].plot(mu_de.mean(1), label="Density")
ax[3].legend()
ax[3].set_title("Mixup")
ax[4].plot(ad_ende.mean(1), label="Entropy-Density")
ax[4].plot(ad_en.mean(1), label="Entropy")
ax[4].plot(ad_de.mean(1), label="Density")
ax[4].legend()
ax[4].set_title("FGSM")
plt.savefig("retained_aggregate_over_perturbation")
# Plot the aggregate accuracy with data retained
fig, ax = plt.subplots(1,4, figsize=(22,4))
ax[0].plot(base_en[0], label="Entropy")
ax[0].plot(base_de[0], label="Density")
#ax[0].plot(base_test_de2_acc, label="Density relaxed 2")
#ax[0].plot(base_test_de1_1_acc, label="Density relaxed 1.1")
ax[0].plot(base_ende[0], label="Entropy-Density")
ax[0].legend()
ax[0].set_title("Test data")
ax[1].plot(base_en[1], label="Entropy")
ax[1].plot(base_de[1], label="Density")
#ax[1].plot(base_scale_de2_acc, label="Density relaxed 2")
#ax[1].plot(base_scale_de1_1_acc, label="Density relaxed 1.1")
ax[1].plot(base_ende[1], label="Entropy-Density")
ax[1].legend()
ax[1].set_title("Scale data")
ax[2].plot(base_en[2], label="Entropy")
ax[2].plot(base_de[2], label="Density")
#ax[2].plot(base_noise_de2_acc, label="Density relaxed 2")
#ax[2].plot(base_noise_de1_1_acc, label="Density relaxed 1.1")
ax[2].plot(base_ende[2], label="Entropy-Density")
ax[2].legend()
ax[2].set_title("Noise data")
ax[3].plot(base_en[3], label="Entropy")
ax[3].plot(base_de[3], label="Density")
#ax[3].plot(base_rot_de2_acc, label="Density relaxed 2")
#ax[3].plot(base_rot_de1_1_acc, label="Density relaxed 1.1")
ax[3].plot(base_ende[3], label="Entropy-Density")
ax[3].legend()
ax[3].set_title("Rotation data")
plt.savefig("retained_lowestEntropy_highestDensity")
### Comparing methods against each other
# Accuracies for data retained on the test set
base_test_acc, base_test_loaders = model_accuracy_over_low_entropy_data_retained(base_nn, X_test, Y_test, MC_sample=1, no_classes=2)
vi_test_acc, vi_test_loaders = model_accuracy_over_low_entropy_data_retained(vi_nn, X_test, Y_test, MC_sample=50, no_classes=2)
en_test_acc, en_test_loaders = model_accuracy_over_low_entropy_data_retained(ensemble_nn, X_test, Y_test, MC_sample=1, no_classes=2)
mu_test_acc, mu_test_loaders = model_accuracy_over_low_entropy_data_retained(mu_nn, X_test, Y_test, MC_sample=1, no_classes=2)
ad_test_acc, ad_test_loaders = model_accuracy_over_low_entropy_data_retained(fgsm_nn, X_test, Y_test, MC_sample=1, no_classes=2)
pde_test_acc, pde_test_loaders = model_accuracy_over_high_density_data_retained(base_nn,kde, pca, X_test, Y_test, MC_sample=1, no_classes=2)
# Accuracies for data retained on the scale perturbation set
base_scale_acc, base_scale_loaders = model_accuracy_over_low_entropy_data_retained(base_nn, X_scale, Y, MC_sample=1, no_classes=2)
vi_scale_acc, vi_scale_loaders = model_accuracy_over_low_entropy_data_retained(vi_nn, X_scale, Y, MC_sample=50, no_classes=2)
en_scale_acc, en_scale_loaders = model_accuracy_over_low_entropy_data_retained(ensemble_nn, X_scale, Y, MC_sample=1, no_classes=2)
mu_scale_acc, mu_scale_loaders = model_accuracy_over_low_entropy_data_retained(mu_nn, X_scale, Y, MC_sample=1, no_classes=2)
ad_scale_acc, ad_scale_loaders = model_accuracy_over_low_entropy_data_retained(fgsm_nn, X_scale, Y, MC_sample=1, no_classes=2)
pde_scale_acc, pde_scale_loaders = model_accuracy_over_high_density_data_retained(base_nn,kde, pca, X_scale, Y, MC_sample=1, no_classes=2)
# Accuracies for data retained on the rotation perturbation set
base_rot_acc, base_rot_loaders = model_accuracy_over_low_entropy_data_retained(base_nn, X_rot, Y, MC_sample=1, no_classes=2)
vi_rot_acc, vi_rot_loaders = model_accuracy_over_low_entropy_data_retained(vi_nn, X_rot, Y, MC_sample=50, no_classes=2)
en_rot_acc, en_rot_loaders = model_accuracy_over_low_entropy_data_retained(ensemble_nn, X_rot, Y, MC_sample=1, no_classes=2)
mu_rot_acc, mu_rot_loaders = model_accuracy_over_low_entropy_data_retained(mu_nn, X_rot, Y, MC_sample=1, no_classes=2)
ad_rot_acc, ad_rot_loaders = model_accuracy_over_low_entropy_data_retained(fgsm_nn, X_rot, Y, MC_sample=1, no_classes=2)
pde_rot_acc, pde_rot_loaders = model_accuracy_over_high_density_data_retained(base_nn,kde, pca, X_rot, Y, MC_sample=1, no_classes=2)
# Accuracies for data retained on the noise perturbation set
base_noise_acc, base_noise_loaders = model_accuracy_over_low_entropy_data_retained(base_nn, X_noise, Y_noise, MC_sample=1, no_classes=2)
vi_noise_acc, vi_noise_loaders = model_accuracy_over_low_entropy_data_retained(vi_nn, X_noise, Y_noise, MC_sample=50, no_classes=2)
en_noise_acc, en_noise_loaders = model_accuracy_over_low_entropy_data_retained(ensemble_nn, X_noise, Y_noise, MC_sample=1, no_classes=2)
mu_noise_acc, mu_noise_loaders = model_accuracy_over_low_entropy_data_retained(mu_nn, X_noise, Y_noise, MC_sample=1, no_classes=2)
ad_noise_acc, ad_noise_loaders = model_accuracy_over_low_entropy_data_retained(fgsm_nn, X_noise, Y_noise, MC_sample=1, no_classes=2)
pde_noise_acc, pde_noise_loaders = model_accuracy_over_high_density_data_retained(base_nn,kde, pca, X_noise, Y_noise, MC_sample=1, no_classes=2)
# Plot the aggregate accuracy with data retained
fig, ax = plt.subplots(1,4, figsize=(22,4))
ax[0].plot(retained, base_test_acc, label="Base")
ax[0].plot(retained, vi_test_acc, label="Dropout")
ax[0].plot(retained, en_test_acc, label="Ensemble")
ax[0].plot(retained, mu_test_acc, label="Mixup")
ax[0].plot(retained, ad_test_acc, label="FGSM")
ax[0].plot(retained, pde_test_acc, label="PDE")
ax[0].set_title("Test Set")
ax[1].plot(retained, base_scale_acc, label="Base")
ax[1].plot(retained, vi_scale_acc, label="Dropout")
ax[1].plot(retained, en_scale_acc, label="Ensemble")
ax[1].plot(retained, mu_scale_acc, label="Mixup")
ax[1].plot(retained, ad_scale_acc, label="FGSM")
ax[1].plot(retained, pde_scale_acc, label="PDE")
ax[1].set_title("Scale Perturbation")
ax[2].plot(retained, base_rot_acc, label="Base")
ax[2].plot(retained, vi_rot_acc, label="Dropout")
ax[2].plot(retained, en_rot_acc, label="Ensemble")
ax[2].plot(retained, mu_rot_acc, label="Mixup")
ax[2].plot(retained, ad_rot_acc, label="FGSM")
ax[2].plot(retained, pde_rot_acc, label="PDE")
ax[2].set_title("Rotation Perturbation")
ax[3].plot(retained, base_noise_acc, label="Base")
ax[3].plot(retained, vi_noise_acc, label="Dropout")
ax[3].plot(retained, en_noise_acc, label="Ensemble")
ax[3].plot(retained, mu_noise_acc, label="Mixup")
ax[3].plot(retained, ad_noise_acc, label="FGSM")
ax[3].plot(retained, pde_noise_acc, label="PDE")
ax[3].set_title("Noise Perturbation")
ax[3].legend(loc="upper left", bbox_to_anchor=(1,1))
plt.savefig("retained_aggregate_accuracy", dpi=300)
################ 4. EVALUATION BASED ON AUC ################
def compute_auc_models(model, loaders, vi=False):
loader50, loader60, loader70, loader80, loader90, loader100 = loaders
    if vi:
Y_pred50 = torch.cat([torch.sigmoid(model(torch.tensor(loader50.dataset.data)))[:,1:] for i in range(50)],1).mean(1).detach().numpy()
Y_pred60 = torch.cat([torch.sigmoid(model(torch.tensor(loader60.dataset.data)))[:,1:] for i in range(50)],1).mean(1).detach().numpy()
Y_pred70 = torch.cat([torch.sigmoid(model(torch.tensor(loader70.dataset.data)))[:,1:] for i in range(50)],1).mean(1).detach().numpy()
Y_pred80 = torch.cat([torch.sigmoid(model(torch.tensor(loader80.dataset.data)))[:,1:] for i in range(50)],1).mean(1).detach().numpy()
Y_pred90 = torch.cat([torch.sigmoid(model(torch.tensor(loader90.dataset.data)))[:,1:] for i in range(50)],1).mean(1).detach().numpy()
Y_pred100 = torch.cat([torch.sigmoid(model(torch.tensor(loader100.dataset.data)))[:,1:] for i in range(50)],1).mean(1).detach().numpy()
else:
Y_pred50 = torch.sigmoid(model(torch.tensor(loader50.dataset.data)))[:,1].detach().numpy()
Y_pred60 = torch.sigmoid(model(torch.tensor(loader60.dataset.data)))[:,1].detach().numpy()
Y_pred70 = torch.sigmoid(model(torch.tensor(loader70.dataset.data)))[:,1].detach().numpy()
Y_pred80 = torch.sigmoid(model(torch.tensor(loader80.dataset.data)))[:,1].detach().numpy()
Y_pred90 = torch.sigmoid(model(torch.tensor(loader90.dataset.data)))[:,1].detach().numpy()
Y_pred100 = torch.sigmoid(model(torch.tensor(loader100.dataset.data)))[:,1].detach().numpy()
auc50 = sklearn.metrics.roc_auc_score(loader50.dataset.labels, Y_pred50)
auc60 = sklearn.metrics.roc_auc_score(loader60.dataset.labels, Y_pred60)
auc70 = sklearn.metrics.roc_auc_score(loader70.dataset.labels, Y_pred70)
auc80 = sklearn.metrics.roc_auc_score(loader80.dataset.labels, Y_pred80)
auc90 = sklearn.metrics.roc_auc_score(loader90.dataset.labels, Y_pred90)
auc100 = sklearn.metrics.roc_auc_score(loader100.dataset.labels, Y_pred100)
return [auc50, auc60, auc70, auc80, auc90, auc100]
# AUC for data retained on the test set
base_auc_test = compute_auc_models(base_nn, base_test_loaders, vi=False)
vi_auc_test = compute_auc_models(vi_nn, vi_test_loaders, vi=True)
en_auc_test = compute_auc_models(ensemble_nn, en_test_loaders, vi=False)
mu_auc_test = compute_auc_models(mu_nn, mu_test_loaders, vi=False)
ad_auc_test = compute_auc_models(fgsm_nn, ad_test_loaders, vi=False)
pde_auc_test = compute_auc_models(base_nn, pde_test_loaders, vi=False)
# AUC for data retained on the scale perturbation set
base_auc_scale = compute_auc_models(base_nn, base_scale_loaders, vi=False)
vi_auc_scale = compute_auc_models(vi_nn, vi_scale_loaders, vi=True)
en_auc_scale = compute_auc_models(ensemble_nn, en_scale_loaders, vi=False)
mu_auc_scale = compute_auc_models(mu_nn, mu_scale_loaders, vi=False)
ad_auc_scale = compute_auc_models(fgsm_nn, ad_scale_loaders, vi=False)
pde_auc_scale = compute_auc_models(base_nn, pde_scale_loaders, vi=False)
# AUC for data retained on the rotation perturbation set
base_auc_rot = compute_auc_models(base_nn, base_rot_loaders, vi=False)
vi_auc_rot = compute_auc_models(vi_nn, vi_rot_loaders, vi=True)
en_auc_rot = compute_auc_models(ensemble_nn, en_rot_loaders, vi=False)
mu_auc_rot = compute_auc_models(mu_nn, mu_rot_loaders, vi=False)
ad_auc_rot = compute_auc_models(fgsm_nn, ad_rot_loaders, vi=False)
pde_auc_rot = compute_auc_models(base_nn, pde_rot_loaders, vi=False)
# AUC for data retained on the noise perturbation set
base_auc_noise = compute_auc_models(base_nn, base_noise_loaders, vi=False)
vi_auc_noise = compute_auc_models(vi_nn, vi_noise_loaders, vi=True)
en_auc_noise = compute_auc_models(ensemble_nn, en_noise_loaders, vi=False)
mu_auc_noise = compute_auc_models(mu_nn, mu_noise_loaders, vi=False)
ad_auc_noise = compute_auc_models(fgsm_nn, ad_noise_loaders, vi=False)
pde_auc_noise = compute_auc_models(base_nn, pde_noise_loaders, vi=False)
# Plot the aggregate accuracy with data retained
fig, ax = plt.subplots(1,4, figsize=(22,4))
ax[0].plot(retained, base_auc_test, label="Base")
ax[0].plot(retained, vi_auc_test, label="Dropout")
ax[0].plot(retained, en_auc_test, label="Ensemble")
ax[0].plot(retained, mu_auc_test, label="Mixup")
ax[0].plot(retained, ad_auc_test, label="FGSM")
ax[0].plot(retained, pde_auc_test, label="PDE")
ax[0].set_title("Test Set")
ax[1].plot(retained, base_auc_scale, label="Base")
ax[1].plot(retained, vi_auc_scale, label="Dropout")
ax[1].plot(retained, en_auc_scale, label="Ensemble")
ax[1].plot(retained, mu_auc_scale, label="Mixup")
ax[1].plot(retained, ad_auc_scale, label="FGSM")
ax[1].plot(retained, pde_auc_scale, label="PDE")
ax[1].set_title("Scale Perturbation")
ax[2].plot(retained, base_auc_rot, label="Base")
ax[2].plot(retained, vi_auc_rot, label="Dropout")
ax[2].plot(retained, en_auc_rot, label="Ensemble")
ax[2].plot(retained, mu_auc_rot, label="Mixup")
ax[2].plot(retained, ad_auc_rot, label="FGSM")
ax[2].plot(retained, pde_auc_rot, label="PDE")
ax[2].set_title("Rotation Perturbation")
ax[3].plot(retained, base_auc_noise, label="Base")
ax[3].plot(retained, vi_auc_noise, label="Dropout")
ax[3].plot(retained, en_auc_noise, label="Ensemble")
ax[3].plot(retained, mu_auc_noise, label="Mixup")
ax[3].plot(retained, ad_auc_noise, label="FGSM")
ax[3].plot(retained, pde_auc_noise, label="PDE")
ax[3].set_title("Noise Perturbation")
ax[3].legend(loc="upper left", bbox_to_anchor=(1,1))
plt.savefig("retained_aggregate_auc", dpi=300)
################# 5.DRAW DECISION BOUNDARIES #################
def negatify(X):
X = np.copy(X)
neg = X < 0.5
    X[neg] = X[neg] - 1
return X
# Create a mesh
h = .02 # step size in the mesh
x_min = np.concatenate([X[:, 0], X_rot[:, 0], X_scale[:, 0], X_noise[:, 0]]).min()
x_max = np.concatenate([X[:, 0], X_rot[:, 0], X_scale[:, 0], X_noise[:, 0]]).max()
y_min = np.concatenate([X[:, 1], X_rot[:, 1], X_scale[:, 1], X_noise[:, 1]]).min()
y_max = np.concatenate([X[:, 1], X_rot[:, 1], X_scale[:, 1], X_noise[:, 1]]).max()
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Predict for each point of the mesh
base_Z = torch.sigmoid(base_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1])
# For dropout we collect 50 stochastic forward passes, concatenate them, and average the result
vi_Z = torch.cat([torch.sigmoid(vi_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1:]) for i in range(50)],1).mean(1)
en_Z = torch.sigmoid(ensemble_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1])
mu_Z = torch.sigmoid(mu_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1])
ad_Z = torch.sigmoid(fgsm_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1])
base_Z = base_Z.reshape(xx.shape).detach().numpy()
base_Z_ = negatify(base_Z)
vi_Z = vi_Z.reshape(xx.shape).detach().numpy()
vi_Z_ = negatify(vi_Z)
en_Z = en_Z.reshape(xx.shape).detach().numpy()
en_Z_ = negatify(en_Z)
mu_Z = mu_Z.reshape(xx.shape).detach().numpy()
mu_Z_ = negatify(mu_Z)
ad_Z = ad_Z.reshape(xx.shape).detach().numpy()
ad_Z_ = negatify(ad_Z)
p_x = kde.score_samples(pca.transform(np.c_[xx.ravel(), yy.ravel()]))
p_x = p_x.reshape(xx.shape)
p_x_e = np.power(np.exp(1), p_x)
p_x_2 = np.power(2, p_x)
p_x_1_5 = np.power(1.5, p_x)
cm = plt.cm.RdBu
plt.rcParams.update({'font.size': 14})
##### 5.1 Plot on the test dataset
fig, ax = plt.subplots(6,6, figsize=(24,22))
ax[0,0].set_title("50 % retained")
ax[0,1].set_title("60 % retained")
ax[0,2].set_title("70 % retained")
ax[0,3].set_title("80 % retained")
ax[0,4].set_title("90 % retained")
ax[0,5].set_title("100 % retained")
ax[0,0].set_ylabel("Softmax")
ax[1,0].set_ylabel("Dropout")
ax[2,0].set_ylabel("Ensemble")
ax[3,0].set_ylabel("Mixup")
ax[4,0].set_ylabel("FGSM")
ax[5,0].set_ylabel("PDE")
for i in range(0,6):
if i==0:
loaders = base_test_loaders
Z = base_Z
elif i==1:
loaders = vi_test_loaders
Z = vi_Z
elif i==2:
loaders = en_test_loaders
Z = en_Z
elif i==3:
loaders = mu_test_loaders
Z = mu_Z
elif i==4:
loaders = ad_test_loaders
Z = ad_Z
else:
loaders = pde_test_loaders
Z = base_Z_ * p_x_e
for j in range(0,6):
base_x, base_y = next(iter(loaders[j]))
im = ax[i,j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
ax[i,j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
ax[i,j].scatter(X_test[:, 0], X_test[:, 1], c=Y_test, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_test", dpi=300)
##### 5.2 Plot on the scale dataset
fig, ax = plt.subplots(6,6, figsize=(24,22))
ax[0,0].set_title("50 % retained")
ax[0,1].set_title("60 % retained")
ax[0,2].set_title("70 % retained")
ax[0,3].set_title("80 % retained")
ax[0,4].set_title("90 % retained")
ax[0,5].set_title("100 % retained")
ax[0,0].set_ylabel("Softmax")
ax[1,0].set_ylabel("Dropout")
ax[2,0].set_ylabel("Ensemble")
ax[3,0].set_ylabel("Mixup")
ax[4,0].set_ylabel("FGSM")
ax[5,0].set_ylabel("PDE")
for i in range(0,6):
if i==0:
loaders = base_scale_loaders
Z = base_Z
elif i==1:
loaders = vi_scale_loaders
Z = vi_Z
elif i==2:
loaders = en_scale_loaders
Z = en_Z
elif i==3:
loaders = mu_scale_loaders
Z = mu_Z
elif i==4:
loaders = ad_scale_loaders
Z = ad_Z
else:
loaders = pde_scale_loaders
Z = base_Z_ * p_x_e
for j in range(0,6):
base_x, base_y = next(iter(loaders[j]))
im = ax[i,j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
ax[i,j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
ax[i,j].scatter(X_scale[:, 0], X_scale[:, 1], c=Y, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_scale", dpi=300)
##### 5.3 Plot on the rotation dataset
fig, ax = plt.subplots(6,6, figsize=(24,22))
ax[0,0].set_title("50 % retained")
ax[0,1].set_title("60 % retained")
ax[0,2].set_title("70 % retained")
ax[0,3].set_title("80 % retained")
ax[0,4].set_title("90 % retained")
ax[0,5].set_title("100 % retained")
ax[0,0].set_ylabel("Softmax")
ax[1,0].set_ylabel("Dropout")
ax[2,0].set_ylabel("Ensemble")
ax[3,0].set_ylabel("Mixup")
ax[4,0].set_ylabel("FGSM")
ax[5,0].set_ylabel("PDE")
for i in range(0,6):
if i==0:
loaders = base_rot_loaders
Z = base_Z
elif i==1:
loaders = vi_rot_loaders
Z = vi_Z
elif i==2:
loaders = en_rot_loaders
Z = en_Z
elif i==3:
loaders = mu_rot_loaders
Z = mu_Z
elif i==4:
loaders = ad_rot_loaders
Z = ad_Z
else:
loaders = pde_rot_loaders
Z = base_Z_ * p_x_e
for j in range(0,6):
base_x, base_y = next(iter(loaders[j]))
im = ax[i,j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
ax[i,j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
ax[i,j].scatter(X_rot[:, 0], X_rot[:, 1], c=Y, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_rot", dpi=300)
##### 5.4 Plot on the noise dataset
fig, ax = plt.subplots(6,6, figsize=(24,22))
ax[0,0].set_title("50 % retained")
ax[0,1].set_title("60 % retained")
ax[0,2].set_title("70 % retained")
ax[0,3].set_title("80 % retained")
ax[0,4].set_title("90 % retained")
ax[0,5].set_title("100 % retained")
ax[0,0].set_ylabel("Softmax")
ax[1,0].set_ylabel("Dropout")
ax[2,0].set_ylabel("Ensemble")
ax[3,0].set_ylabel("Mixup")
ax[4,0].set_ylabel("FGSM")
ax[5,0].set_ylabel("PDE")
for i in range(0,6):
if i==0:
loaders = base_noise_loaders
Z = base_Z
elif i==1:
loaders = vi_noise_loaders
Z = vi_Z
elif i==2:
loaders = en_noise_loaders
Z = en_Z
elif i==3:
loaders = mu_noise_loaders
Z = mu_Z
elif i==4:
loaders = ad_noise_loaders
Z = ad_Z
else:
loaders = pde_noise_loaders
Z = base_Z_ * p_x_e
for j in range(0,6):
base_x, base_y = next(iter(loaders[j]))
im = ax[i,j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
ax[i,j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
ax[i,j].scatter(X_noise[:, 0], X_noise[:, 1], c=Y_noise, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_noise", dpi=300)
##### 5.5 Compare ENDE, DE, EN with base_nn
# on test dataset
fig, ax = plt.subplots(3,6, figsize=(24,18))
ax[0,0].set_title("50 % retained")
ax[0,1].set_title("60 % retained")
ax[0,2].set_title("70 % retained")
ax[0,3].set_title("80 % retained")
ax[0,4].set_title("90 % retained")
ax[0,5].set_title("100 % retained")
ax[0,0].set_ylabel("Entropy-Density")
ax[1,0].set_ylabel("Entropy")
ax[2,0].set_ylabel("Density")
for i in range(0,3):
if i==0:
loaders = base_loaders[0][0]
Z = base_Z
elif i==1:
loaders = base_loaders[1][0]
Z = base_Z
else:
loaders = base_loaders[2][0]
Z = base_Z_ * p_x_e
for j in range(0,6):
base_x, base_y = next(iter(loaders[j]))
im = ax[i,j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
ax[i,j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
ax[i,j].scatter(X_noise[:, 0], X_noise[:, 1], c=Y_noise, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_test_ende_en_de", dpi=300)
# on scale dataset
fig, ax = plt.subplots(3,6, figsize=(24,18))
ax[0,0].set_title("50 % retained")
ax[0,1].set_title("60 % retained")
ax[0,2].set_title("70 % retained")
ax[0,3].set_title("80 % retained")
ax[0,4].set_title("90 % retained")
ax[0,5].set_title("100 % retained")
ax[0,0].set_ylabel("Entropy-Density")
ax[1,0].set_ylabel("Entropy")
ax[2,0].set_ylabel("Density")
for i in range(0,3):
if i==0:
loaders = base_loaders[0][1]
Z = base_Z
elif i==1:
loaders = base_loaders[1][1]
Z = base_Z
else:
loaders = base_loaders[2][1]
Z = base_Z_ * p_x_e
for j in range(0,6):
base_x, base_y = next(iter(loaders[j]))
im = ax[i,j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
ax[i,j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
ax[i,j].scatter(X_noise[:, 0], X_noise[:, 1], c=Y_noise, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_scale_ende_en_de", dpi=300)
|
python
|
from django.apps import AppConfig
class MetricConfig(AppConfig):
label = "metric"
name = "edd.metric"
verbose_name = "Metric"
def ready(self):
# make sure to load/register all the signal handlers
from . import signals # noqa: F401
|
python
|
from scipy.special import logsumexp  # scipy.misc.common.logsumexp was removed in newer SciPy releases
from kameleon_rks.densities.gaussian import sample_gaussian, \
log_gaussian_pdf_multiple
from kameleon_rks.proposals.ProposalBase import ProposalBase
import kameleon_rks.samplers.tools
from kameleon_rks.tools.covariance_updates import log_weights_to_lmbdas, \
update_mean_cov_L_lmbda
from kameleon_rks.tools.log import Log
import numpy as np
logger = Log.get_logger()
class StaticMetropolis(ProposalBase):
"""
    Implements the classic (isotropic) MH sampler. Allows tuning the scaling based on the acceptance rate.
"""
def __init__(self, D, target_log_pdf, step_size, schedule=None, acc_star=None):
ProposalBase.__init__(self, D, target_log_pdf, step_size, schedule, acc_star)
self.L_C = np.linalg.cholesky(np.eye(D))
def proposal_log_pdf(self, current, proposals):
log_probs = log_gaussian_pdf_multiple(proposals, mu=current,
Sigma=self.L_C, is_cholesky=True,
cov_scaling=self.step_size)
return log_probs
def proposal(self, current, current_log_pdf, **kwargs):
if current_log_pdf is None:
current_log_pdf = self.target_log_pdf(current)
proposal = sample_gaussian(N=1, mu=current, Sigma=self.L_C,
is_cholesky=True, cov_scaling=self.step_size)[0]
forw_backw_log_prob = self.proposal_log_pdf(current, proposal[np.newaxis, :])[0]
proposal_log_pdf = self.target_log_pdf(proposal)
results_kwargs = {}
        # symmetric proposal: the density of proposing current from proposal equals the forward density
return proposal, proposal_log_pdf, current_log_pdf, forw_backw_log_prob, forw_backw_log_prob, results_kwargs
class AdaptiveMetropolis(StaticMetropolis):
"""
Implements the adaptive MH. Performs efficient low-rank updates of Cholesky
factor of covariance. Covariance itself is not stored/updated, only its Cholesky factor.
"""
def __init__(self, D, target_log_pdf, step_size, gamma2, schedule=None, acc_star=None):
StaticMetropolis.__init__(self, D, target_log_pdf, step_size, schedule, acc_star)
self.gamma2 = gamma2
# assume that we have observed fake samples (makes system well-posed)
# these have covariance gamma2*I, which is a regulariser
# the mean and log_sum_weights however, is taken from the first set of samples in update
self.mu = None
self.L_C = None
self.log_sum_weights = None
def set_batch(self, Z):
# override streaming solution
self.mu = np.mean(Z, axis=0)
cov = np.cov(Z.T)
self.L_C = np.linalg.cholesky(cov + np.eye(self.D) * self.gamma2)
self.log_sum_weights = np.log(len(Z))
def update(self, Z, num_new=1, log_weights=None):
assert(len(Z) >= num_new)
# dont do anything if no data observed
if num_new == 0:
return
if log_weights is not None:
assert len(log_weights) == len(Z)
else:
log_weights = np.zeros(len(Z))
Z_new = Z[-num_new:]
log_weights_new = log_weights[-num_new:]
# first update: use first of X and log_weights, and then discard
if self.log_sum_weights is None:
# assume have observed fake terms, which is needed for making the system well-posed
            # the L_C encodes that the fake terms had covariance self.gamma2 * I, which acts as a regulariser
self.L_C = np.eye(self.D) * np.sqrt(self.gamma2)
self.log_sum_weights = log_weights_new[0]
self.mu = Z_new[0]
Z_new = Z_new[1:]
log_weights_new = log_weights_new[1:]
num_new -= 1
# dont do anything if no data observed
if len(Z_new) == 0:
return
# generate lmbdas that correspond to weighted averages
lmbdas = log_weights_to_lmbdas(self.log_sum_weights, log_weights_new)
# low-rank update of Cholesky, costs O(d^2) only
old_L_C = np.array(self.L_C, copy=True)
self.mu, self.L_C = update_mean_cov_L_lmbda(Z_new, self.mu, self.L_C, lmbdas)
if np.any(np.isnan(self.L_C)) or np.any(np.isinf(self.L_C)):
logger.warning("Numerical error while updating Cholesky factor of C.\n"
"Before update:\n%s\n"
"After update:\n%s\n"
"Updating data:\n%s\n"
"Updating log weights:\n%s\n"
"Updating lmbdas:\n%s\n"
% (str(old_L_C), str(self.L_C), str(Z_new), str(log_weights_new), str(lmbdas))
)
raise RuntimeError("Numerical error while updating Cholesky factor of C.")
# update terms and weights
self.log_sum_weights = logsumexp(list(log_weights) + [self.log_sum_weights])
class AdaptiveIndependentMetropolis(AdaptiveMetropolis):
"""
Implements an independent Gaussian proposal with given parameters.
However, stores mean and covariance in the same fashion as AdaptiveMetropolis
for debugging purposes, and debug outputs them
Schedule and acc_star are ignored.
"""
def __init__(self, D, target_log_pdf, step_size, gamma2, proposal_mu, proposal_L_C):
AdaptiveMetropolis.__init__(self, D, target_log_pdf, step_size, gamma2)
self.proposal_mu = proposal_mu
self.proposal_L_C = proposal_L_C
# store all log_weights of all proposals
self.log_weights = []
def proposal_log_pdf(self, current, proposals):
log_probs = log_gaussian_pdf_multiple(proposals, mu=self.proposal_mu,
Sigma=self.proposal_L_C, is_cholesky=True,
cov_scaling=self.step_size)
return log_probs
def proposal(self, current, current_log_pdf, **kwargs):
if current_log_pdf is None:
current_log_pdf = self.target_log_pdf(current)
proposal = sample_gaussian(N=1, mu=self.proposal_mu, Sigma=self.proposal_L_C,
is_cholesky=True, cov_scaling=self.step_size)[0]
forw_backw_log_prob = self.proposal_log_pdf(None, proposal[np.newaxis, :])[0]
backw_backw_log_prob = self.proposal_log_pdf(None, current[np.newaxis, :])[0]
proposal_log_pdf = self.target_log_pdf(proposal)
results_kwargs = {}
self.log_weights.append(proposal_log_pdf - forw_backw_log_prob)
        # independent proposal: forward and backward log-densities are evaluated separately under the fixed proposal
return proposal, proposal_log_pdf, current_log_pdf, forw_backw_log_prob, backw_backw_log_prob, results_kwargs
def get_current_ess(self):
return kameleon_rks.samplers.tools.compute_ess(self.log_weights, normalize=True)
def update(self, Z, num_new, log_weights):
AdaptiveMetropolis.update(self, Z, num_new, log_weights)
cov = np.dot(self.L_C, self.L_C.T)
var = np.diag(cov)
logger.debug("mu: %s" % str(self.mu))
logger.debug("var: %s" % str(var))
logger.debug("cov: %s" % str(cov))
logger.debug("norm(mu): %.3f" % np.linalg.norm(self.mu))
logger.debug("np.mean(var): %.3f" % np.mean(var))
|
python
|
__copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
from enum import IntEnum
from a_tuin.metadata import (
ObjectFieldGroupBase,
StringField,
ObjectReferenceField,
Collection,
DescriptionField,
IntField,
IntEnumField,
)
class PPSStatus(IntEnum):
Requested = 1
Provided = 2
NotIncomeTaxPayer = 3 # parishioner is not an Irish income tax payer
NotProvided = 4 # parishioner responded but refused to provide PPS
ExcludedByAdmin = 5 # parishioner excluded through admin discretion
class PPSStatusField(IntEnumField):
def __init__(self, name, is_mutable=True, required=False, default=None, description=None, validation=None):
super().__init__(name, PPSStatus, is_mutable, required, default, description, validation)
class PPS(ObjectFieldGroupBase):
# Data usage
#
# Records PPS number for an individual in order that a tax rebate may be claimed
# on funds donated to the parish.
public_interface = (
ObjectReferenceField('person', required=True),
PPSStatusField(
'status',
required=True,
default=PPSStatus.Requested,
description='Has the parishioner responded to a request for a PPS?'
),
StringField('pps'),
StringField('name_override'),
IntField('chy3_valid_year', description='The first financial year the most recent CHY3 form is valid from'),
DescriptionField('notes')
)
class PPSCollection(Collection):
pass
|
python
|
"""
Tools for segmenting positional AIS messages into continuous tracks.
Includes a CLI plugin for `gpsdio` to run the algorithm.
"""
from gpsdio_segment.segment import BadSegment
from gpsdio_segment.segment import Segment
from gpsdio_segment.core import Segmentizer
__version__ = '0.20.2'
__author__ = 'Paul Woods'
__email__ = '[email protected]'
__source__ = 'https://github.com/SkyTruth/gpsdio-segment'
__license__ = """
Copyright 2015-2017 SkyTruth
Authors:
Kevin Wurster <[email protected]>
Paul Woods <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
python
|
# -*- coding: utf-8 -*-
"""
A utility library for interfacing with the SRF-08 and SRF-10 ultrasonic
rangefinders.
http://www.robot-electronics.co.uk/htm/srf08tech.shtml
http://www.robot-electronics.co.uk/htm/srf10tech.htm
Utilizes I2C library for reads and writes.
The MIT License (MIT)
Copyright (c) 2015 Martin Clemons
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyb import I2C
class SRF_RANGE_UNITS:
""" SRF-XX rangefinder constants. """
IN = 0x50
CM = 0x51
US = 0x52
class SRFBase(object):
"""
A base class for SRF08 and SRF10 rangefinders.
Essentially a SRF-xx rangefinder emulates a 24xx series EEPROM and implements
a number of user readable and writable registers. These registers map to
the specific hardware functions and readings from the rangefinder.
Since the '08 and '10 are very similar in their functionality this class
serves as a base implementation which can be overridden to form a class
for a specific sensor.
"""
def __init__(self, *args, **kwargs):
"""
If any arguments are present self.init() is called.
"""
super(SRFBase, self).__init__()
self.i2c = None
self.bus_addr = None
self.rxb = bytearray(4)
if len(args) > 0:
self.init(*args, **kwargs)
def init(self, *args, **kwargs):
"""
Initialize a SRF sensor instance.
There are two options for parameters passed when calling this function:
1. Pass the initialization parameters for an pyb.I2C object.
The initialization parameters will be used to initialize a new I2C
instance which will be used to communicate with the sensor.
If bus_address has not been set, a bus scan will be performed and the
first available address will be used.
2. Pass an already initialized pyb.I2C object.
The instance passed in will be used to communicate with the sensor.
The I2C instance should be initialized before any methods which
require communication are called.
"""
if len(args) < 1:
raise TypeError('Please supply an I2C object or bus number.')
if type(args[0]) is int:
# assume first argument is bus number for I2C constructor
self.i2c = I2C(*args, **kwargs)
if self.bus_addr is None:
try:
# assign address of first device found
self.bus_addr = self.i2c.scan()[0]
except TypeError:
raise Exception('Sensor not found on I2C bus.')
else:
# first argument is initialized I2C bus object
self.i2c = args[0]
def deinit(self):
"""
De-init sensor instance.
Calls deinit() on I2C instance associated with sensor, and also resets
sensor bus address.
"""
try:
self.i2c.deinit()
except AttributeError:
pass
self.i2c = None
self.bus_addr = None
def bus_address(self, *args):
"""
Sets the rangefinder I2C bus address if provided, otherwise returns the
current rangefinder bus address.
"""
if len(args) > 0:
self.bus_addr = args[0]
else:
return self.bus_addr
def scan_bus(self):
"""
Scans I2C bus and returns a list of addresses found.
"""
return self.i2c.scan()
def sw_rev(self):
"""
Returns the software revision of sensor.
"""
        rev = bytearray(1)
        self.i2c.mem_read(rev, self.bus_addr, 0)
        if rev[0] == 255:
            # 0xFF typically indicates the sensor is busy or did not respond
            raise Exception('Error reading from sensor.')
return rev[0]
def set_max_range(self, range_mm):
"""
Sets the maximum range of the sensor.
:param range_mm: Integer range in mm, min. 43mm max 11008mm.
:return:
"""
if range_mm < 43:
raise ValueError('Minimum range is 43mm.')
if range_mm > 11008:
raise ValueError('Maximum range is 11008mm.')
c = int(range_mm) // 43 - 1
self.i2c.mem_write(c, self.bus_addr, 2)
def set_analog_gain(self, gain):
"""
Sets the analog gain of the sensor.
:param gain: Sensor gain register value.
:return:
"""
if gain < 0:
raise ValueError('Gain register must be greater than 0.')
self.i2c.mem_write(int(gain), self.bus_addr, 1)
def measure_range(self, units=SRF_RANGE_UNITS.CM):
"""
Initiate rangefinder ranging.
        :param units: SRF_RANGE_UNITS, either IN, CM, or US for µseconds.
:return:
"""
self.i2c.mem_write(units, self.bus_addr, 0)
def read_range(self):
"""
Read the range registers after ranging has completed.
:param:
:return: A list of integer range values in the units specified by
measure_range(). In the case of sensors which report multiple echos,
the first item in the list represents the first echo and the nth item
represents the nth echo. If no echos were returned list will be empty.
"""
self.i2c.mem_read(self.rxb, self.bus_addr, 0)
values = []
# skip first 2 bytes, then unpack high and low bytes from buffer data
        # data is packed in big-endian form
for i in range(2, len(self.rxb), 2):
range_val = (self.rxb[i] << 8) + self.rxb[i+1]
if range_val > 0:
values.append(range_val)
return values
class SRF08(SRFBase):
"""
A SRF08 Rangefinder.
Supports up to 17 echo range values.
Maximum analog gain of 31.
TODO: Add ability to read light meter.
"""
def __init__(self, *args, **kwargs):
super(SRF08, self).__init__(*args, **kwargs)
self.rxb = bytearray(36)
def __str__(self):
return '<SRF08 address {} on {}>'.format(self.bus_addr, self.i2c)
def set_analog_gain(self, gain):
if gain > 31:
raise ValueError('Gain register must be less than or equal to 31.')
super(SRF08, self).set_analog_gain(gain)
class SRF10(SRFBase):
"""
A SRF10 rangefinder.
Supports single echo range value.
Maximum analog gain of 16.
"""
def __str__(self):
return '<SRF10 address {} on {}>'.format(self.bus_addr, self.i2c)
def set_analog_gain(self, gain):
if gain > 16:
raise ValueError('Gain register must be less than or equal to 16.')
super(SRF10, self).set_analog_gain(gain)
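if __name__ == '__main__':
    # Usage sketch (illustrative only): assumes a pyboard with an SRF08 wired to I2C bus 1.
    import pyb
    srf = SRF08(1, I2C.MASTER)             # scans the bus and uses the first address found
    srf.set_max_range(6000)                # limit maximum range to roughly 6 m
    srf.set_analog_gain(16)
    srf.measure_range(SRF_RANGE_UNITS.CM)  # start a ranging in centimetres
    pyb.delay(70)                          # ranging takes up to ~65 ms to complete
    print(srf.read_range())                # e.g. [123] -> first echo at 123 cm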
|
python
|
import sys
import time
from datetime import timedelta, datetime as dt
from monthdelta import monthdelta
import holidays
import re
import threading
import inspect
from contextlib import contextmanager
import traceback
import logging
# default logging configuration
# logging.captureWarnings(True)
LOG_FORMATTER = logging.Formatter('%(message)s')
LOGGER_NAME = 'flask_production'
LOGGER = logging.getLogger(LOGGER_NAME)
LOGGER.setLevel(logging.INFO)
from ._capture import print_capture
USHolidays = holidays.US()
class _JobRunLogger(object):
'''
logging class to capture any print statements within a job
also captures start time, end time and error traceback
'''
def __init__(self):
self._lock = threading.Lock()
self._reset()
@property
def log(self):
with self._lock:
return self._run_log
@property
def error(self):
with self._lock:
return self._err_log
@property
def started_at(self):
with self._lock:
return self._started_at
@property
def ended_at(self):
with self._lock:
return self._ended_at
def _reset(self):
'''clear previous run info'''
with self._lock:
self._run_log = ''
self._err_log = ''
self._started_at = None
self._ended_at = None
def _log_callback(self, msg: str):
'''
        writing to stderr since stdout is being redirected here; using print() would be circular
        log to file using the logging library if a LOGGER handler was set by TaskScheduler
'''
if msg.strip()=='':return
msg = msg.replace('\r\n', '\n') # replace line endings to work correctly
sys.stderr.write(msg)
if len(LOGGER.handlers)>0:
LOGGER.info(msg.strip())
with self._lock:
self._run_log += msg
@contextmanager
def start_capture(self):
'''
begin recording print statements
'''
self._reset() # clear previous run info
with self._lock:
self._started_at = dt.now()
with print_capture(callback=self._log_callback):
yield
with self._lock:
self._ended_at = dt.now()
def set_error(self):
'''called when job throws error'''
with self._lock:
self._err_log = traceback.format_exc()
def to_dict(self):
with self._lock:
return dict(
log=self._run_log,
err=self._err_log,
start=self._started_at,
end=self._ended_at,
)
class Job(object):
'''standard job class'''
RUNABLE_DAYS = {
'day': lambda d, hols : True,
'weekday': lambda d, hols : d.isoweekday() < 6,
'weekend': lambda d, hols : d.isoweekday() > 5,
'businessday': lambda d, hols : d not in hols and d.isoweekday() < 6,
'holiday': lambda d, hols : d in hols or d.isoweekday() > 5,
'trading-holiday': lambda d, hols : d in hols,
# days of the week
'monday': lambda d, hols: d.isoweekday() == 1,
'tuesday': lambda d, hols: d.isoweekday() == 2,
'wednesday': lambda d, hols: d.isoweekday() == 3,
'thursday': lambda d, hols: d.isoweekday() == 4,
'friday': lambda d, hols: d.isoweekday() == 5,
'saturday': lambda d, hols: d.isoweekday() == 6,
'sunday': lambda d, hols: d.isoweekday() == 7,
}
@classmethod
def is_valid_interval(cls, interval):
return interval in cls.RUNABLE_DAYS
def __init__(self, every, at, func, kwargs):
if str(every) == 'holiday':
print("!!", "="*20, "!!")
print("'holiday' interval is deprecated and will be removed. \r\nUse 'weekend' and 'trading-holiday' instead")
print("!!", "="*20, "!!")
self.interval = every
self.time_string = at
self.func = func
self.kwargs = kwargs
self.is_running = False
self._run_silently = False
self._generic_err_handler = None
self._err_handler = None
self._func_src_code = inspect.getsource(self.func)
def init(self, calendar, generic_err_handler=None, startup_offset=300):
'''initialize extra attributes of job'''
self.calendar = calendar
self._generic_err_handler = generic_err_handler
self._startup_offset = startup_offset
self._run_info = _JobRunLogger()
self.schedule_next_run()
print(self)
return self
def silently(self):
self._run_silently = True
return self
def catch(self, err_handler):
'''register job specific error handler'''
self._err_handler = err_handler
return self
@staticmethod
def to_timestamp(d):
return time.mktime(d.timetuple())+d.microsecond/1000000.0
def schedule_next_run(self, just_ran=False):
'''compute timestamp of the next run'''
h, m = self.time_string.split(':')
n = dt.now()
n = dt(n.year, n.month, n.day, int(h), int(m), 0)
ts = self.to_timestamp(n)
if self._job_must_run_today() and not just_ran and time.time() < ts+self._startup_offset:
self.next_timestamp = ts
else:
next_day = n + timedelta(days=1)
while not self._job_must_run_today(next_day):
next_day += timedelta(days=1)
self.next_timestamp = self.to_timestamp(next_day)#next_day.timestamp()
def _job_must_run_today(self, date=None):
return self.RUNABLE_DAYS[self.interval](date or dt.now(), self.calendar)
def is_due(self):
'''test if job should run now'''
return (time.time() >= self.next_timestamp) and not self.is_running
def did_fail(self):
'''test if job failed'''
return self._run_info.error != ''
def run(self, is_rerun=False):
'''
begin job run
redirected all print statements to _JobRunLogger
call error handlers if provided
'''
with self._run_info.start_capture(): # captures all writes to stdout
self.is_running = True
try:
if not self._run_silently: # add print statements
print("========== Job {} [{}] =========".format(
"Rerun Start" if is_rerun else "Start",
dt.now().strftime("%Y-%m-%d %H:%M:%S")
))
print("Executing {}".format(self))
print("*") # job log seperator
start_time = time.time()
return self.func(**self.kwargs)
except Exception:
print("Job", self.func.__name__, "failed!")
err_msg = "Error in <{}>\n\n\n{}".format(self.func.__name__, traceback.format_exc())
self._run_info.set_error()
try:
if self._err_handler is not None:
self._err_handler(err_msg) # job specific error callback registered through .catch()
elif self._generic_err_handler is not None:
self._generic_err_handler(err_msg) # generic error callback from scheduler
except:
traceback.print_exc()
finally:
# if the job was forced to rerun, we should not schedule the next run
if not is_rerun:
self.schedule_next_run(just_ran=True)
if not self._run_silently: # add print statements
print("*") # job log seperator
print( "Finished in {:.2f} minutes".format((time.time()-start_time)/60))
print(self)
print("========== Job {} [{}] =========".format(
"Rerun End" if is_rerun else "End",
dt.now().strftime("%Y-%m-%d %H:%M:%S")
))
self.is_running = False
def _next_run_dt(self):
return dt.fromtimestamp(self.next_timestamp) if self.next_timestamp!=0 else None
def to_dict(self):
'''property to access job info dict'''
return dict(
func=self.func.__name__,
src=self._func_src_code,
doc=self.func.__doc__,
type=self.__class__.__name__,
every=self.interval,
at=self.time_string,
is_running=self.is_running,
next_run=self._next_run_dt(),
logs=self._run_info.to_dict() if hasattr(self, '_run_info') else {}
)
def __repr__(self):
d = self._next_run_dt()
return "{} {}. Next run = {}".format(
self.__class__.__name__, self.func.__name__,
d.strftime("%Y-%m-%d %H:%M:%S") if isinstance(d, dt) else 'Never'
)
class OneTimeJob(Job):
'''type of job that runs only once'''
@classmethod
def is_valid_interval(cls, interval):
try:
dt.strptime(interval, "%Y-%m-%d")
return True
except:
return False
def schedule_next_run(self, just_ran=False):
H, M = self.time_string.split(':')
Y, m, d = self.interval.split('-')
n = dt(int(Y), int(m), int(d), int(H), int(M), 0)
if just_ran or dt.now() > n + timedelta(minutes=3):
self.next_timestamp = 0
else:
self.next_timestamp = self.to_timestamp(n)
def is_due(self):
if self.next_timestamp==0: raise JobExpired('remove me!')
return super().is_due()
class RepeatJob(Job):
'''type of job that runs every n seconds'''
@classmethod
def is_valid_interval(cls, interval):
return isinstance(interval, (int, float))
def schedule_next_run(self, just_ran=False):
if not isinstance(self.interval, (int, float)) or self.interval <= 0:
raise BadScheduleError("Illegal interval for repeating job. Expected number of seconds")
if just_ran:
self.next_timestamp += self.interval
else:
self.next_timestamp = time.time() + self.interval
class MonthlyJob(Job):
'''
type of job that can be scheduled to run once per month
example interval 1st, 22nd, 30th
limitation: we cannot intuitively handle dates >= 29 for all months
- ex: 29th will fail for non leap-Feb, 31st will fail for months having less than 31 days
- use '_strict_date' when handing dates >= 29:
if self._strict_date == True:
job is scheduled only on months which have the date (ex: 31st)
elif self._strict_date == False:
run on the last day of the month if date exceeds current month
'''
PATTERN = re.compile(r"^(\d{1,2})(st|nd|rd|th)$", re.IGNORECASE)
def __init__(self, every, at, func, kwargs, strict_date):
if not isinstance(strict_date, bool):
raise BadScheduleError("call to .strict_date() required for monthly schedule. ex: .every('31st').strict_date(True)..")
self._strict_date = strict_date
super().__init__(every, at, func, kwargs)
@classmethod
def is_valid_interval(cls, interval):
# example intervals - 1st, 22nd, 30th
match = cls.PATTERN.match(str(interval))
return match is not None and int(match.groups()[0]) <= 31
def __last_day_of_month(self, d):
return ((d + monthdelta(1)).replace(day=1) - timedelta(days=1)).day
def schedule_next_run(self, just_ran=False):
interval = int(self.PATTERN.match(self.interval).groups()[0])
H, M = self.time_string.split(':')
sched_day = dt.now()
# switch to next month if
# - task just ran, or
# - day has already passed, or
# - day is today, but time has already passed
day_passed = interval < sched_day.day # True if day already passed this month
time_passed = interval == sched_day.day and (int(H) < sched_day.hour or (int(H) == sched_day.hour and (int(M) + 3 ) < sched_day.minute)) # 3 min look back on tasks
if just_ran or day_passed or time_passed:
sched_day += monthdelta(1) # switch to next month
# handle cases where the interval day doesn't occur in all months (ex: 31st)
if interval > self.__last_day_of_month(sched_day):
if self._strict_date==False:
interval = self.__last_day_of_month(sched_day) # if strict is false, run on what ever is last day of the month
else: # strict
while interval > self.__last_day_of_month(sched_day): # run only on months which have the date
sched_day += monthdelta(1)
n = sched_day.replace(day=interval, hour=int(H), minute=int(M), second=0, microsecond=0)
self.next_timestamp = self.to_timestamp(n)
def __repr__(self):
return "{}[ strict={} ] {}. Next run = {}".format(
self.__class__.__name__, self._strict_date, self.func.__name__,
self._next_run_dt().strftime("%Y-%m-%d %H:%M:%S")
)
class AsyncJobWrapper(object):
'''wrapper to run the job on a parallel thread'''
def __init__(self, job):
self.job = job
self.proc = None
def __getattr__(self, name):
return self.job.__getattribute__(name)
def is_due(self):
return self.job.is_due()
def run(self, *args, **kwargs):
self.proc = threading.Thread(target=self.job.run, args=args, kwargs=kwargs)
self.proc.daemon = True
self.proc.start()
class JobExpired(Exception):
pass
class BadScheduleError(Exception):
pass
class TaskScheduler(object):
'''task scheduler class to manage and run jobs'''
def __init__(self,
check_interval=5,
holidays_calendar=None,
on_job_error=None,
log_filepath=None):
self.jobs = []
self.on = self.every
self._check_interval = check_interval
self.interval = None
self.temp_time = None
if holidays_calendar is not None:
self.holidays_calendar = holidays_calendar
else:
self.holidays_calendar = USHolidays
self.on_job_error = on_job_error
self.log_filepath = log_filepath
if self.log_filepath is not None:
fh = logging.FileHandler(self.log_filepath)
fh.setFormatter(LOG_FORMATTER)
LOGGER.addHandler(fh)
self._strict_monthly = None
def __current_timestring(self):
return dt.now().strftime("%H:%M")
def every(self, interval):
'''
interval is either one of the keys of Job.RUNABLE_DAYS
or integer denoting number of seconds for RepeatJob
'''
self.interval = interval
return self
def strict_date(self, strict):
'''
required to be called when scheduling MonthlyJob
- see MonthlyJob docstring
'''
if not MonthlyJob.is_valid_interval(self.interval) or not isinstance(strict, bool):
raise BadScheduleError(".strict_date(bool) only used for monthly schedule. ex: .every('31st').strict_date(True)..")
self._strict_monthly = strict
return self
def at(self, time_string):
'''
24 hour time string of when to run job
example: '15:00' for 3PM
'''
if self.interval is None: self.interval = 'day'
self.temp_time = time_string
return self
def do(self, func, do_parallel=False, **kwargs):
'''
register 'func' for the job
        run in a parallel thread if do_parallel is True
pass kwargs into 'func' at execution
'''
if self.interval is None: raise Exception('Run .at()/.every().at() before .do()')
if self.temp_time is None: self.temp_time = self.__current_timestring()
if RepeatJob.is_valid_interval(self.interval):
j = RepeatJob(self.interval, None, func, kwargs)
elif OneTimeJob.is_valid_interval(self.interval):
j = OneTimeJob(self.interval, self.temp_time, func, kwargs)
elif MonthlyJob.is_valid_interval(self.interval):
j = MonthlyJob(self.interval, self.temp_time, func, kwargs, strict_date=self._strict_monthly)
elif Job.is_valid_interval(self.interval):
j = Job(self.interval, self.temp_time, func, kwargs)
else:
raise BadScheduleError("{} is not valid\n".format(self.interval))
j.init(
calendar=self.holidays_calendar,
generic_err_handler=self.on_job_error
)
if do_parallel:
j = AsyncJobWrapper(j)
self.jobs.append(j)
self.temp_time = None
self.interval = None
self._strict_monthly = None
return j
def check(self):
'''check if a job is due'''
for j in self.jobs.copy(): # work on copy of this list - safer in case the list changes
try:
if j.is_due(): j.run()
except JobExpired:
self.jobs.remove(j)
def start(self):
'''blocking function that checks for jobs every 'check_interval' seconds'''
self._running_auto = True
try:
while self._running_auto:
try:
self.check()
time.sleep(self._check_interval)
except KeyboardInterrupt:
print("KeyboardInterrupt")
self.stop()
finally:
print("Stopping. Please wait, checking active async jobs ..")
self.join()
print(self, "Done!")
def join(self):
'''wait for any async jobs to complete'''
for j in self.jobs:
if isinstance(j, AsyncJobWrapper) and j.is_running: # Kill any running parallel tasks
j.proc.join()
print(j, "exited")
def stop(self):
'''stop job started with .start() method'''
self._running_auto = False
def rerun(self, job_index):
if job_index < 0 or job_index >= len(self.jobs):
raise IndexError("Invalid job index")
j = self.jobs[job_index]
if j.is_running:
raise RuntimeError("Cannot rerun a running task")
if not isinstance(j, AsyncJobWrapper):
j = AsyncJobWrapper(j)
j.run(is_rerun=True)
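if __name__ == '__main__':
    # Usage sketch (illustrative only; the job function below is a placeholder):
    def heartbeat():
        print("still alive")

    sched = TaskScheduler(check_interval=5)
    sched.every('weekday').at('08:30').do(heartbeat)                   # calendar-based daily job
    sched.every(600).do(heartbeat, do_parallel=True)                   # repeat every 600 seconds on a thread
    sched.every('31st').strict_date(False).at('09:00').do(heartbeat)   # monthly; falls back to month-end
    sched.start()                                                      # blocks; Ctrl+C stops and joins async jobs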
|
python
|
token = '1271828065:AAFCFSuz_vX71bxzZSdhLSLhUnUgwWc0t-k'
|
python
|
###########################
#
# #764 Asymmetric Diophantine Equation - Project Euler
# https://projecteuler.net/problem=764
#
# Code by Kevin Marciniak
#
###########################
|
python
|
## What is Lambda: an anonymous function, i.e. a function without a name
## Use case: you can pass a function as an argument, or define a quick one-off function
# def double(num):
# x = num + num
# return x
# print(double(6))
# lambda num: num + num
# x = lambda a : a + 10
# print(x(5))
#Example 2
# my_list = [1, 5, 4, 6, 8, 11, 3, 12]
# new_list = list(filter(lambda x: (x%2 == 0) , my_list))
# print(new_list)
# #Example 3
# my_list = [1, 5, 4, 6, 8, 11, 3, 12]
# new_list = list(map(lambda x: x * 2 , my_list))
# print(new_list)
#Example 4
import pandas as pd
df = pd.DataFrame({
'Name': ['Luke','Gina','Sam','Emma'],
'Status': ['Father', 'Mother', 'Son', 'Daughter'],
'Birthyear': [1976, 1984, 2013, 2016],
})
df['age'] = df['Birthyear'].apply(lambda x: 2021-x)
print(df)
# listA = [4, "string1", lambda num: num * num]
# print(listA[2](8))
# array = [3,6,7]
# def double(num):
# return num + num
# print(list(map(double, array)))
# print(list(map(lambda num: num + num, array)))
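# Example 5 (additional quick illustration): lambdas as sort keys
# people = [('Luke', 1976), ('Sam', 2013), ('Gina', 1984)]
# print(sorted(people, key=lambda person: person[1]))  # sorts the tuples by birth year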
|
python
|
"""
Utility functions for dealing with NER tagging.
"""
import logging
logger = logging.getLogger('stanza')
def is_basic_scheme(all_tags):
"""
Check if a basic tagging scheme is used. Return True if so.
Args:
all_tags: a list of NER tags
Returns:
True if the tagging scheme does not use B-, I-, etc, otherwise False
"""
for tag in all_tags:
if len(tag) > 2 and tag[:2] in ('B-', 'I-', 'S-', 'E-'):
return False
return True
def is_bio_scheme(all_tags):
"""
Check if BIO tagging scheme is used. Return True if so.
Args:
all_tags: a list of NER tags
Returns:
True if the tagging scheme is BIO, otherwise False
"""
for tag in all_tags:
if tag == 'O':
continue
elif len(tag) > 2 and tag[:2] in ('B-', 'I-'):
continue
else:
return False
return True
def to_bio2(tags):
"""
Convert the original tag sequence to BIO2 format. If the input is already in BIO2 format,
the original input is returned.
Args:
tags: a list of tags in either BIO or BIO2 format
Returns:
new_tags: a list of tags in BIO2 format
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
elif tag[0] == 'I':
if i == 0 or tags[i-1] == 'O' or tags[i-1][1:] != tag[1:]:
new_tags.append('B' + tag[1:])
else:
new_tags.append(tag)
else:
new_tags.append(tag)
return new_tags
def basic_to_bio(tags):
"""
Convert a basic tag sequence into a BIO sequence.
You can compose this with bio2_to_bioes to convert to bioes
Args:
tags: a list of tags in basic (no B-, I-, etc) format
Returns:
new_tags: a list of tags in BIO format
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
elif i == 0 or tags[i-1] == 'O' or tags[i-1] != tag:
new_tags.append('B-' + tag)
else:
new_tags.append('I-' + tag)
return new_tags
def bio2_to_bioes(tags):
"""
Convert the BIO2 tag sequence into a BIOES sequence.
Args:
tags: a list of tags in BIO2 format
Returns:
new_tags: a list of tags in BIOES format
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
else:
if len(tag) < 2:
raise Exception(f"Invalid BIO2 tag found: {tag}")
else:
if tag[:2] == 'I-': # convert to E- if next tag is not I-
if i+1 < len(tags) and tags[i+1][:2] == 'I-':
new_tags.append(tag)
else:
new_tags.append('E-' + tag[2:])
elif tag[:2] == 'B-': # convert to S- if next tag is not I-
if i+1 < len(tags) and tags[i+1][:2] == 'I-':
new_tags.append(tag)
else:
new_tags.append('S-' + tag[2:])
else:
raise Exception(f"Invalid IOB tag found: {tag}")
return new_tags
def process_tags(sentences, scheme):
res = []
# check if tag conversion is needed
convert_bio_to_bioes = False
convert_basic_to_bioes = False
is_bio = is_bio_scheme([x[1] for sent in sentences for x in sent])
is_basic = not is_bio and is_basic_scheme([x[1] for sent in sentences for x in sent])
if is_bio and scheme.lower() == 'bioes':
convert_bio_to_bioes = True
logger.debug("BIO tagging scheme found in input; converting into BIOES scheme...")
elif is_basic and scheme.lower() == 'bioes':
convert_basic_to_bioes = True
logger.debug("Basic tagging scheme found in input; converting into BIOES scheme...")
# process tags
for sent in sentences:
words, tags = zip(*sent)
# NER field sanity checking
if any([x is None or x == '_' for x in tags]):
raise ValueError("NER tag not found for some input data.")
if convert_basic_to_bioes:
# if basic, convert tags -> bio -> bioes
tags = bio2_to_bioes(basic_to_bio(tags))
else:
# first ensure BIO2 scheme
tags = to_bio2(tags)
# then convert to BIOES
if convert_bio_to_bioes:
tags = bio2_to_bioes(tags)
res.append([(w,t) for w,t in zip(words, tags)])
return res
def decode_from_bioes(tags):
"""
Decode from a sequence of BIOES tags, assuming default tag is 'O'.
Args:
tags: a list of BIOES tags
Returns:
A list of dict with start_idx, end_idx, and type values.
"""
res = []
ent_idxs = []
cur_type = None
def flush():
if len(ent_idxs) > 0:
res.append({
'start': ent_idxs[0],
'end': ent_idxs[-1],
'type': cur_type})
for idx, tag in enumerate(tags):
if tag is None:
tag = 'O'
if tag == 'O':
flush()
ent_idxs = []
elif tag.startswith('B-'): # start of new ent
flush()
ent_idxs = [idx]
cur_type = tag[2:]
elif tag.startswith('I-'): # continue last ent
ent_idxs.append(idx)
cur_type = tag[2:]
elif tag.startswith('E-'): # end last ent
ent_idxs.append(idx)
cur_type = tag[2:]
flush()
ent_idxs = []
elif tag.startswith('S-'): # start single word ent
flush()
ent_idxs = [idx]
cur_type = tag[2:]
flush()
ent_idxs = []
# flush after whole sentence
flush()
return res
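if __name__ == '__main__':
    # Worked example (illustrative only): convert BIO -> BIO2 -> BIOES and decode the spans.
    example = ['B-PER', 'I-PER', 'O', 'I-LOC']
    bioes = bio2_to_bioes(to_bio2(example))
    print(bioes)                     # ['B-PER', 'E-PER', 'O', 'S-LOC']
    print(decode_from_bioes(bioes))  # [{'start': 0, 'end': 1, 'type': 'PER'}, {'start': 3, 'end': 3, 'type': 'LOC'}]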
|
python
|
from dataclasses import dataclass
from typing import Callable
import torch
Logits = torch.FloatTensor
@dataclass
class Sample:
logits: Logits
tokens: torch.LongTensor
Sampler = Callable[[Logits], Sample]
def standard(temperature: float = 1.0) -> Sampler:
def sample(logits: Logits) -> Sample:
logits = logits / (temperature + 1e-7)
# There was a regression in torch that made categorical only work with fp32.
# We can track the issue on github and remove this once it makes it into a
# pytorch release or nightly:
#
# https://github.com/pytorch/pytorch/issues/29211
#
logits_fp32 = logits.float()
return Sample(
logits=logits, tokens=torch.distributions.Categorical(logits=logits_fp32).sample()
)
return sample
def argmax() -> Sampler:
def sample(logits: Logits) -> Sample:
return Sample(logits=logits, tokens=torch.argmax(logits, dim=-1))
return sample
def nucleus_sampler(top_p: float = 0.9, temperature=1.0) -> Sampler:
"""
Return a sampler that decides diversity via nucleus sampling.
p=0.9 means that the top 90% of likelihood-weighted options are considered. p=0.0 is
equivalent to argmax, p=1.0 has no effect.
When a logit is on the boundary of being included or not being included, default
to including it.
"""
if top_p == 0.0:
return argmax()
if top_p == 1.0:
return standard(temperature=temperature)
def sample(logits: Logits) -> Sample:
"""
Remove logits that do not represent the top_p proportion of likelihoods.
When a logit is on the boundary of being included or not being included, default
to including it.
"""
logits = logits.clone()
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
cumulative_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold.
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold.
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = torch.zeros_like(logits, dtype=torch.bool).scatter_(
dim=-1, index=sorted_indices, src=sorted_indices_to_remove
)
logits[indices_to_remove] = -float("Inf")
return standard(temperature=temperature)(logits)
return sample
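if __name__ == "__main__":
    # Usage sketch (illustrative only): draw one token per row from random logits with
    # nucleus (top-p) sampling; the vocabulary size of 50257 is just an assumption.
    sampler = nucleus_sampler(top_p=0.9, temperature=0.8)
    out = sampler(torch.randn(2, 50257))
    print(out.tokens.shape)  # torch.Size([2]) -> one sampled token id per row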
|
python
|
from numpy import NaN
import pandas as pd
import requests
import math
import re
from bs4 import BeautifulSoup
class Propertypro:
"""
web-scraper tool for scraping data on propertypro.ng
Parameters:
num_samples (int): The number of samples of data to be scraped.
location (list): list of keywords to scrape
Returns:
pd.DataFrame: Returns a dataframe with the following categories as columns:
title, location, price, number of bedrooms, toilets, bathroom, whether it is furnished, serviced and newly built
"""
def __init__(self) -> None:
self.no_samples = 0
def process_data(self, dataframe: pd.DataFrame) -> pd.DataFrame:
"""
cleans data from the provided Dataframe.
:param data: Scraped data .
:return: pandas dataframe
"""
data = dataframe
data = data.dropna()
data['rooms'] = data['rooms'].str.split('\n')
data[['nothing', 'bedroom', 'bathroom', 'toilet', 'remove']] = pd.DataFrame(data['rooms'].tolist(), index= data.index)
data['bedroom'] = data['bedroom'].str.strip('beds')
data['bathroom'] = data['bathroom'].str.strip('baths')
data['toilet'] = data['toilet'].str.strip('Toilets')
        data['price'] = data['price'].str.replace(r'[^0-9]+', '', regex=True)  # keep digits only
data['furnishing'] = data['furnishing'].str.split('\n')
data['newly_built'] = data['furnishing'].apply(lambda x: ''.join(['1' if "Newly Built" in x else '0']))
data['furnished'] = data['furnishing'].apply(lambda x: ''.join(['1' if "Furnished" in x else '0']))
data['serviced'] = data['furnishing'].apply(lambda x: ''.join(['1' if "Serviced" in x else '0']))
data = data.drop(columns=['rooms', 'nothing', 'remove', 'furnishing'])
return data
def scrape_data(self, no_samples, keywords):
"""
Scrapes data from provided urls
:param : no_samples, keywords
:return: pandas dataFrame.
"""
data = {"title": [], "location": [], "furnishing": [], "rooms": [], "price": []}
for keyword in keywords:
page_url = []
for i in range(0,round((no_samples/22))):
page_url.append('https://www.propertypro.ng/property-for-rent/in/' + keyword + '?search=&type=&bedroom=&min_price=&max_price=&page=' + str(i))
for links in page_url:
response = requests.get(links)
soup = BeautifulSoup(response.content, 'html.parser')
for title in soup.find_all('h2', { 'class':"listings-property-title" }):
data["title"].append(title.text)
data["location"].append(keyword)
for furnishing in soup.find_all('div', {'class': "furnished-btn"}):
data["furnishing"].append(furnishing.text)
for rooms in soup.find_all('div', {'class': "fur-areea"}):
data["rooms"].append(rooms.text)
for price in soup.find_all('h3', { 'class': 'listings-price' }):
data["price"].append(price.text)
page_url.clear()
# df = pd.DataFrame(data)
df = pd.DataFrame.from_dict(data, orient='index')
df = df.transpose()
pd.set_option("display.max_rows", None, "display.max_columns", None)
df = self.process_data(df)
return df
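if __name__ == '__main__':
    # Usage sketch (illustrative only): the location keywords are assumptions and
    # running this performs live HTTP requests against propertypro.ng.
    scraper = Propertypro()
    listings = scraper.scrape_data(no_samples=44, keywords=['lekki', 'ikeja'])
    print(listings.head())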
|
python
|
from oslo_log import log as logging
from oslo_messaging import RemoteError
from nca47.api.controllers.v1 import base
from nca47.api.controllers.v1 import tools
from nca47.common.exception import NonExistParam
from nca47.common.exception import ParamFormatError
from nca47.common.exception import ParamNull
from nca47.common.exception import ParamValueError
from nca47.common.exception import Nca47Exception
from nca47.common.exception import BadRequest
from nca47.common.i18n import _
from nca47.common.i18n import _LE
from nca47.manager import central
from oslo_serialization import jsonutils as json
from nca47.api.controllers.v1.tools import check_areaname
from nca47.api.controllers.v1.tools import check_ttl
from nca47.api.controllers.v1.tools import check_renewal
from nca47.api.controllers.v1.tools import is_not_list
LOG = logging.getLogger(__name__)
class DnsZonesController(base.BaseRestController):
"""
nca47 dnsZones class, using for add/delete/update/query the zones info,
validate parameters whether is legal, handling DB operations and calling
rpc client's corresponding method to send messaging to agent endpoints
"""
def __init__(self):
self.manager = central.CentralManager.get_instance()
super(DnsZonesController, self).__init__()
def create(self, req, *args, **kwargs):
"""create the dns zones"""
# get the context
context = req.context
try:
# get the body
values = json.loads(req.body)
# get the url
url = req.url
# if len(args) != 1:
# raise BadRequest(resource="zone create", msg=url)
if 'default_ttl' not in values.keys():
values['default_ttl'] = "3600"
if 'renewal' not in values.keys():
raise NonExistParam(param_name='renewal')
if values['renewal'] == 'no':
# check the in values
valid_attributes = ['name', 'owners', 'default_ttl', 'renewal',
'tenant_id']
elif values['renewal'] == 'yes':
# check the in values
valid_attributes = ['name', 'owners', 'default_ttl', 'renewal',
'zone_content', 'slaves', 'tenant_id']
else:
raise ParamValueError(param_name='renewal')
# check the in values
recom_msg = self.validat_parms(values, valid_attributes)
LOG.info(_("the in value body is %(body)s"), {"body": values})
# from rpc server create the zones in db and device
zones = self.manager.create_zone(context, recom_msg)
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except RemoteError as e:
self.response.status = 500
message = e.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
return tools.ret_info(self.response.status, exception.message)
return zones
def update(self, req, id, *args, **kwargs):
"""update the dns zones by currentUser/owners"""
# get the context
context = req.context
try:
# get the url
url = req.url
# if len(args) > 2:
# raise BadRequest(resource="zone update", msg=url)
# get the body
values = json.loads(req.body)
values['id'] = id
LOG.info(_("the in value body is %(body)s"), {"body": values})
LOG.info(_("the id is %(id)s"), {"id": id})
if kwargs.get('owners'):
# check the in values
valid_attributes = ['id', 'tenant_id', 'owners']
recom_msg = self.validat_parms(values, valid_attributes)
# from rpc server update the zones in db and device
zones = self.manager.update_zone_owners(context, recom_msg,
recom_msg['id'])
else:
# check the in values
valid_attributes = ['id', 'tenant_id', 'default_ttl']
recom_msg = self.validat_parms(values, valid_attributes)
# from rpc server update the zones in db and device
zones = self.manager.update_zone(context, recom_msg,
recom_msg['id'])
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
return tools.ret_info(self.response.status, exception.message)
return zones
def remove(self, req, id, *args, **kwargs):
"""delete the dns zones"""
# get the context
context = req.context
try:
# get the url
url = req.url
# if len(args) != 1:
# raise BadRequest(resource="zone delete", msg=url)
# get the body
values = {}
values.update(kwargs)
values['id'] = id
LOG.info(_("the in value body is %(body)s"), {"body": values})
# check the in values
valid_attributes = ['tenant_id', 'id']
recom_msg = self.validat_parms(values, valid_attributes)
# from rpc server delete the zones in db and device
zones = self.manager.delete_zone(context, recom_msg['id'])
except Nca47Exception as e:
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
return tools.ret_info(self.response.status, exception.message)
return zones
def list(self, req, *args, **kwargs):
"""get the list of the dns zones"""
# get the context
context = req.context
try:
if kwargs.get('device'):
LOG.info(_(" args is %(args)s, kwargs is %(kwargs)s"),
{"args": args, "kwargs": kwargs})
# from rpc server get the zones in device
zones = self.manager.get_zones(context)
else:
# get the body
values = {}
values.update(kwargs)
LOG.info(_(" args is %(args)s, kwargs is %(kwargs)s"),
{"args": args, "kwargs": kwargs})
# check the in values
valid_attributes = ['tenant_id']
recom_msg = self.validat_parms(values, valid_attributes)
# from rpc server get the zones in db
zones = self.manager.get_db_zones(context, recom_msg)
LOG.info(_("Return of get_all_db_zone JSON is %(zones)s !"),
{"zones": zones})
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
return tools.ret_info(self.response.status, exception.message)
return zones
def show(self, req, id, *args, **kwargs):
"""get one dns zone info"""
# get the context
context = req.context
try:
if kwargs.get('device'):
LOG.info(_(" args is %(args)s"), {"args": args})
# from rpc server get the zone in device
zones = self.manager.get_zones(context)
else:
LOG.info(_(" args is %(args)s"), {"args": args})
# from rpc server get the zone in db
zones = self.manager.get_zone_db_details(context, id)
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
return tools.ret_info(self.response.status, exception.message)
return zones
def validat_parms(self, values, valid_keys):
"""check the in value is null and nums"""
recom_msg = tools.validat_values(values, valid_keys)
for value in recom_msg:
if value == "name":
try:
spe_char = '.'
char = values[value][-1]
if not cmp(spe_char, char):
recom_msg[value] = values[value][:-1]
if not check_areaname(recom_msg[value]):
raise ParamFormatError(param_name=value)
except Exception:
raise ParamFormatError(param_name=value)
elif value == "default_ttl":
if not check_ttl(values['default_ttl']):
raise ParamFormatError(param_name=value)
elif value == "renewal":
if not check_renewal(values['renewal']):
raise ParamValueError(param_name=value)
elif value == "owners":
flag = is_not_list(values['owners'])
if flag == "0":
raise ParamFormatError(param_name=value)
elif flag == "1":
raise ParamNull(param_name=value)
elif value == "slaves":
flag = is_not_list(values['slaves'])
if flag == "0":
raise ParamFormatError(param_name=value)
elif flag == "1":
raise ParamNull(param_name=value)
return recom_msg
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from layers import *
from layers.modules.l2norm import L2Norm
from data import *
import os
import math
#from vis_features import plot_features
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# self.avgpool = nn.AvgPool2d(7)
# self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
return x
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
class SSD(nn.Module):
"""Single Shot Multibox Architecture
The network is composed of a base VGG network followed by the
added multibox conv layers. Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
Args:
phase: (string) Can be "test" or "train"
size: input image size
base: VGG16 layers for input, size of either 300 or 500
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, head, num_classes, resnet18):
super(SSD, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.cfg = VOC_512_3
self.priorbox = PriorBox(self.cfg)
self.priors = Variable(self.priorbox.forward(), volatile=True)
self.size = size
self.conv1 = resnet18.conv1
self.bn1 = resnet18.bn1
self.relu = resnet18.relu
self.maxpool = resnet18.maxpool
self.layer1 = resnet18.layer1
self.layer2 = resnet18.layer2
self.layer3 = resnet18.layer3
self.layer4 = resnet18.layer4
# self.vgg = nn.ModuleList(base)
# Layer learns to scale the l2 normalized features from conv4_3
self.L2Norm = L2Norm(256, 20)
self.L2Norm2 = L2Norm(512, 20)
self.vgg1 = nn.ModuleList(base[0])
self.vgg2 = nn.ModuleList(base[1])
# self.vgg3 = nn.ModuleList(base[2])
# self.vgg4 = nn.ModuleList(base[3])
self.vgg5 = nn.ModuleList(base[4])
self.vgg6 = nn.ModuleList(base[5])
self.vgg7 = nn.ModuleList(base[6])
self.vgg8 = nn.ModuleList(base[7])
self.de1 = nn.ModuleList(base[8])
self.de2 = nn.ModuleList(base[9])
self.de3 = nn.ModuleList(base[10])
self.de4 = nn.ModuleList(base[11])
self.d19sample1 = nn.Sequential(
nn.Conv2d(1024, 64, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(64), nn.ReLU(inplace=True))
self.d19sample2 = nn.Sequential(
nn.Conv2d(1024, 64, kernel_size=1, stride=2, bias=False),
nn.BatchNorm2d(64), nn.ReLU(inplace=True))
self.d19sample3 = nn.Sequential(
nn.Conv2d(1024, 64, kernel_size=2, stride=4, bias=False),
nn.BatchNorm2d(64), nn.ReLU(inplace=True))
self.ds38_19 = nn.Sequential(
nn.Conv2d(512, 128, kernel_size=(1, 1), stride=2),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True))
self.ds19_10 = nn.Sequential(
nn.Conv2d(1024, 128, kernel_size=(1, 1), stride=2),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True))
self.ds10_5 = nn.Sequential(
nn.Conv2d(512, 128, kernel_size=(1, 1), stride=2),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True))
self.ds5_3 = nn.Sequential(
nn.Conv2d(512, 128, kernel_size=(1, 1), stride=2),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True))
'''
self.de5_19 = nn.Sequential(
nn.ConvTranspose2d(512, 512, kernel_size=3, stride=4, padding=0, output_padding=0),
nn.BatchNorm2d(512),
nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1)),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True))
self.de10_38 = nn.Sequential(
nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, output_padding=0),
nn.BatchNorm2d(256),
nn.ConvTranspose2d(256, 256, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(256),
nn.Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True))
'''
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
self.con_press38 = nn.Sequential(nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)),
nn.BatchNorm2d(128))
'''
self.con_press19 = nn.Sequential(nn.Conv2d(1024, 128, kernel_size=(1, 1), stride=(1, 1)),
nn.BatchNorm2d(128))
self.con_press10 = nn.Sequential(nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)),
nn.BatchNorm2d(128))
'''
if phase == 'test':
self.softmax = nn.Softmax()
# self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
Variable(tensor) of output class label predictions,
confidence score, and corresponding location predictions for
each object detected. Shape: [batch,topk,7]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
sources1=list()
loc = list()
conf = list()
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
'''
res=x
x = self.layer3[0].conv1(x)
x = self.layer3[0].bn1(x)
x = self.layer3[0].relu(x)
x = self.layer3[0].conv2(x)
x = self.layer3[0].bn2(x)
res= self.layer3[0].downsample(res)
x=x+res
x=self.layer3[0].relu(x)
x = self.layer3[1](x)
'''
x = self.layer3(x)
res38 = x
s = self.L2Norm(res38)
#x1=F.interpolate(s,size=[38,38])
#print(x1.size())
#sources1.append(x1)
s2 = s
for k in range(len(self.vgg2)):
s2 = self.vgg2[k](s2)
# s4 = s
# for k in range(len(self.vgg3)):
# s4 = self.vgg3[k](s4)
# s6 = s
# for k in range(len(self.vgg4)):
# s6 = self.vgg4[k](s6)
s8 = s
for k in range(len(self.vgg5)):
s8 = self.vgg5[k](s8)
for k in range(len(self.vgg6)):
s = self.vgg6[k](s)
s = torch.cat((s, s2, s8), 1)
for k in range(len(self.vgg7)):
s = self.vgg7[k](s)
s38 = self.L2Norm2(s)
# sources.append(s)
ds19 = self.ds38_19(s38)
x = self.layer4(x)
# apply vgg up to fc7
for k in range(len(self.vgg1)):
x = self.vgg1[k](x)
# if (k == 2):
# x=x*0.5+res19*0.5
ds10 = self.ds19_10(x)
xde38 = x
for k in range(len(self.de4)):
xde38 = self.de4[k](xde38)
s38_1 = self.con_press38(s38)
# sources.append(s38)
x19 = self.extras[21](x)
s19 = self.extras[22](x19)
# sources.append(x19)
res10 = self.d19sample1(x)
res5 = self.d19sample2(x)
res3 = self.d19sample3(x)
feamp = [res10, res5, res3]
# apply extra layers and cache source layer outputs
for k in range(len(self.extras)):
if (k == 21):
break
x = self.extras[k](x)
if (k == 6):
# s38_2 = self.de10_38(x)
# s38_2=s38_1+s38_2
s38 = torch.cat((s38, s38_1, xde38), 1)
for k in range(len(self.vgg8)):
s38 = self.vgg8[k](s38)
sources.append(s38)
ds5 = self.ds10_5(x)
xde19 = x
                for j in range(len(self.de3)):
                    xde19 = self.de3[j](xde19)
xde19 = ds19 + xde19
s19 = torch.cat((s19, ds19, xde19), 1)
s19 = self.extras[23](s19)
s19 = self.extras[24](s19)
s19 = self.extras[25](s19)
sources.append(s19)
s10 = x
# sources.append(x10)
elif (k == 13):
# s19_2 = self.de5_19(x)
# s19 = s19 + s19_2
s5 = x
ds3 = self.ds5_3(x)
xde10 = x
                for j in range(len(self.de2)):
                    xde10 = self.de2[j](xde10)
xde10 = xde10 + ds10
s10 = torch.cat((s10, ds10, xde10), 1)
x10 = self.extras[26](s10)
s10 = self.extras[27](x10)
s10 = self.extras[28](s10)
# s10 = s10 + xde10
sources.append(s10)
# sources.append(x5)
elif (k == 20):
xde5 = x
                for j in range(len(self.de1)):
                    xde5 = self.de1[j](xde5)
xde5 = xde5 + ds5
s5 = torch.cat((s5, ds5, xde5), 1)
x5 = self.extras[29](s5)
s5 = self.extras[30](x5)
s5 = self.extras[31](s5)
sources.append(s5)
s3 = torch.cat((x, ds3), 1)
x3 = self.extras[32](s3)
s3 = self.extras[33](x3)
s3 = self.extras[34](s3)
sources.append(s3)
if (k == 0):
x = torch.cat((x, res10), 1)
elif (k == 7):
x = torch.cat((x, res5), 1)
elif (k == 14):
x = torch.cat((x, res3), 1)
# plot_features(sources1[0], 64, 1, "figure2/", (38, 38), "liye")
# for i in range(10000):
# print("ok")
# apply multibox head to source layers
for (x, l, c) in zip(sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = (
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(-1, self.num_classes)), # conf preds
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
        if ext in ('.pkl', '.pth'):
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file,
map_location=lambda storage, loc: storage))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
# This function is derived from torchvision VGG make_layers()
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
conv8_2 = nn.Conv2d(256, 512, kernel_size=3, padding=2, dilation=2)
conv8_4 = nn.Conv2d(256, 512, kernel_size=3, padding=4, dilation=4)
conv8_6 = nn.Conv2d(256, 512, kernel_size=3, padding=6, dilation=6)
conv8_8 = nn.Conv2d(256, 512, kernel_size=3, padding=8, dilation=8)
conv9_2 = nn.Conv2d(512, 512, kernel_size=1)
conv9_4 = nn.Conv2d(512, 512, kernel_size=1)
conv9_6 = nn.Conv2d(512, 512, kernel_size=1)
conv9_8 = nn.Conv2d(512, 512, kernel_size=1)
conv9_2_ = nn.Conv2d(512, 512, kernel_size=3, padding=1, groups=2)
conv9_4_ = nn.Conv2d(512, 512, kernel_size=3, padding=1, groups=2)
conv9_6_ = nn.Conv2d(512, 512, kernel_size=3, padding=1, groups=2)
conv9_8_ = nn.Conv2d(512, 512, kernel_size=3, padding=1, groups=2)
conv10_2 = nn.Conv2d(512, 128, kernel_size=1)
conv10_4 = nn.Conv2d(512, 128, kernel_size=1)
conv10_6 = nn.Conv2d(512, 128, kernel_size=1)
conv10_8 = nn.Conv2d(512, 128, kernel_size=1)
conv11 = nn.Conv2d(256, 1024, kernel_size=1)
conv12 = nn.Conv2d(1280, 512, kernel_size=1)
conv13 = nn.Conv2d(768, 512, kernel_size=1)
de3_5 = torch.nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, output_padding=1)
de3_5_0 = nn.BatchNorm2d(512)
de3_5_1 = torch.nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))
de3_5_2 = nn.BatchNorm2d(128)
de3_5_3 = nn.ReLU(inplace=True)
de5_10 = torch.nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, output_padding=1)
de5_10_0 = nn.BatchNorm2d(512)
de5_10_1 = torch.nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))
de5_10_2 = nn.BatchNorm2d(128)
de5_10_3 = nn.ReLU(inplace=True)
de10_19 = torch.nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, output_padding=1)
de10_19_0 = nn.BatchNorm2d(512)
de10_19_1 = torch.nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))
de10_19_2 = nn.BatchNorm2d(128)
de10_19_3 = nn.ReLU(inplace=True)
de19_38 = torch.nn.ConvTranspose2d(1024, 512, kernel_size=3, stride=2, padding=1, output_padding=1)
de19_38_0 = nn.BatchNorm2d(512)
de19_38_1 = torch.nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))
de19_38_2 = nn.BatchNorm2d(128)
de19_38_3 = nn.ReLU(inplace=True)
layers += [pool5, conv6, nn.BatchNorm2d(1024),
nn.ReLU(inplace=True), conv7, nn.BatchNorm2d(1024), nn.ReLU(inplace=True)]
layer1 = layers
layer21 = [conv8_2, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_2, nn.BatchNorm2d(512), nn.ReLU(inplace=True),
conv9_2_, nn.BatchNorm2d(512), nn.ReLU(inplace=True),
conv10_2, nn.BatchNorm2d(128), nn.ReLU(inplace=True)]
layer22 = [conv8_4, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_4, nn.BatchNorm2d(512), nn.ReLU(inplace=True),
conv9_4_, nn.BatchNorm2d(512), nn.ReLU(inplace=True),
conv10_4, nn.BatchNorm2d(128), nn.ReLU(inplace=True)]
layer23 = [conv8_6, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_6, nn.BatchNorm2d(512), nn.ReLU(inplace=True),
conv9_6_, nn.BatchNorm2d(512), nn.ReLU(inplace=True),
conv10_6, nn.BatchNorm2d(128), nn.ReLU(inplace=True)]
layer24 = [conv8_8, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_8, nn.BatchNorm2d(512), nn.ReLU(inplace=True),
conv9_8_, nn.BatchNorm2d(512), nn.ReLU(inplace=True),
conv10_8, nn.BatchNorm2d(128), nn.ReLU(inplace=True)]
layer25 = [conv11, nn.BatchNorm2d(1024), nn.ReLU(inplace=True)]
layer26 = [conv12, nn.BatchNorm2d(512), nn.ReLU(inplace=True)]
layer27 = [conv13, nn.BatchNorm2d(512), nn.ReLU(inplace=True)]
layer3 = [de3_5, de3_5_0, de3_5_1, de3_5_2, de3_5_3]
layer4 = [de5_10, de5_10_0, de5_10_1, de5_10_2, de5_10_3]
layer5 = [de10_19, de10_19_0, de10_19_1, de10_19_2, de10_19_3]
layer6 = [de19_38, de19_38_0, de19_38_1, de19_38_2, de19_38_3]
# layer3 = [conv13, nn.BatchNorm2d(128), nn.ReLU(inplace=True)]
# layer4 = [conv14, nn.BatchNorm2d(128), nn.ReLU(inplace=True)]
layers = [layer1, layer21, layer22, layer23, layer24, layer25, layer26, layer27, layer3, layer4, layer5, layer6]
return layers
def add_extras(cfg, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
cc0 = torch.nn.Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1))
cc0_1 = nn.BatchNorm2d(256)
cc0_2 = nn.ReLU(inplace=True)
cc1 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
cc1_0 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
cc1_1 = nn.BatchNorm2d(512)
cc1_2 = nn.ReLU(inplace=True)
cc2 = torch.nn.Conv2d(512, 192, kernel_size=(1, 1), stride=(1, 1))
cc2_1 = nn.BatchNorm2d(256)
cc2_2 = nn.ReLU(inplace=True)
cc3 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
cc3_0 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
cc3_1 = nn.BatchNorm2d(512)
cc3_2 = nn.ReLU(inplace=True)
cc4 = torch.nn.Conv2d(512, 192, kernel_size=(1, 1), stride=(1, 1))
cc4_1 = nn.BatchNorm2d(256)
cc4_2 = nn.ReLU(inplace=True)
cc5 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2),padding=1)
cc5_0 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
cc5_1 = nn.BatchNorm2d(512)
cc5_2 = nn.ReLU(inplace=True)
cc6 = torch.nn.Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))
cc6_1 = nn.BatchNorm2d(256)
cc6_2 = nn.ReLU(inplace=True)
#cc7 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1))
cc7 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
cc7_1 = nn.BatchNorm2d(512)
cc7_2 = nn.ReLU(inplace=True)
cc8 = torch.nn.Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1))
cc8_1 = nn.BatchNorm2d(1024)
cc8_2 = torch.nn.Conv2d(1280, 1024, kernel_size=(1, 1), stride=(1, 1))
cc8_3 = nn.BatchNorm2d(1024)
cc9 = torch.nn.Conv2d(768, 512, kernel_size=(1, 1), stride=(1, 1))
cc9_1 = nn.BatchNorm2d(512)
cc10 = torch.nn.Conv2d(768, 512, kernel_size=(1, 1), stride=(1, 1))
cc10_1 = nn.BatchNorm2d(512)
cc11 = torch.nn.Conv2d(640, 512, kernel_size=(1, 1), stride=(1, 1))
cc11_1 = nn.BatchNorm2d(512)
'''
cc12 = torch.nn.Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
cc12_1 = nn.BatchNorm2d(512)
'''
layers = [cc0, cc0_1, cc0_2,
cc1, cc1_0, cc1_1, cc1_2,
cc2, cc2_1, cc2_2,
cc3, cc3_0, cc3_1, cc3_2,
cc4, cc4_1, cc4_2,
cc5, cc5_0, cc5_1, cc5_2,
cc8, cc8_1, cc8_2, cc8_3, nn.ReLU(inplace=True), cc9, cc9_1, nn.ReLU(inplace=True), cc10, cc10_1,
nn.ReLU(inplace=True), cc11, cc11_1, nn.ReLU(inplace=True),
cc6, cc6_1, cc6_2,
cc7, cc7_1, cc7_2,
]
return layers
def multibox(vgg, extra_layers, cfg, num_classes):
loc_layers = [
torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# torch.nn.Conv2d(512, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
torch.nn.Conv2d(1024, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ,
torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
#torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
]
conf_layers = [
torch.nn.Conv2d(512, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# torch.nn.Conv2d(512, 126, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# torch.nn.Conv2d(512, 84, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
torch.nn.Conv2d(1024, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
torch.nn.Conv2d(512, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
torch.nn.Conv2d(512, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ,
torch.nn.Conv2d(512, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
#torch.nn.Conv2d(512, 6*21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
]
return vgg, extra_layers, (loc_layers, conf_layers)
base = {
'320': [],
'300': [],
'512': [],
}
extras = {
'320': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'512': [],
}
mbox = {
'320': [4, 6, 6, 6, 4, 4],
'300': [4, 6, 6, 6, 4, 4], # number of boxes per feature map location
'512': [],
}
def build_net(phase, size=300, num_classes=21):
if phase != "test" and phase != "train":
print("ERROR: Phase: " + phase + " not recognized")
return
# if size != 300:
# print("ERROR: You specified size " + repr(size) + ". However, " +
# "currently only SSD300 (size=300) is supported!")
# return
base_, extras_, head_ = multibox(vgg(base[str(size)], 3),
add_extras(extras[str(size)], 1024),
mbox[str(size)], num_classes)
return SSD(phase, size, base_, extras_, head_, num_classes, resnet18())
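# A minimal usage sketch (an addition, not part of the original source) showing how
# build_net is meant to be called. It only constructs the network and counts its
# parameters; whether a full forward pass succeeds depends on the channel and shape
# bookkeeping of the layers defined above, which is not verified here.
if __name__ == '__main__':
    net = build_net('train', size=300, num_classes=21)
    n_params = sum(p.numel() for p in net.parameters())
    print('built SSD variant with {} parameters'.format(n_params))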
|
python
|
"""
This module defines views used in CRUD operations on articles.
"""
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework.permissions import (
AllowAny, IsAuthenticatedOrReadOnly, IsAuthenticated
)
from rest_framework.serializers import ValidationError
from datetime import datetime
from rest_framework.views import APIView
from django.db.models import Avg
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic import ListView
from rest_framework.renderers import JSONRenderer
from rest_framework import authentication
# Add pagination
from rest_framework.pagination import PageNumberPagination
# Add search package
from rest_framework.filters import SearchFilter
from django_filters.rest_framework import DjangoFilterBackend
from .renderers import ArticleJSONRenderer, BookmarkJSONRenderer
from .serializers import (
    CommentSerializer, ArticleSerializer, ArticleRatingSerializer, LikesSerializer,
    TagsSerializer, ArticleReportSerializer, ArticleReportRetrieveSerializer,
    BookmarkSerializer
)
from .models import (
    Article, ArticleRating, Likes, Comment, ArticleTags, ArticleReport, Bookmark)
from authors.apps.notifications.models import notify_follower
def create_tag(tags, article):
"""
This method checks whether a tag with tag provided exists in the database
and creates it if it does not exist.
:params str tag: name of the new tag or tag to query from the database
:returns cls object ArticleTags: the tag as retrieved from the database
"""
# retrieve all tag names and create new ones if they do not exist
# also, add them to the articles and save the article instance
for tag in tags.split(','):
article_tag = ArticleTags.objects.filter(tag__icontains=tag.strip())
if not article_tag:
data = {'tag': tag.strip()}
serializer = TagsSerializer(data=data)
serializer.is_valid(raise_exception=True)
article_tag = serializer.save()
article.article_tags.add(article_tag)
else:
article.article_tags.add(article_tag.first())
article.save()
return None
class ArticleAPIView(generics.ListCreateAPIView):
"""
get:
Retrieve all articles
post:
Create a new article
"""
queryset = Article.objects.all()
serializer_class = ArticleSerializer
renderer_classes = (ArticleJSONRenderer,)
permission_classes = (IsAuthenticatedOrReadOnly,)
# Apply pagination to view
pagination_class = PageNumberPagination
# Add search class and fields
filter_backends = (SearchFilter, DjangoFilterBackend, )
# Define search and filter fields with the field names mapped to a list of lookups
fields = {
'author__username': ['icontains'],
'title': ['icontains'],
'article_tags__tag': ['icontains'],
}
search_fields = fields
filter_fields = fields
def post(self, request):
"""
Creates an article
:params HttpRequest: a post request with article data sent by clients
to create a new article.
        :return articleObject: returns a successfully created article
"""
# Retrieve article data from the request object and convert it
# to a kwargs object
# get user data at this point
article = {
'title': request.data.get('title', None),
'body': request.data.get('body', None),
'description': request.data.get('description', None),
'author': request.user.username
}
# pass article data to the serializer class, check whether the data is
# valid and if valid, save it.
serializer = self.serializer_class(data=article)
serializer.is_valid(raise_exception=True)
article = serializer.save()
# retrieve the tags as passed on in the article data
tags = request.data.get('tags', None)
if tags:
create_tag(tags, article)
return Response(serializer.data, status.HTTP_201_CREATED)
@receiver(post_save, sender=Article)
def notify_follower_reciever(sender, instance, created, **kwargs):
"""
Send a notification after the article being created is saved.
"""
if created:
message = (instance.author.username +
" has created an article. Title: " + instance.title)
notify_follower(instance.author, message, instance)
class ArticleDetailsView(generics.RetrieveUpdateDestroyAPIView):
"""
    get:
        Retrieve a single article by slug
    put:
        Update an existing article
    delete:
        Delete an article
"""
serializer_class = ArticleSerializer
renderer_classes = (ArticleJSONRenderer,)
permission_classes = (IsAuthenticatedOrReadOnly,)
def get_object(self, slug):
try:
return Article.objects.get(slug=slug)
except ObjectDoesNotExist:
return None
def get(self, request, slug):
"""
        Retrieve a specific article from the database given its slug.
:params str slug: a slug of an article you want to retrieve
:returns article: a json data for the requested article
"""
article = self.get_object(slug)
if article:
serializer = self.serializer_class(
article, context={'request': request})
return Response(serializer.data, status.HTTP_200_OK)
else:
# return error message indicating article requested is not found.
return Response({
'error': 'Article with given id does not exist'
}, status.HTTP_404_NOT_FOUND)
def delete(self, request, slug):
"""
Delete a given article.
:params slug: a slug of the article to be deleted
request: a request object with authenticated user credentials
:returns json message: a json object containing message to indicate
that the article has been deleted
"""
article = self.get_object(slug)
if not article:
# return error message for non-existing article
return Response({
'error': 'Article with given id does not exist'
}, status.HTTP_404_NOT_FOUND)
# check whether user owns this article before attempting to delete it
if article.author.id == request.user.id:
article.delete()
return Response(
{
'message': "Article deleted successfully"
}, status.HTTP_200_OK)
else:
# prevent a user from deleting an article s/he does not own
return Response({
'error':
'You cannot delete articles belonging to other users.'
}, status.HTTP_403_FORBIDDEN)
def put(self, request, slug):
"""
Update a single article
:params str slug: a slug for the article to be updated
request: a request object with new data for the article
:returns article: An updated article in json format
"""
article = self.get_object(slug)
if not article:
# Tell client we have not found the requested article
return Response({
'error': 'Article requested does not exist'
}, status.HTTP_404_NOT_FOUND)
# check whether user owns this article and proceed if they do
if article.author.id == request.user.id:
request.data['author'] = request.user.username
serializer = self.serializer_class(article, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
tags = request.data.get('tags', None)
# clear all tags in the article before adding new ones
article.article_tags.clear()
if tags:
# add tags to the article
create_tag(tags, article)
return Response(serializer.data, status.HTTP_200_OK)
else:
# prevent a user from updating an article s/he does not own
return Response(
{
'error': 'You cannot edit an article you do not own.'
}, status.HTTP_403_FORBIDDEN)
class FavoriteArticle(generics.CreateAPIView):
"""
A user is able to favourite an article if they had not favourited it.
If they had favourited it, the article becomes unfavourited.
"""
permission_classes = (IsAuthenticated,)
queryset = Article.objects.all()
serializer_class = ArticleSerializer
def post(self, request, slug):
"""
This method handles favouriting and unfavouriting of articles.
        It checks whether the article exists and whether the user has already
        favourited it; if so, the article is unfavourited, otherwise it is
        favourited.
"""
try:
article = Article.objects.get(slug=slug)
except ObjectDoesNotExist:
response = {
"message": "The article does not exist",
}
return Response(response, status=status.HTTP_404_NOT_FOUND)
user = request.user
if user in article.favourited.all():
# User has already favourited it, unfavourites the article
article.favourited.remove(user.id)
article.save()
serializer = self.get_serializer(article)
message = "You have successfully unfavourited this article"
response = {"message": message, "article": serializer.data}
return Response(response, status=status.HTTP_200_OK)
else:
# Favourites the article
article.favourited.add(user.id)
article.save()
serializer = self.get_serializer(article)
message = "You have successfully favourited this article"
response = {"message": message, "article": serializer.data}
return Response(response, status=status.HTTP_200_OK)
class ArticleRatingAPIView(generics.ListCreateAPIView):
"""
get:
Retrieve all article ratings
post:
Create a new article rating
"""
permission_classes = (IsAuthenticated,)
queryset = ArticleRating.objects.all()
serializer_class = ArticleRatingSerializer
renderer_classes = (ArticleJSONRenderer,)
def post(self, request, slug):
"""
Creates an article rating
:params HttpRequest: A post request with article rating data sent by
clients to create a new article rating.
:return: Returns a successfully created article rating
"""
# Retrieve article rating data from the request object and convert it
# to a kwargs object
# get user data at this point
try:
article = Article.objects.get(slug=slug)
except Exception:
response = {'message': 'That article does not exist'}
return Response(response, status=status.HTTP_404_NOT_FOUND)
if article.author.id == request.user.id:
wink_emoji = u"\U0001F609"
data = {
'message':
'We see what you did there {}. Sorry, but you cannot rate your '
'own article.'.format(wink_emoji)
}
return Response(data, status.HTTP_403_FORBIDDEN)
article_rating = {
'article': article.id,
'user': request.user.id,
'rating': request.data.get('rating', None),
}
# pass article data to the serializer class, check whether the data is
# valid and if valid, save it.
serializer = self.serializer_class(data=article_rating)
serializer.is_valid(raise_exception=True)
serializer.save()
# Save the average article rating to the Article model
q = ArticleRating.objects.filter(article_id=article.id).aggregate(
Avg('rating'))
article.rating_average = q['rating__avg']
article.save(update_fields=['rating_average'])
data = {"message": "Thank you for taking time to rate this article."}
data = {
"message":
"Thank you for taking time to rate this article."
}
return Response(data, status.HTTP_201_CREATED)
class ArticleLikes(generics.ListCreateAPIView):
"""
post:
like or dislike an article
"""
serializer_class = LikesSerializer
def get_object(self, slug):
try:
return Article.objects.get(slug=slug)
except ObjectDoesNotExist:
return None
def post(self, request, slug):
"""
creates an article like or a dislike
:params HttpRequest: this request contains a user authorization token
and a json payload in the form{
"like": True/False
}. True is a like while False is a dislike
slug: a slug for the article user wants to like or dislike
        :returns str: message thanking the user for taking time to give their
            opinion on this article
            status code 201: indicates that a new record has been created
            for a like or dislike
"""
        # Check that we have a valid payload before doing any database
        # transactions, since those are comparatively expensive.
        # The variable `like` holds the user's intention: a like or a dislike.
        like = request.data.get('like', None)
        if not isinstance(like, bool):
            return Response(
                {'message':
                 'You must indicate whether you like or dislike this article'
                },
                status.HTTP_400_BAD_REQUEST)
# we continue now since we are sure we have a valid payload
        # Check whether the user has already liked or disliked this article
likes = None
# Let's check whether the article requested exists in our
# database and retrieve it
article = self.get_object(slug)
try:
likes = Likes.objects.get(user=request.user.id, article=article)
except ObjectDoesNotExist:
# let's do nothing here since we are only checking whether user has
# liked or disliked this article
pass
# Alert user if article does not exist
if not article:
return Response(
{
'message': 'Article requested does not exist'
}, status.HTTP_404_NOT_FOUND
)
new_like = {
'article': article.id,
'user': request.user.id,
'like': like
}
# If there is a record for this article and the current user in the
# system, we modify it instead of creating a new one.
if likes:
# user had liked the article but now wants to dislike it
if likes.like and not like:
article.userLikes.remove(request.user)
article.userDisLikes.add(request.user)
# user had disliked this article but now wants to like it
elif not likes.like and like:
article.userLikes.add(request.user)
article.userDisLikes.remove(request.user)
elif like:
# User can only like an article once or dislike an article once
msg = '{}, you already liked this article.'.format(
request.user.username)
return Response(
{
'message': msg
}, status.HTTP_403_FORBIDDEN
)
else:
msg = '{}, you already disliked this article.'.format(
request.user.username)
return Response(
{
'message': msg
}, status.HTTP_403_FORBIDDEN
)
# save the new value/state of the article
article.save()
# There is no need to create a new record; edit the existing one
likes.like = like
likes.save()
else:
# We don't need to do any more operations here
# because this is user's first time to see this article
serializer = self.serializer_class(data=new_like)
serializer.is_valid(raise_exception=True)
serializer.save()
# update likes count or dislikes count for the article
if like:
article.userLikes.add(request.user)
else:
article.userDisLikes.add(request.user)
# save the new state of our article
article.save()
# Tell user we are successful
return Response(
{
'message': (
'Thank you {} for giving your opinion on this '.format(
request.user.username) + 'article.'
)
}, status.HTTP_201_CREATED
)
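# Illustrative request/response sketch for the like endpoint above (an addition, not
# part of the original module). The URL pattern lives in the project's urls.py, which
# is not shown here, so the path below is only a placeholder assumption:
#
#   POST /api/articles/<slug>/like/        (authenticated)
#   {"like": true}   -> records a like for the article
#   {"like": false}  -> records a dislike for the article
#
# Any payload whose "like" value is not a boolean is rejected with HTTP 400, and liking
# (or disliking) the same article twice returns HTTP 403 with an explanatory message.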
class ArticleReportAPIView(generics.ListCreateAPIView):
"""
get:
Retrieve all article reports
post:
Create a new article report
"""
permission_classes = (IsAuthenticated,)
queryset = ArticleReport.objects.all()
serializer_class = ArticleReportSerializer
renderer_classes = (ArticleJSONRenderer,)
def list(self, request, slug):
"""Method for listing all reports."""
try:
article = Article.objects.get(slug=slug)
except Exception:
response = {
'message': 'That article does not exist.'
}
return Response(response, status=status.HTTP_404_NOT_FOUND)
if request.user.is_staff:
queryset = self.get_queryset()
else:
queryset = ArticleReport.objects.filter(user_id=request.user.id)
if not queryset.exists():
response = {
'message': 'No concerns have been raised on this article.'
}
return Response(data=response, status=status.HTTP_404_NOT_FOUND)
serializer = ArticleReportRetrieveSerializer(queryset, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, slug):
"""Method for reporting an article."""
try:
article = Article.objects.get(slug=slug)
except Exception:
response = {
'message': 'That article does not exist.'
}
return Response(response, status=status.HTTP_404_NOT_FOUND)
user_report_count = ArticleReport.objects.filter(
article_id=article.id, user_id=request.user.id).count()
if user_report_count > 4:
response = {
'message':
'You are not allowed to report an article more than five times.'
}
return Response(response, status=status.HTTP_200_OK)
article_report = {
'article': article.id,
'user': request.user.id,
'text': request.data.get('text', None),
}
# pass article data to the serializer class, check whether the data is
# valid and if valid, save it.
serializer = self.serializer_class(data=article_report)
serializer.is_valid(raise_exception=True)
serializer.save()
# Save the total number of reports flagged on this article.
total_report_count = ArticleReport.objects.filter(
article_id=article.id).count()
article.report_count = total_report_count
article.save(update_fields=['report_count'])
data = {
"message":
"Your feedback has been recorded. Authors' "
"Haven thanks you for your service."
}
return Response(data, status.HTTP_201_CREATED)
class ArticleReportRUDAPIView(generics.RetrieveUpdateDestroyAPIView):
"""
get:
Retrieve an article report
delete:
Delete an article report
put:
Update an article report
"""
permission_classes = (IsAuthenticated,)
serializer_class = ArticleReportSerializer
renderer_classes = (ArticleJSONRenderer,)
def get_article_object(self, pk):
""" Getter method for an ArticleReport using pk (primary key)."""
try:
return ArticleReport.objects.get(pk=pk)
except ObjectDoesNotExist:
return None
def get(self, request, slug, pk):
"""The method for retrievieng a sinlge Article Report."""
article_report = self.get_article_object(pk)
"""
Attempt to get an article using the slug.
If article doesn't exist the user will receive a message telling them so
"""
try:
article = Article.objects.get(slug=slug)
except Exception:
response = {
'message': 'That article does not exist.'
}
return Response(response, status=status.HTTP_404_NOT_FOUND)
if article_report:
if request.user.is_staff or request.user == article_report.user:
serializer = ArticleReportRetrieveSerializer(article_report)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data={
'message': 'You are not allowed to view this report.'
}, status=status.HTTP_403_FORBIDDEN)
else:
# return error message indicating article report is not found.
return Response(data={
'message': 'That article report does not exist.'
}, status=status.HTTP_404_NOT_FOUND)
def put(self, request, slug, pk):
article_report = self.get_article_object(pk)
"""
Attempt to get an article using the slug.
If article doesn't exist the user will receive a message telling them so
"""
try:
article = Article.objects.get(slug=slug)
except Exception:
response = {
'message': 'That article does not exist.'
}
return Response(response, status=status.HTTP_404_NOT_FOUND)
if article_report:
if request.user.is_staff or request.user == article_report.user:
article_data = {
'article': article_report.article.id,
'user': request.user.id,
'text': request.data.get('text', None),
}
serializer = self.serializer_class(
article_report, data=article_data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data={
'message': 'You are not allowed to update this report.'
}, status=status.HTTP_403_FORBIDDEN)
else:
# return error message indicating article report is not found.
return Response(data={
'message': 'That article report does not exist.'
}, status=status.HTTP_404_NOT_FOUND)
def delete(self, request, slug, pk):
article_report = self.get_article_object(pk)
"""
Attempt to get an article using the slug.
If article doesn't exist the user will receive a message telling them so
"""
try:
article = Article.objects.get(slug=slug)
except Exception:
response = {
'message': 'That article does not exist.'
}
return Response(response, status=status.HTTP_404_NOT_FOUND)
if article_report:
if request.user.is_staff or request.user == article_report.user:
article_report.delete()
# Save the total number of reports flagged on this article.
total_report_count = ArticleReport.objects.filter(
article_id=article.id).count()
article.report_count = total_report_count
article.save(update_fields=['report_count'])
return Response(data={
'message': "Report was deleted successfully"
}, status=status.HTTP_200_OK)
else:
return Response(data={
'message': 'You are not allowed to delete this report.'
}, status=status.HTTP_403_FORBIDDEN)
else:
# return error message indicating article report is not found.
return Response(data={
'message': 'That article report does not exist.'
}, status=status.HTTP_404_NOT_FOUND)
class ListCreateCommentAPIView(generics.ListCreateAPIView):
"""
Get and Post Comments
"""
permission_classes = (IsAuthenticated, )
queryset = Comment.objects.all()
serializer_class = CommentSerializer
def create(self, request, *args, **kwargs):
"""
Post a comment
"""
article = Article.objects.get(slug=kwargs["slug"])
comment_data = {
'article': article,
'commented_by': request.user.username,
'comment_body': request.data.get('comment_body', None)
}
serializer = self.serializer_class(data=comment_data)
serializer.is_valid(raise_exception=True)
serializer.save(article=article)
return Response(serializer.data)
def get(self, request, slug, *args, **kwargs):
"""Get all comments for a particular article"""
article = Article.objects.get(slug=slug)
comments = Comment.objects.filter(article=article)
        serializer = self.serializer_class(comments, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class RetrieveCommentAPIView(generics.RetrieveDestroyAPIView,
generics.CreateAPIView):
"""
This class contains method to retrieve and delete a comment
"""
permission_classes = (IsAuthenticated, )
queryset = Comment.objects.all()
serializer_class = CommentSerializer
renderer_classes = (ArticleJSONRenderer, )
def create(self, request, pk, *args, **kwargs):
"""
        This method creates a child comment (a threaded reply to the parent comment).
"""
try:
parent = Comment.objects.get(pk=pk)
article = parent.article
except ObjectDoesNotExist:
raise ValidationError("comment with this ID doesn't exist")
comment_data = {
'article': article.slug,
'commented_by': request.user.username,
'comment_body': request.data.get('comment_body', None)
}
serializer = self.serializer_class(data=comment_data)
serializer.is_valid(raise_exception=True)
serializer.save(
parent=parent, article=article, commented_by=request.user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get(self, request, pk, *args, **kwargs):
"""Get a comment instance"""
try:
comment = Comment.objects.get(pk=pk)
except Comment.DoesNotExist:
raise ValidationError("The comment your entered does not exist")
comment_data = {
"comment": comment.comment_body,
"commented_by": comment.commented_by.username,
"created_at": str(comment.created_at),
"parent": comment.parent,
"id": comment.id
}
return Response(comment_data, status=status.HTTP_200_OK)
def delete(self, request, pk, *args, **kwargs):
"""Delete a comment instance"""
try:
comment = Comment.objects.get(pk=pk)
except Comment.DoesNotExist:
raise ValidationError(
"The comment you are trying to delete does not exist")
comment.delete()
return Response({"msg": "You have deleted the comment"})
class RetrieveCommentsofAPIView(generics.ListAPIView):
"""
This class contains method to retrieve comments of a comment
"""
permission_classes = (IsAuthenticated, )
queryset = Comment.objects.all()
serializer_class = CommentSerializer
renderer_classes = (ArticleJSONRenderer, )
def list(self, request, pk, slug):
"""Method for listing all comments of a comment."""
try:
comment = self.queryset.get(pk=pk)
except Comment.DoesNotExist:
raise ValidationError("The comment does not exist")
comments = Comment.objects.filter(parent=comment)
        serializer = self.serializer_class(comments, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class ArticleBookmarkAPIView(generics.CreateAPIView):
"""
post:
Bookmark an article for future reading.
get:
This endpoint is not supported
"""
renderer_classes = (BookmarkJSONRenderer, )
permission_classes = (IsAuthenticatedOrReadOnly, )
serializer_class = BookmarkSerializer
queryset = Bookmark.objects.all()
def get(self, request, slug=None):
return Response(
{'message': 'Sorry {}, this '.format(request.user.username)
+ 'request on this endpoint is not allowed.'
}, status.HTTP_403_FORBIDDEN)
def post(self, request, slug):
try:
article = Article.objects.get(slug=slug)
data = {
'article': article.id,
'user': request.user.id
}
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
bookmark = {
"id": serializer.data['id'],
"article": serializer.data['article']
}
return Response(bookmark, status.HTTP_201_CREATED)
except ObjectDoesNotExist:
return Response(
{
'message': 'Sorry {}, '.format(request.user.username)
                    + 'the article you want to bookmark does not exist'
}, status.HTTP_404_NOT_FOUND
)
class ArticleBookmarkDetailAPIView(generics.RetrieveDestroyAPIView):
"""
get:
    Retrieve a single bookmark or all bookmarks for a logged-in user
delete:
Delete a single or all bookmarks
"""
permission_classes = (IsAuthenticated, )
serializer_class = BookmarkSerializer
queryset = Bookmark.objects.all()
    def get(self, request, pk=None):
        if pk:
            # retrieve a single bookmark belonging to the logged-in user
            bookmarks = Bookmark.objects.filter(pk=pk, user=request.user)
        else:
            # retrieve all bookmarks belonging to the logged-in user
            bookmarks = Bookmark.objects.filter(user=request.user)
        serializer = self.serializer_class(bookmarks, many=True)
        return Response(serializer.data)
def delete(self, request, pk=None):
try:
if pk:
bookmark = Bookmark.objects.get(pk=pk)
if bookmark.user.username == request.user.username:
bookmark.delete()
return Response({'message': "Bookmark deleted successfully"
}, status.HTTP_200_OK)
else:
# prevent a user from deleting a bookmark s/he does not own
return Response({
'error': 'Sorry {}, '.format(request.user.username)
+ 'you cannot delete bookmarks belonging to other users.'
}, status.HTTP_403_FORBIDDEN)
else:
bookmarks = Bookmark.objects.filter(user=request.user)
bookmarks.delete()
return Response({'message': "All bookmarks deleted successfully"
}, status.HTTP_200_OK)
except ObjectDoesNotExist:
return Response({
'message': 'Sorry {}, '.format(request.user.username)
+ 'the bookmark you want to delete does not exist'
}, status.HTTP_404_NOT_FOUND
)
|
python
|
from .dynamo import DynamoClient
from .s3 import S3Client
|
python
|
#! python3
# -*- encoding: utf-8 -*-
'''
Current module: rman.app.rm_task.models
Rough version history:
v1.0 Original version to use
********************************************************************
@AUTHOR: Administrator-Bruce Luo(罗科峰)
MAIL: [email protected]
RCS: rman.app.rm_task.models, v1.0 December 5, 2019
FROM: December 5, 2019
********************************************************************
======================================================================
Provide a function for the automation test
'''
from rman.app import db
from sqlalchemy import Column, Integer, String, DateTime
class Rmtask(db.Model):
    ''' Test project '''
__tablename__ = 't_rtsf_task'
id = Column(Integer, primary_key=True)
    case = Column(String(64), nullable = False, comment = u'Test suite name')
    desc = Column(String(64), nullable = True, comment = u'Task description')
    tid = Column(String(128), nullable = True, comment = u'Task ID')
    status = Column(Integer, nullable = True, default=0, comment = u'0 - not executed, 1 - running, 2 - succeeded, 3 - failed, 4 - invalid script, 5 - Redis service error')
    report_url = Column(String(128), nullable = True, comment = u'Report URL')
    report_path = Column(String(128), nullable = True, comment = u'Report path')
create_time = Column(DateTime, nullable = False)
update_time = Column(DateTime, nullable = False)
def __init__(self, **kwargs):
_ = [setattr(self, k, v) for k,v in kwargs.items()]
def __repr__(self):
return '<Rmtask %r>' % (self.id)
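# A minimal usage sketch (an addition, not part of the original module) showing how the
# kwargs-based __init__ above is meant to be used; db.session is assumed to be configured
# by rman.app as in any Flask-SQLAlchemy application.
#
#   from datetime import datetime
#   task = Rmtask(case='smoke_suite', desc='nightly smoke run', tid='T-001', status=0,
#                 create_time=datetime.now(), update_time=datetime.now())
#   db.session.add(task)
#   db.session.commit()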
|
python
|
#!/usr/bin/python3.3
# -*- coding: utf-8 -*-
# core.py
# Functions:
# [X] loading servers details
# [X] loading servers config (in which is found username, etc.)
# [ ] logging to disk commands and status
# [ ] loading and providing configuration e.g. is_enabled()
# [ ] provides ini reading interface
import irc.bot, configparser
import sys
class ServerSpec(irc.bot.ServerSpec):
def __init__(self, host, port, password, nickname, username, realname, channels, modes):
if password == '':
password = None
super().__init__(host, port, password)
self.nickname = nickname
self.realname = realname
self.username = username
self.channels = channels
self.modes = modes
# LOADING SERVERS CONFIG
#sys.argv[1] = 'quakenet'
#TODO: ^spoofing the cmdline for testing purposes, TO REMOVE
print('booting up...')
_serversparser = configparser.ConfigParser()
_serversparser.read('config/servers.ini')
print('available servers:', ', '.join(_serversparser.sections()))
assert len(sys.argv) > 1, 'you must provide a server to connect to'
assert sys.argv[1] in _serversparser.sections(), '{0} server does not exist'.format(sys.argv[1])
print('will connect to {0} ({1}:{2})'.format(sys.argv[1],
_serversparser[sys.argv[1]]['host'],
_serversparser[sys.argv[1]]['port']))
#loading server details
server_config = configparser.ConfigParser()
server_config.read('config/{0}.ini'.format(sys.argv[1]))
details = server_config['details']
def write_config():
with open('config/{0}.ini'.format(sys.argv[1]), mode='w') as f:
server_config.write(f)
#creating the ServerSpec object
chosen_server = ServerSpec(_serversparser[sys.argv[1]]['host'],
int(_serversparser[sys.argv[1]]['port']),
_serversparser[sys.argv[1]]['password'],
details['nickname'],
details['username'],
details['realname'],
details['channels'].split(','),
details['modes'])
with open('VERSION') as file:
version = file.read()
def split(txt, target):
# split according to \n in text
    # split in 512 bytes (and be careful not to split in the middle of a multi-byte UTF-8 sequence)
final_text = []
for i in txt.split('\n'):
if len(i.encode())+len(target.encode()) >= 500:
# "PRIVMSG #channel :message\r\n" must not exceed 512 bytes
            s = i.encode()
            splitted = []
            while s:
                cursor = 500 - len(target.encode())
                # back off so the cut never lands inside a multi-byte UTF-8 sequence
                while cursor > 1:
                    try:
                        s[:cursor].decode()
                        break
                    except UnicodeDecodeError:
                        cursor -= 1
                splitted.append(s[:cursor])
                s = s[cursor:]
            final_text += [k.decode() for k in splitted]
else:
final_text.append(i)
return final_text
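# Example (illustrative, an addition to the original module): splitting a long message
# for target '#example' yields chunks that each fit in one "PRIVMSG #example :<chunk>\r\n"
# line under the 512-byte IRC limit; multi-line text is split on '\n' first.
#
#   >>> chunks = split('first line\n' + 'x' * 600, '#example')
#   >>> all(len(c.encode()) + len('#example') < 512 for c in chunks)
#   True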
reloadable_modules = ('functions', 'weather', 'google', 'soundcloud', 'parse_links', 'admin')
def stop(): raise StopIteration()
triggersparser = configparser.ConfigParser()
triggersparser.read('strings/triggers.ini')
triggers = triggersparser['triggers']
def savetriggers():
with open('strings/triggers.ini', mode='w') as file:
triggersparser.write(file)
|
python
|
##############################################################
# Customer Issue Prediction Model
#-------------------------------------------------------------
# Author : Alisa Ai
#
##############################################################
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly
import plotly.graph_objs as go
from plotly import tools
from chart_studio import plotly
import csv
import numpy as np
import pandas as pd
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from bs4 import BeautifulSoup
import re
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
import dill
##############################################################
app = dash.Dash()
#app.layout = html.Div(children=[
#html.H1(children='Predict Customer Issues', style={'textAlign': 'center'}),
#html.Div(children=[
#html.Label('Enter you complaints: '),
#dcc.Input(id='complaints-text', placeholder='Complaints', type='text'),
#html.Div(id='result')
#], style={'textAlign': 'center'}),
app.css.append_css({'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'})
layout = dict(
autosize=True,
height=450,
font=dict(color="#191A1A"),
titlefont=dict(color="#191A1A", size='14'),
margin=dict(
l=45,
r=15,
b=45,
t=35
)
)
app.layout = html.Div([
# Title - Row
html.Div(
[
html.Div(
[
html.H1(
'Customer Issue Prediction App',
style={'font-family': 'Helvetica',
"margin-left": "20",
"margin-bottom": "0"},
className='eight columns',
)
],
className='row'
),
html.Div(
[
html.H4(
'--Developed by Alisa Ai--',
style={'font-family': 'Helvetica',
"margin-left": "10",
"margin-bottom": "0"},
className='three columns',
)
],
className='row'
)
]),
#block 2
html.Div([
dcc.Store(id = 'memory'),
html.Div(
[
html.Div(
[
html.Label('Enter your complaints here: '),
dcc.Input(id='complaints-text', placeholder='Complaints', type='text',
style=dict(width='1000px', height='100px', display='inline-block', verticalAlign="middle"))],
className='eight columns',
style={"height": "auto", "width": "2000px", "margin-bottom": "auto", 'whiteSpace': 'pre-line' }
),
html.Div(
[
html.P('Select Your Product:'),
dcc.Dropdown(id = 'product', options=[
{'label': 'Checking or savings account', 'value': 1},
{'label': 'Consumer Loan', 'value': 2},
{'label': 'Credit card or prepaid card', 'value': 3},
{'label': 'Credit reporting, credit repair services, or other personal consumer reports', 'value': 4},
{'label': 'Debt collection', 'value': 5},
{'label': 'Money transfer, virtual currency, or money service', 'value': 6},
{'label': 'Mortgage', 'value': 7},
{'label': 'Other financial service', 'value': 8},
{'label': 'Payday loan, title loan, or personal loan', 'value': 9},
{'label': 'Student loan', 'value': 10},
{'label': 'Vehicle loan or lease', 'value': 11}],
placeholder="Select Your Product",
style=dict(width='300px', height='40px', display='inline-block', verticalAlign="middle"))],
className='three columns',
style={"height": "auto", "width": "2000px", "margin-bottom": "auto"}
),
html.Div(
[
html.P('Select Your State:'),
dcc.Dropdown(
id = 'state', options=[
{'label': 'FL', 'value': 22},
{'label': 'GA', 'value': 23},
{'label': 'IL', 'value': 24},
{'label': 'NC', 'value': 25},
{'label': 'NJ', 'value': 26},
{'label': 'NY', 'value': 27},
{'label': 'OH', 'value': 28},
{'label': 'PA', 'value': 29},
{'label': 'TX', 'value': 30},
{'label': 'Other', 'value': 31}],
placeholder="Select Your State",
style=dict(width='300px', height='40px', display='inline-block', verticalAlign="middle"))],
className='three columns',
style={"height": "auto", "width": "2000px", "margin-bottom": "auto"}
),
html.Div(
[
html.Button('Submit', id='button_1')
],
className='one columns',
style={'margin-bottom': 'auto'}
),
html.Div(id='result')],
style={'textAlign': 'center'})
])
])
@app.callback(
Output(component_id='result', component_property='children'),
[Input(component_id='complaints-text', component_property='value'),
Input(component_id='product', component_property='value'),
Input(component_id='state', component_property='value'),
Input('button_1', 'n_clicks')]
)
def update_issue(complaints, pro, stat, n_clicks):
if n_clicks is not None:
        if complaints:
try:
############# vaderSentiment
text = re.sub("[XX$]"," ", complaints)
text = re.sub(r'\s+', ' ', text)
analyser = SentimentIntensityAnalyzer()
pos = analyser.polarity_scores(text)['pos']
neg = analyser.polarity_scores(text)['neg']
############# Clean
text2 = re.sub("[^a-zA-Z]"," ", text)
stopword = set(stopwords.words('english'))
text2 = ' '.join([word for word in text2.split() if word not in (stopword)])
porter_stemmer = PorterStemmer()
text2 = porter_stemmer.stem(text2)
############# input organize
index_dict = {
'Product_Checking or savings account': 1,
'Product_Consumer Loan': 2,
'Product_Credit card or prepaid card': 3,
'Product_Credit reporting, credit repair services, or other personal consumer reports': 4,
'Product_Debt collection': 5,
'Product_Money transfer, virtual currency, or money service': 6,
'Product_Mortgage': 7,
'Product_Other financial service': 8,
'Product_Payday loan, title loan, or personal loan': 9,
'Product_Student loan': 10,
'Product_Vehicle loan or lease': 11,
'State_FL': 12,
'State_GA': 13,
'State_IL': 14,
'State_NC': 15,
'State_NJ': 16,
'State_NY': 17,
'State_OH': 18,
'State_PA': 19,
'State_TX': 20,
'State_Other': 21}
def dummy(index_dict, pro, stat):
                    for key, value in index_dict.items():  # flag the selected product and state
if pro == value:
index_dict[key] = 100
if stat == value:
index_dict[key] = 100
for key, value in index_dict.items():
if value < 100:
index_dict[key] = 0
if value == 100:
index_dict[key] = 1
return index_dict
attribute_index = dummy(index_dict=index_dict, pro=pro, stat=stat)
attribute_index['positive_score'] = pos
attribute_index['negative_score'] = neg
                attribute_index['clean_sentences'] = text2
input_data = pd.DataFrame(attribute_index, index=[0])
issue = model.predict(input_data)[0]
                return 'It looks like you are facing this issue: {}. Our customer service manager will contact you very soon.'.format(str(issue))
except ValueError:
return 'Unable to predict issue'
if __name__ == '__main__':
with open('/Users/hengyuai/Documents/QMSS_1/PD/Customer-Issue_prediction/pipeline.pkl', 'rb') as file:
model = dill.load(file)
app.run_server(debug=True)
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from urllib import quote
import jsonpickle
from cairis.core.Countermeasure import Countermeasure
from cairis.core.Target import Target
from cairis.core.CountermeasureEnvironmentProperties import CountermeasureEnvironmentProperties
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
from cairis.tools.PseudoClasses import SecurityAttribute, CountermeasureTarget, CountermeasureTaskCharacteristics
import os
from cairis.mio.ModelImport import importModelFile
__author__ = 'Shamal Faily'
class CountermeasureAPITests(CairisDaemonTestCase):
@classmethod
def setUpClass(cls):
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
def setUp(self):
# region Class fields
self.logger = logging.getLogger(__name__)
self.existing_countermeasure_name = 'Location-based X.509 extension'
self.existing_countermeasure_type = 'Information'
self.existing_countermeasure_description = 'X.509 certificates extended to tie client workstations so NeuroGrid tasks can only be carried out on these.'
self.existing_environment_name = 'Psychosis'
self.existing_requirements = ['User certificate']
self.existing_targets = [CountermeasureTarget('Certificate Ubiquity','High','Discourages certificate sharing')]
self.existing_properties = []
self.existing_rationale = ['None','None','None','None','None','None','None','None']
self.existing_cost='Medium'
self.existing_roles=['Data Consumer','Certificate Authority']
self.existing_personas=[CountermeasureTaskCharacteristics('Upload data','Claire','None','None','None','Low Hindrance'),CountermeasureTaskCharacteristics('Download data','Claire','None','None','None','Low Hindrance')]
countermeasure_class = Countermeasure.__module__+'.'+Countermeasure.__name__
# endregion
def test_get_all(self):
method = 'test_get_all'
rv = self.app.get('/api/countermeasures?session_id=test')
countermeasures = jsonpickle.decode(rv.data)
self.assertIsNotNone(countermeasures, 'No results after deserialization')
self.assertIsInstance(countermeasures, dict, 'The result is not a dictionary as expected')
self.assertGreater(len(countermeasures), 0, 'No countermeasures in the dictionary')
self.logger.info('[%s] Countermeasures found: %d', method, len(countermeasures))
countermeasure = countermeasures.values()[0]
self.logger.info('[%s] First countermeasure: %s [%d]\n', method, countermeasure['theName'], countermeasure['theId'])
def test_get_by_name(self):
method = 'test_get_by_name'
url = '/api/countermeasures/name/%s?session_id=test' % quote(self.existing_countermeasure_name)
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
self.logger.debug('[%s] Response data: %s', method, rv.data)
countermeasure = jsonpickle.decode(rv.data)
self.assertIsNotNone(countermeasure, 'No results after deserialization')
self.logger.info('[%s] Countermeasure: %s [%d]\n', method, countermeasure['theName'], countermeasure['theId'])
def test_delete(self):
method = 'test_delete'
url = '/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name())
new_countermeasure_body = self.prepare_json()
self.app.delete(url)
self.logger.info('[%s] Object to delete: %s', method, new_countermeasure_body)
self.app.post('/api/countermeasures', content_type='application/json', data=new_countermeasure_body)
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.delete(url)
self.logger.info('[%s] Response data: %s', method, rv.data)
self.assertIsNotNone(rv.data, 'No response')
json_resp = jsonpickle.decode(rv.data)
self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s\n', method, message)
def test_post(self):
method = 'test_post'
url = '/api/countermeasures'
self.logger.info('[%s] URL: %s', method, url)
new_countermeasure_body = self.prepare_json()
self.app.delete('/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name()))
rv = self.app.post(url, content_type='application/json', data=new_countermeasure_body)
self.logger.debug('[%s] Response data: %s', method, rv.data)
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp, 'No results after deserialization')
env_id = json_resp.get('countermeasure_id', None)
self.assertIsNotNone(env_id, 'No countermeasure ID returned')
self.assertGreater(env_id, 0, 'Invalid countermeasure ID returned [%d]' % env_id)
self.logger.info('[%s] Countermeasure ID: %d\n', method, env_id)
rv = self.app.delete('/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name()))
def test_target_names(self):
method = 'test_countermeasure-targets-by-requirement-get'
url = '/api/countermeasures/targets/environment/Psychosis?requirement=User%20certificate&session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
targetList = jsonpickle.decode(rv.data)
self.assertIsNotNone(targetList, 'No results after deserialization')
self.assertGreater(len(targetList), 0, 'No targets returned')
self.logger.info('[%s] Targets found: %d', method, len(targetList))
self.assertEqual(targetList[0],'Certificate ubiquity')
self.assertEqual(targetList[1],'Social engineering')
def test_task_names(self):
method = 'test_countermeasure-tasks-by-role-get'
url = '/api/countermeasures/tasks/environment/Psychosis?role=Certificate%20Authority&role=Data%20Consumer&role=Researcher&session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
taskList = jsonpickle.decode(rv.data)
self.assertIsNotNone(taskList, 'No results after deserialization')
self.assertEqual(len(taskList),2)
self.assertEqual(taskList[0]['theTask'],'Download data')
self.assertEqual(taskList[0]['thePersona'],'Claire')
self.assertEqual(taskList[1]['theTask'],'Upload data')
self.assertEqual(taskList[1]['thePersona'],'Claire')
def test_put(self):
method = 'test_put'
url = '/api/countermeasures'
self.logger.info('[%s] URL: %s', method, url)
new_countermeasure_body = self.prepare_json()
rv = self.app.delete('/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name()))
rv = self.app.post(url, content_type='application/json', data=new_countermeasure_body)
self.logger.debug('[%s] Response data: %s', method, rv.data)
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp, 'No results after deserialization')
env_id = json_resp.get('countermeasure_id', None)
self.assertIsNotNone(env_id, 'No countermeasure ID returned')
self.assertGreater(env_id, 0, 'Invalid countermeasure ID returned [%d]' % env_id)
self.logger.info('[%s] Countermeasure ID: %d', method, env_id)
countermeasure_to_update = self.prepare_new_countermeasure()
countermeasure_to_update.theName = 'Edited test countermeasure'
countermeasure_to_update.theId = env_id
upd_env_body = self.prepare_json(countermeasure=countermeasure_to_update)
rv = self.app.put('/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name()), data=upd_env_body, content_type='application/json')
self.assertIsNotNone(rv.data, 'No response')
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp)
self.assertIsInstance(json_resp, dict)
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s', method, message)
self.assertGreater(message.find('successfully updated'), -1, 'The countermeasure was not successfully updated')
rv = self.app.get('/api/countermeasures/name/%s?session_id=test' % quote(countermeasure_to_update.name()))
upd_countermeasure = jsonpickle.decode(rv.data)
self.assertIsNotNone(upd_countermeasure, 'Unable to decode JSON data')
self.logger.debug('[%s] Response data: %s', method, rv.data)
self.logger.info('[%s] Countermeasure: %s [%d]\n', method, upd_countermeasure['theName'], upd_countermeasure['theId'])
rv = self.app.delete('/api/countermeasures/name/%s?session_id=test' % quote(countermeasure_to_update.theName))
def test_generate_asset(self):
method = 'test_generate_asset'
url = '/api/countermeasures/name/' + quote(self.existing_countermeasure_name) + '/generate_asset?session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.post(url, content_type='application/json',data=jsonpickle.encode({'session_id':'test'}))
self.assertIsNotNone(rv.data, 'No response')
self.logger.debug('[%s] Response data: %s', method, rv.data)
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp, 'No results after deserialization')
self.assertIsInstance(json_resp, dict)
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s\n', method, message)
self.assertGreater(message.find('successfully generated'), -1, 'Countermeasure asset not generated')
def prepare_new_countermeasure(self):
new_countermeasure_props = [
CountermeasureEnvironmentProperties(
environmentName=self.existing_environment_name,
requirements=self.existing_requirements,
targets=self.existing_targets,
properties=self.existing_properties,
rationale=self.existing_rationale,
cost=self.existing_cost,
roles=self.existing_roles,
personas=self.existing_personas)
]
new_countermeasure = Countermeasure(
cmId=-1,
cmName='New countermeasure',
cmDesc='New CM description',
cmType='Information',
tags=[],
cProps=[]
)
new_countermeasure.theEnvironmentProperties = new_countermeasure_props
new_countermeasure.theEnvironmentDictionary = {}
delattr(new_countermeasure, 'theEnvironmentDictionary')
return new_countermeasure
def prepare_dict(self, countermeasure=None):
if countermeasure is None:
countermeasure = self.prepare_new_countermeasure()
else:
assert isinstance(countermeasure, Countermeasure)
return {
'session_id': 'test',
'object': countermeasure,
}
def prepare_json(self, data_dict=None, countermeasure=None):
if data_dict is None:
data_dict = self.prepare_dict(countermeasure=countermeasure)
else:
assert isinstance(data_dict, dict)
new_countermeasure_body = jsonpickle.encode(data_dict, unpicklable=False)
self.logger.info('JSON data: %s', new_countermeasure_body)
return new_countermeasure_body
|
python
|
import pytest
import pandas
@pytest.fixture(scope="session")
def events():
return pandas.read_pickle("tests/data/events_pickle.pkl")
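# A minimal usage sketch (hypothetical test in the same suite, not part of the original file):
# the session-scoped fixture is injected by name and yields the unpickled DataFrame once per session.
#
# def test_events_loaded(events):
#     assert not events.empty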
|
python
|
## https://weinbe58.github.io/QuSpin/examples/example7.html
## https://weinbe58.github.io/QuSpin/examples/example15.html
## https://weinbe58.github.io/QuSpin/examples/user-basis_example0.html
## https://weinbe58.github.io/QuSpin/user_basis.html
## https://weinbe58.github.io/QuSpin/generated/quspin.basis.spin_basis_1d.html
from __future__ import print_function, division
from quspin.operators import hamiltonian # operators
from quspin.basis import spin_basis_1d # Hilbert space spin basis
import numpy as np # general math functions
#
###### define model parameters ######
Jleg = 1.0 # spin-spin interaction, leg
Jrung = 1.0 # spin-spin interaction, rung
L = 12 # length of chain
N = 2*L # number of sites
###### setting up bases ######
#basis_1d = spin_basis_1d(L=N,Nup=N//2,S="1/2",pauli=0)
basis_1d = spin_basis_1d(L=N,Nup=N//2,S="1/2",pauli=0,a=2,kblock=0,pblock=1,zblock=1)## even L
#basis_1d = spin_basis_1d(L=N,Nup=N//2,S="1/2",pauli=0,a=2,kblock=0,pblock=-1,zblock=-1)## odd L
###### setting up hamiltonian ######
Jzzs = \
[[Jleg,i,(i+2)%N] for i in range(0,N,2)] \
+ [[Jleg,i,(i+2)%N] for i in range(1,N,2)] \
+ [[Jrung,i,i+1] for i in range(0,N,2)]
Jpms = \
[[0.5*Jleg,i,(i+2)%N] for i in range(0,N,2)] \
+ [[0.5*Jleg,i,(i+2)%N] for i in range(1,N,2)] \
+ [[0.5*Jrung,i,i+1] for i in range(0,N,2)]
Jmps = \
[[0.5*Jleg,i,(i+2)%N] for i in range(0,N,2)] \
+ [[0.5*Jleg,i,(i+2)%N] for i in range(1,N,2)] \
+ [[0.5*Jrung,i,i+1] for i in range(0,N,2)]
static = [["zz",Jzzs],["+-",Jpms],["-+",Jmps]]
# build hamiltonian
#H = hamiltonian(static,[],static_fmt="csr",basis=basis_1d,dtype=np.float64)
no_checks = dict(check_symm=False, check_pcon=False, check_herm=False)
H = hamiltonian(static,[],static_fmt="csr",basis=basis_1d,dtype=np.float64,**no_checks)
# diagonalise H
#ene,vec = H.eigsh(time=0.0,which="SA",k=2)
ene = H.eigsh(which="SA",k=2,return_eigenvectors=False); ene = np.sort(ene)
print(Jleg,Jrung,N,ene[0]/N)
## 2-leg ladder (L=inf): -0.578043140180 (PhysRevB.89.094424, see also PhysRevB.54.R3714, PhysRevB.47.3196)
|
python
|
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
def test_create_user_email_successful(self):
email='hello.com'
password='123123'
user =get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertEqual(user.check_password(password),True)
def test_new_user_normalize(self):
email="[email protected]"
user =get_user_model().objects.create_user(
email,'123123'
)
self.assertEqual(user.email,email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None,'test123')
def test_create_new_super_user(self):
user=get_user_model().objects.create_superuser(
'test@example.com',
'123123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
|
python
|
""" Itertools examples """
import itertools
import collections
import operator
import os
# itertools.count can provide an infinite counter.
for i in itertools.count(step=1):
    print(i)
    if i == 20: break
# itertools.cycle cycles through an iterator
# Will keep printing 'python'
for i, j in enumerate(itertools.cycle(['python'])):
    print(j)
    if i == 10: break
# itertools.repeat keeps yielding the same object over and over.
# Materialize three repetitions of range(10) with list() to see the output.
print(list(itertools.repeat(range(10), 3)))
# chain returns elements from 'n' iterators until they are exhausted.
# Make a dictionary of count of letters in a list of strings.
birds = ['parrot','crow','dove','peacock','macaw','hen']
frequency = collections.defaultdict(int)
for letter in itertools.chain(*birds):
    frequency[letter] += 1
print(frequency)
# takewhile returns elements as long as a predicate (condition) is True.
# Give the list of countries until 'Pakistan' is reached.
countries = ['U.S','U.K','India','Australia','Malaysia','Pakistan']
print(list(itertools.takewhile(lambda x: x != 'Pakistan', countries)))
# dropwhile keeps dropping elements while the predicate is True.
# Produce an iterator of files larger than a minimum size in the current folder.
files = sorted([(file, os.path.getsize(file)) for file in os.listdir(".")],
               key=operator.itemgetter(1))
print(list(itertools.dropwhile(lambda x: x[1] < 8192, files)))
|
python
|
# Generated by Django 3.1.7 on 2021-09-06 20:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('upload', '0004_auto_20210623_2006'),
]
operations = [
migrations.AlterField(
model_name='thumbnails',
name='large',
field=models.CharField(max_length=20, verbose_name='relative location of large thumbnail'),
),
migrations.AlterField(
model_name='thumbnails',
name='small',
field=models.CharField(max_length=20, verbose_name='relative location of small thumbnail'),
),
]
|
python
|
# ------------------------------------------------------------------
# Step 1: import scipy and pyamg packages
# ------------------------------------------------------------------
import numpy as np
import pyamg
import matplotlib.pyplot as plt
# ------------------------------------------------------------------
# Step 2: setup up the system using pyamg.gallery
# ------------------------------------------------------------------
n = 200
X, Y = np.meshgrid(np.linspace(0, 1, n), np.linspace(0, 1, n))
stencil = pyamg.gallery.diffusion_stencil_2d(type='FE', epsilon=0.001, theta=np.pi / 3)
A = pyamg.gallery.stencil_grid(stencil, (n, n), format='csr')
b = np.random.rand(A.shape[0]) # pick a random right hand side
# ------------------------------------------------------------------
# Step 3: setup of the multigrid hierarchy
# ------------------------------------------------------------------
ml = pyamg.smoothed_aggregation_solver(A) # construct the multigrid hierarchy
# ------------------------------------------------------------------
# Step 4: solve the system
# ------------------------------------------------------------------
res1 = []
x = ml.solve(b, tol=1e-12, residuals=res1) # solve Ax=b to a tolerance of 1e-12
# ------------------------------------------------------------------
# Step 5: print details
# ------------------------------------------------------------------
print(ml) # print hierarchy information
print("residual norm is", np.linalg.norm(b - A * x)) # compute norm of residual vector
print("\n\n\n\n\n")
# notice that there are 5 (or maybe 6) levels in the hierarchy
#
# we can look at the data in each of the levels
# e.g. the multigrid components on the finest (0) level
# A: operator on level 0
# P: prolongation operator mapping from level 1 to level 0
# R: restriction operator mapping from level 0 to level 1
# B: near null-space modes for level 0
# presmoother: presmoothing function taking arguments (A,x,b)
# postsmoother: postsmoothing function taking arguments (A,x,b)
print(dir(ml.levels[0]))
# e.g. the multigrid components on the coarsest (4) level
print(dir(ml.levels[-1]))
# there are no interpolation operators (P,R) or smoothers on the coarsest level
# check the size and type of the fine level operators
print('type = ', ml.levels[0].A.format)
print(' A = ', ml.levels[0].A.shape)
print(' P = ', ml.levels[0].P.shape)
print(' R = ', ml.levels[0].R.shape)
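# A small illustrative sketch (not part of the original example): walk the hierarchy built
# above and report each level's operator size and nonzero count, to make the coarsening visible.
for lvl_idx, lvl in enumerate(ml.levels):
    print('level %d: A is %s with %d nonzeros' % (lvl_idx, str(lvl.A.shape), lvl.A.nnz))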
print("\n\n\n\n\n")
# ------------------------------------------------------------------
# Step 6: change the hierarchy
# ------------------------------------------------------------------
# we can also change the details of the hierarchy
ml = pyamg.smoothed_aggregation_solver(A, # the matrix
B=X.reshape(n * n, 1), # the representation of the near null space (this is a poor choice)
BH=None, # the representation of the left near null space
symmetry='hermitian', # indicate that the matrix is Hermitian
strength='evolution', # change the strength of connection
aggregate='standard', # use a standard aggregation method
smooth=('jacobi', {'omega': 4.0 / 3.0, 'degree': 2}), # prolongation smoothing
presmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),
postsmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),
improve_candidates=[('block_gauss_seidel',
{'sweep': 'symmetric', 'iterations': 4}), None],
max_levels=10, # maximum number of levels
max_coarse=5, # maximum number on a coarse level
keep=False) # keep extra operators around in the hierarchy (memory)
# ------------------------------------------------------------------
# Step 7: print details
# ------------------------------------------------------------------
res2 = [] # keep the residual history in the solve
x = ml.solve(b, tol=1e-12, residuals=res2) # solve Ax=b to a tolerance of 1e-12
print(ml) # print hierarchy information
print("residual norm is", np.linalg.norm(b - A * x)) # compute norm of residual vector
print("\n\n\n\n\n")
# ------------------------------------------------------------------
# Step 8: plot convergence history
# ------------------------------------------------------------------
plt.semilogy(res1)
plt.semilogy(res2)
plt.title('Residual Histories')
plt.legend(['Default Solver', 'Specialized Solver'])
plt.xlabel('Iteration')
plt.ylabel('Relative Residual')
plt.show()
|
python
|
import os
import shutil
import wget
import json
import logging
import tempfile
import traceback
import xml.etree.ElementTree as ET
import networkx as nx
from pml import *
#logging.basicConfig(level=logging.INFO)
def get_file(file, delete_on_exit = []):
# due to current limitations of DMC, allow shortened URLs
if "://" not in file:
file = "http://" + file
if file.startswith("file://"):
return file[7:]
else:
# create temp file to store the file contents
fd, tmpfile = tempfile.mkstemp()
os.close(fd)
os.unlink(tmpfile)
# download the file contents
wget.download(file.replace("?dl=0", "?dl=1"), tmpfile)
delete_on_exit.append(tmpfile)
return tmpfile
def write_outputs(file, fields):
with open(file, "w") as f:
for name, value in fields.items():
f.write(str(name) + " = " + str(value))
f.write("\n")
def exit_with_message(message):
write_outputs("output.txt", { "message" : message})
exit(-1)
def read_inputs(file):
inputs = {}
with open(file, "r") as f:
for line in f:
if len(line.strip()) > 0:
tokens = line.split("=")
inputs[tokens[0].strip()] = tokens[1].strip()
return inputs
def validate_inputs(inputs, fields):
for name, (required, type) in fields.items():
if required and name not in inputs:
exit_with_message("missing required input " + str(name))
if name in inputs:
inputs[name] = type(inputs[name])
return inputs
def process(input_file, user_constants=None, weight="cost"):
# Initialize the system
auto_register("library")
# Update system with user-defined constants
if user_constants is not None:
load_constants(user_constants)
# Load structure from iFAB BOM
processGraph = load_ebom(input_file)
# Expand the process graph using the PML models
expand_graph(processGraph)
# Save graph as image
as_png(processGraph, "graph.png")
# Validate the graph by ensuring routings exist
if validate_graph(processGraph):
# Find the routing that optimizes the user-defined weight (e.g., cost or time)
(_, selected_processes) = find_min(processGraph, weight=weight)
minimumGraph = create_subgraph(processGraph, selected_processes)
# Save the minimum routings to a graph
as_png(minimumGraph, "minimumGraph.png")
# Compute the cost and time
total_cost = sum_weight(minimumGraph, weight="cost")
total_time = sum_weight(minimumGraph, weight="time")
# Output the results
write_outputs("output.txt", { "message" : "Design is manufacturable",
"cost" : float(total_cost / dollars),
"time" : float(total_time / days) })
else:
exit_with_message("Unable to manufacture design, no routings exist")
if __name__ == "__main__":
try:
INPUT_DEFN = { "inputFile" : (True, str), "userConstants" : (False, str), "optimizeWeight" : (False, str)}
# read and validate the inputs from DOME
inputs = read_inputs("input.txt")
inputs = validate_inputs(inputs, INPUT_DEFN)
# convert inputs to kwargs, track any temporary files
kwargs = {}
delete_on_exit = []
kwargs["input_file"] = get_file(inputs["inputFile"], delete_on_exit)
if "userConstants" in inputs:
kwargs["user_constants"] = get_file(inputs["userConstants"], delete_on_exit)
if "optimizeWeight" in inputs:
kwargs["weight"] = inputs["optimizeWeight"]
# process the submission
process(**kwargs)
# delete the temporary files
for file in delete_on_exit:
os.unlink(file)
except Exception as e:
traceback.print_exc()
exit_with_message("An error occurred: " + str(e))
|
python
|
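# Reads a digit a and prints a + aa + aaa built by string repetition
# (e.g. a = 5 gives 5 + 55 + 555 = 615).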
a = int(input("input an integer: "))
n1=int("%s" % a)
n2=int("%s%s" % (a,a))
n3=int("%s%s%s" % (a,a,a))
print(n1+n2+n3)
|
python
|
# PROGRAM TO CALCULATE THE DIMENSIONS OF A PROPERTY-LINE SPREAD FOOTING (SAPATA DE DIVISA)
import math
print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-= OTIMIZAÇÃO DE SAPATA DE DIVISA =-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
print("Utilize sempre PONTO (.) NÃO VÍRGULA (,)")
lado_A = float(input("Qual o Tamanho do Lado A? "))
lado_B = float(input("Qual o Tamanho do Lado B? "))
area = lado_A*lado_B
A = math.sqrt((area/2))
B = A*2
print("\nO Lado maior A pode ser 2 ou 2.5 vezes maior que B.\n"
"Dessa forma, a sapata otimizada possui as seguintes Dimensões:\n ")
#print("Sua sapata possui uma área de: {} m²" .format(area))
print("O Lado A fica com {} m" .format(A))
print("O Lado B fica com {} m" .format(B))
print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-= OTIMIZAÇÃO DE SAPATA DE DIVISA =-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
|
python
|
from math import e
import pandas as pd
from core.mallows import Mallows
from core.patterns import ItemPref, Pattern, PATTERN_SEP
def get_itempref_of_2_succ_3_succ_m(m=10) -> ItemPref:
return ItemPref({i: {i + 1} for i in range(1, m - 1)})
def get_test_case_of_itempref(pid=0):
row = pd.read_csv('data/test_cases_item_prefs.csv').iloc[pid]
pref = ItemPref.from_string(row['pref'])
mallows = Mallows(list(range(row['m'])), row['phi'])
p_exact = e ** row['log_p']
return pref, mallows, p_exact
def get_test_case_of_pattern(pid=0):
row = pd.read_csv('data/test_cases_label_patterns.csv').iloc[pid]
pattern = Pattern.from_string(row['pattern'])
mallows = Mallows(list(range(row['m'])), row['phi'])
p_exact = e ** row['log_p']
return pattern, mallows, p_exact
def get_test_case_of_patterns_from_movielens_2_labels(rid=0):
p_exact = pd.read_csv('data/output_movielens_ramp-vs-amp_2labels_exact.csv').loc[rid, 'p_exact']
row = pd.read_csv('data/input_movielens_ramp-vs-amp_2labels.csv').loc[rid]
center = eval(row['ranking'])
mallows = Mallows(center=center, phi=row['phi'])
patterns = [Pattern.from_string(pattern_str) for pattern_str in row['patterns'].split(PATTERN_SEP)]
return patterns, mallows, p_exact
def get_test_case_of_patterns_from_movielens_linear(rid=0):
row = pd.read_csv('data/input_movielens_ramp-vs-amp.csv').loc[rid]
center = eval(row['ranking'])
mallows = Mallows(center=center, phi=row['phi'])
patterns = [Pattern.from_string(pattern_str) for pattern_str in row['patterns'].split(PATTERN_SEP)]
return patterns, mallows
def get_test_case_of_patterns_from_movielens_5_labels(rid=0):
"""
Hard cases for rAMP are 36, 52, 68, 84, 100, 116, 132, 148
"""
row = pd.read_csv('data/input_movielens_ramp-vs-amp_5_labels.csv').loc[rid]
mallows = Mallows(center=eval(row['ranking']), phi=row['phi'])
patterns = [Pattern.from_string(pattern_str) for pattern_str in row['patterns'].split(' <> ')]
return patterns, mallows
def get_test_case_of_patterns_from_synthetic_4_labels(pid=0):
df_ans = pd.read_csv('data/test_cases_4_labels_sharing_BD_3_subs_convergence_by_ramp_3.csv')
df_ans = df_ans.groupby('rid').first()
p_exact = df_ans.loc[pid, 'p_exact']
row = pd.read_csv('data/test_cases_4_labels_sharing_BD_3_subs.csv').loc[pid]
patterns_str = row['pref(A>C|A>D|B>D)']
patterns = [Pattern.from_string(pattern_str) for pattern_str in patterns_str.split('\n')]
mallows = Mallows(list(range(row['m'])), row['phi'])
return patterns, mallows, p_exact
if __name__ == '__main__':
res = get_test_case_of_patterns_from_movielens_5_labels()
print(res)
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Calendar',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200)),
('color', models.CharField(max_length=100)),
('privacy', models.IntegerField(default=0)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start', models.DateTimeField(default=django.utils.timezone.now)),
('end', models.DateTimeField(default=django.utils.timezone.now)),
('title', models.CharField(max_length=200)),
('location', models.CharField(max_length=200)),
('description', models.CharField(max_length=600)),
('calendar', models.ForeignKey(to='ourcalendar.Calendar')),
('users', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
]
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple script that queries GitHub for all open PRs, then finds the ones without
issue number in title, and the ones where the linked JIRA is already closed
"""
import os
import sys
sys.path.append(os.path.dirname(__file__))
import argparse
import json
import re
from github import Github
from jira import JIRA
from datetime import datetime
from time import strftime
try:
from jinja2 import Environment, BaseLoader
can_do_html = True
except:
can_do_html = False
def read_config():
parser = argparse.ArgumentParser(description='Find open Pull Requests that need attention')
parser.add_argument('--json', action='store_true', default=False, help='Output as json')
parser.add_argument('--html', action='store_true', default=False, help='Output as html')
parser.add_argument('--token', help='Github access token in case you query too often anonymously')
newconf = parser.parse_args()
return newconf
def out(text):
global conf
if not (conf.json or conf.html):
print(text)
def make_html(dict):
if not can_do_html:
print ("ERROR: Cannot generate HTML. Please install jinja2")
sys.exit(1)
global conf
template = Environment(loader=BaseLoader).from_string("""
<h1>Lucene Github PR report</h1>
<p>Number of open Pull Requests: {{ open_count }}</p>
<h2>PRs lacking JIRA reference in title ({{ no_jira_count }})</h2>
<ul>
{% for pr in no_jira %}
<li><a href="https://github.com/apache/lucene/pull/{{ pr.number }}">#{{ pr.number }}: {{ pr.created }} {{ pr.title }}</a> ({{ pr.user }})</li>
{%- endfor %}
</ul>
<h2>Open PRs with a resolved JIRA ({{ closed_jira_count }})</h2>
<ul>
{% for pr in closed_jira %}
<li><a href="https://github.com/apache/lucene/pull/{{ pr.pr_number }}">#{{ pr.pr_number }}</a>: <a href="https://issues.apache.org/jira/browse/{{ pr.issue_key }}">{{ pr.status }} {{ pr.resolution_date }} {{ pr.issue_key}}: {{ pr.issue_summary }}</a> ({{ pr.assignee }})</li>
{%- endfor %}
</ul>
""")
return template.render(dict)
def main():
global conf
conf = read_config()
token = conf.token if conf.token is not None else None
if token:
gh = Github(token)
else:
gh = Github()
jira = JIRA('https://issues.apache.org/jira')
result = {}
repo = gh.get_repo('apache/lucene')
open_prs = repo.get_pulls(state='open')
out("Lucene Github PR report")
out("============================")
out("Number of open Pull Requests: %s" % open_prs.totalCount)
result['open_count'] = open_prs.totalCount
lack_jira = list(filter(lambda x: not re.match(r'.*\b(LUCENE)-\d{3,6}\b', x.title), open_prs))
result['no_jira_count'] = len(lack_jira)
lack_jira_list = []
for pr in lack_jira:
lack_jira_list.append({'title': pr.title, 'number': pr.number, 'user': pr.user.login, 'created': pr.created_at.strftime("%Y-%m-%d")})
result['no_jira'] = lack_jira_list
out("\nPRs lacking JIRA reference in title")
for pr in lack_jira_list:
out(" #%s: %s %s (%s)" % (pr['number'], pr['created'], pr['title'], pr['user'] ))
out("\nOpen PRs with a resolved JIRA")
has_jira = list(filter(lambda x: re.match(r'.*\b(LUCENE)-\d{3,6}\b', x.title), open_prs))
issue_ids = []
issue_to_pr = {}
for pr in has_jira:
jira_issue_str = re.match(r'.*\b((LUCENE)-\d{3,6})\b', pr.title).group(1)
issue_ids.append(jira_issue_str)
issue_to_pr[jira_issue_str] = pr
resolved_jiras = jira.search_issues(jql_str="key in (%s) AND status in ('Closed', 'Resolved')" % ", ".join(issue_ids))
closed_jiras = []
for issue in resolved_jiras:
pr_title = issue_to_pr[issue.key].title
pr_number = issue_to_pr[issue.key].number
assignee = issue.fields.assignee.name if issue.fields.assignee else None
closed_jiras.append({ 'issue_key': issue.key,
'status': issue.fields.status.name,
'resolution': issue.fields.resolution.name,
'resolution_date': issue.fields.resolutiondate[:10],
'pr_number': pr_number,
'pr_title': pr_title,
'issue_summary': issue.fields.summary,
'assignee': assignee})
closed_jiras.sort(key=lambda r: r['pr_number'], reverse=True)
for issue in closed_jiras:
out(" #%s: %s %s %s: %s (%s)" % (issue['pr_number'],
issue['status'],
issue['resolution_date'],
issue['issue_key'],
issue['issue_summary'],
issue['assignee'])
)
result['closed_jira_count'] = len(resolved_jiras)
result['closed_jira'] = closed_jiras
if conf.json:
print(json.dumps(result, indent=4))
if conf.html:
print(make_html(result))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nReceived Ctrl-C, exiting early')
|
python
|
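# Greedy change-making: each amount is read as a string like "1234.56", the decimal point is
# dropped to work in cents, then every denomination (largest first) contributes its floor-division count.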
currency = [10000, 5000, 2000, 1000, 500, 200, 100, 25, 10, 5, 1]
for _ in range(int(input())):
money = input()
money = int(money[:-3] + money[-2:])
out = ""
for c in currency:
out += str(money // c)
money %= c
print(out)
|
python
|
import tkinter as tk
from tkinter import Frame, Button, PanedWindow, Text
from tkinter import X, Y, BOTH
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib import pyplot as plt
class MatplotlibWindow(object):
def __init__(self,root):
self.root=root
fig,ax=plt.subplots()
xs=np.arange(-np.pi,np.pi,0.001)
ys=np.sin(xs)
ax.plot(xs,ys)
plot_frame=Frame(self.root)
self.root.add(plot_frame)
canvas=FigureCanvasTkAgg(fig,master=plot_frame)
toolbar = NavigationToolbar2TkAgg(canvas, plot_frame)
toolbar.update()
self.canvas=canvas
class MatplotlibWindow2(object):
def __init__(self,root):
self.root=root
fig,ax=plt.subplots()
xs=np.arange(-np.pi,np.pi,0.001)
ys=np.cos(xs)
ax.plot(xs,ys)
plot_frame=Frame(self.root)
self.root.add(plot_frame)
canvas=FigureCanvasTkAgg(fig,master=plot_frame)
toolbar = NavigationToolbar2TkAgg(canvas, plot_frame)
toolbar.update()
self.canvas=canvas
def main():
root=tk.Tk()
main_paned_window = PanedWindow(root)
main_paned_window.pack(fill=BOTH, expand=1)
tone_curve_paned_window=PanedWindow(main_paned_window)
main_paned_window.add(tone_curve_paned_window)
tone_curve_window=PanedWindow(tone_curve_paned_window,relief=tk.GROOVE,bd=3,orient=tk.VERTICAL)
mlp_tone_curve_window=MatplotlibWindow2(tone_curve_window)
mlp_tone_curve_window.canvas.get_tk_widget().pack(fill=tk.BOTH,expand=True)
#text_panel_left = Text(main_paned_window, height=6, width =15,relief=tk.GROOVE,bd=2)
#main_paned_window.add(text_panel_left)
sub_paned_window = PanedWindow(main_paned_window, orient=tk.VERTICAL)
#plot sin curve
plot_paned_window=PanedWindow(sub_paned_window,relief=tk.GROOVE,bd=3,orient=tk.VERTICAL)
mlp_window=MatplotlibWindow(plot_paned_window)
mlp_window.canvas.get_tk_widget().pack(fill=tk.BOTH,expand=True)
main_paned_window.add(sub_paned_window)
bottom_pane_text = Text(sub_paned_window, height=3, width =3, relief=tk.SUNKEN,bd=2)
sub_paned_window.add(plot_paned_window)
sub_paned_window.add(bottom_pane_text)
button=Button(root,text="Hello")
button.pack()
root.mainloop()
if __name__ == '__main__':
main()
|
python
|
import csv
with open(str(input('Arquivo .csv Airodump-ng: '))) as arquivoCsv:
    print('\n Rede Senha')
    try:
        reader = csv.reader(arquivoCsv)
        for linha in reader:
            if not linha:  # Skip empty rows
                pass
            else:
                if linha[0] == 'Station MAC':  # Stop here: this is where the wireless networks end in the .csv file
                    break
                else:
                    dicio = {'BSSID': linha[0], 'ESSID': linha[13]}  # Dictionary with the wireless network's MAC and name
                    if dicio['BSSID'] == 'BSSID':  # Skip the header line of the .csv file
                        pass
                    else:
                        if 'VIVO-' in dicio['ESSID']:  # Only show the VIVO- networks
                            senha = dicio['BSSID'][3:-5].replace(':', '') + dicio['ESSID'][6:]
                            print(dicio['ESSID'], senha)
    finally:
        print('\n')
        arquivoCsv.close()
|
python
|
from .node_base import NodeBase
from .exceptions import NodeRegistrationError, NodeNotFoundError
class NodeFactory(object):
def __init__(self) -> None:
super().__init__()
self._nodes = {}
self._names = {}
def registerNode(self, node: NodeBase):
if node is None:
raise ValueError('node param is invalid')
name = node.NODE_NAME
node_path = node.getNodePath()
if name in self._names:
raise NodeRegistrationError(f'Node name "{name}" is already registered')
if self._nodes.get(node_path.lower()):
raise NodeRegistrationError(f'Node "{node_path}" is already registered')
self._nodes[node_path.lower()] = node
self._names[name] = node_path.lower()
def getNodesStructures(self) -> list:
result = []
for identifier, node in self._nodes.items():
result.append(node.getNodeStructure())
return result
def getNodeClass(self, path) -> NodeBase:
if not self.isPathValid(path):
raise ValueError('invalid path')
nodeClass = self._nodes.get(path.lower(), None)
if not nodeClass:
raise NodeNotFoundError(f'Node {path} was not found')
return nodeClass
def isPathValid(self, path: str):
if not path:
return False
return path.find(' ') == -1
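# A minimal usage sketch (HypotheticalNode is illustrative, not part of this module):
#
#   factory = NodeFactory()
#   factory.registerNode(HypotheticalNode)                      # indexed by NODE_NAME and node path
#   node_cls = factory.getNodeClass(HypotheticalNode.getNodePath())
#   structures = factory.getNodesStructures()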
|
python
|
import asyncio
import hikari
import tanjun
from hikari.interactions.base_interactions import ResponseType
from hikari.messages import ButtonStyle
from hikari_testing.bot.client import Client
component = tanjun.Component()
@component.with_slash_command
@tanjun.as_slash_command("paginate", "Paginate through a list of options!")
async def command_paginate(ctx: tanjun.abc.Context) -> None:
values = ("Page 1", "Page 2", "Page 3", "Page 4", "Page 5", "Page 6")
index = 0
button_menu = (
ctx.rest.build_action_row()
.add_button(ButtonStyle.SECONDARY, "<<")
.set_label("<<")
.add_to_container()
.add_button(ButtonStyle.PRIMARY, "<")
.set_label("<")
.add_to_container()
.add_button(ButtonStyle.PRIMARY, ">")
.set_label(">")
.add_to_container()
.add_button(ButtonStyle.SECONDARY, ">>")
.set_label(">>")
.add_to_container()
)
await ctx.respond(values[0], component=button_menu)
while True:
try:
event = await ctx.client.events.wait_for(hikari.InteractionCreateEvent, timeout=60)
except asyncio.TimeoutError:
await ctx.edit_initial_response("Timed out.", components=[])
else:
if event.interaction.custom_id == "<<":
index = 0
elif event.interaction.custom_id == "<":
index = (index - 1) % len(values)
elif event.interaction.custom_id == ">":
index = (index + 1) % len(values)
elif event.interaction.custom_id == ">>":
index = len(values) - 1
await ctx.edit_initial_response(values[index])
await event.interaction.create_initial_response(
ResponseType.DEFERRED_MESSAGE_UPDATE,
values[index]
)
@tanjun.as_loader
def load_component(client: Client) -> None:
client.add_component(component.copy())
|
python
|
# Holly Zhang sp20-516-233 E.Multipass.2
# testing code
p = Provider()
# TestMultipass.test_provider_run_os
r1 = p.run(command="uname -a", executor="os")
print(r1)
#Linux cloudmesh 4.15.0-74-generic #84-Ubuntu SMP Thu Dec 19 08:06:28 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
# TestMultipass.test_provider_run_live
r2 = p.run(command="uname -a", executor="live")
print(r2)
#
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Rudra Rugge
from cfgm_common import svc_info
from vnc_api.vnc_api import *
from instance_manager import InstanceManager
class VirtualMachineManager(InstanceManager):
def _create_svc_vm(self, instance_name, image_name, nics,
flavor_name, st_obj, si_obj, avail_zone):
proj_name = si_obj.get_parent_fq_name()[-1]
if flavor_name:
flavor = self._nc.oper('flavors', 'find', proj_name,
name=flavor_name)
else:
flavor = self._nc.oper('flavors', 'find', proj_name, ram=4096)
if not flavor:
return
image = self._nc.oper('images', 'find', proj_name, name=image_name)
if not image:
return
# create port
nics_with_port = []
for nic in nics:
nic_with_port = {}
vmi_obj = self._create_svc_vm_port(nic, instance_name,
st_obj, si_obj)
nic_with_port['port-id'] = vmi_obj.get_uuid()
nics_with_port.append(nic_with_port)
# launch vm
self.logger.log('Launching VM : ' + instance_name)
nova_vm = self._nc.oper('servers', 'create', proj_name,
name=instance_name, image=image,
flavor=flavor, nics=nics_with_port,
availability_zone=avail_zone)
nova_vm.get()
self.logger.log('Created VM : ' + str(nova_vm))
# create vnc VM object and link to SI
try:
proj_obj = self._vnc_lib.project_read(
fq_name=si_obj.get_parent_fq_name())
vm_obj = VirtualMachine(nova_vm.id)
vm_obj.uuid = nova_vm.id
self._vnc_lib.virtual_machine_create(vm_obj)
except RefsExistError:
vm_obj = self._vnc_lib.virtual_machine_read(id=nova_vm.id)
vm_obj.add_service_instance(si_obj)
self._vnc_lib.virtual_machine_update(vm_obj)
self.logger.log("Info: VM %s updated SI %s" %
(vm_obj.get_fq_name_str(), si_obj.get_fq_name_str()))
return nova_vm
def create_service(self, st_obj, si_obj):
si_props = si_obj.get_service_instance_properties()
st_props = st_obj.get_service_template_properties()
if st_props is None:
self.logger.log("Cannot find service template associated to "
"service instance %s" % si_obj.get_fq_name_str())
return
flavor = st_props.get_flavor()
image_name = st_props.get_image_name()
if image_name is None:
self.logger.log("Error: Image name not present in %s" %
(st_obj.name))
return
# populate nic information
nics = self._get_nic_info(si_obj, si_props, st_props)
# get availability zone
avail_zone = None
if st_props.get_availability_zone_enable():
avail_zone = si_props.get_availability_zone()
elif self._args.availability_zone:
avail_zone = self._args.availability_zone
# create and launch vm
vm_back_refs = si_obj.get_virtual_machine_back_refs()
proj_name = si_obj.get_parent_fq_name()[-1]
max_instances = si_props.get_scale_out().get_max_instances()
self.db.service_instance_insert(si_obj.get_fq_name_str(),
{'max-instances': str(max_instances),
'state': 'launching'})
instances = []
for inst_count in range(0, max_instances):
instance_name = self._get_instance_name(si_obj, inst_count)
si_info = self.db.service_instance_get(si_obj.get_fq_name_str())
prefix = self.db.get_vm_db_prefix(inst_count)
if prefix + 'name' not in si_info.keys():
vm = self._create_svc_vm(instance_name, image_name, nics,
flavor, st_obj, si_obj, avail_zone)
if not vm:
continue
vm_uuid = vm.id
state = 'pending'
else:
vm = self._nc.oper('servers', 'find', proj_name,
id=si_info[prefix + 'uuid'])
if not vm:
continue
vm_uuid = si_info[prefix + 'uuid']
state = 'active'
# store vm, instance in db; use for linking when VM is up
vm_db_entry = self._set_vm_db_info(inst_count, instance_name,
vm_uuid, state)
self.db.service_instance_insert(si_obj.get_fq_name_str(),
vm_db_entry)
instances.append({'uuid': vm_uuid})
self.db.service_instance_insert(si_obj.get_fq_name_str(),
{'state': 'active'})
# uve trace
self.logger.uve_svc_instance(si_obj.get_fq_name_str(),
status='CREATE', vms=instances,
st_name=st_obj.get_fq_name_str())
def delete_service(self, si_fq_str, vm_uuid, proj_name=None):
self.db.remove_vm_info(si_fq_str, vm_uuid)
try:
self._vnc_lib.virtual_machine_delete(id=vm_uuid)
except (NoIdError, RefsExistError):
pass
vm = self._nc.oper('servers', 'find', proj_name, id=vm_uuid)
if not vm:
raise KeyError
try:
vm.delete()
except Exception:
pass
def check_service(self, si_obj, proj_name=None):
status = 'ACTIVE'
vm_list = {}
vm_back_refs = si_obj.get_virtual_machine_back_refs()
for vm_back_ref in vm_back_refs or []:
vm = self._nc.oper('servers', 'find', proj_name,
id=vm_back_ref['uuid'])
if vm:
vm_list[vm.name] = vm
else:
try:
self._vnc_lib.virtual_machine_delete(id=vm_back_ref['uuid'])
except (NoIdError, RefsExistError):
pass
# check status of VMs
si_props = si_obj.get_service_instance_properties()
max_instances = si_props.get_scale_out().get_max_instances()
for inst_count in range(0, max_instances):
instance_name = self._get_instance_name(si_obj, inst_count)
if instance_name not in vm_list.keys():
status = 'ERROR'
elif vm_list[instance_name].status == 'ERROR':
try:
self.delete_service(si_obj.get_fq_name_str(),
vm_list[instance_name].id, proj_name)
except KeyError:
pass
status = 'ERROR'
# check change in instance count
if vm_back_refs and (max_instances > len(vm_back_refs)):
status = 'ERROR'
elif vm_back_refs and (max_instances < len(vm_back_refs)):
for vm_back_ref in vm_back_refs:
try:
self.delete_service(si_obj.get_fq_name_str(),
vm_back_ref['uuid'], proj_name)
except KeyError:
pass
status = 'ERROR'
return status
def update_static_routes(self, si_obj):
# get service instance interface list
si_props = si_obj.get_service_instance_properties()
si_if_list = si_props.get_interface_list()
if not si_if_list:
return
st_list = si_obj.get_service_template_refs()
fq_name = st_list[0]['to']
st_obj = self._vnc_lib.service_template_read(fq_name=fq_name)
st_props = st_obj.get_service_template_properties()
st_if_list = st_props.get_interface_type()
for idx in range(0, len(si_if_list)):
si_if = si_if_list[idx]
static_routes = si_if.get_static_routes()
if not static_routes:
static_routes = {'route':[]}
# update static routes
try:
rt_fq_name = self._get_if_route_table_name(
st_if_list[idx].get_service_interface_type(),
si_obj)
rt_obj = self._vnc_lib.interface_route_table_read(
fq_name=rt_fq_name)
rt_obj.set_interface_route_table_routes(static_routes)
self._vnc_lib.interface_route_table_update(rt_obj)
except NoIdError:
pass
def delete_iip(self, vm_uuid):
try:
vm_obj = self._vnc_lib.virtual_machine_read(id=vm_uuid)
except NoIdError:
return
vmi_back_refs = vm_obj.get_virtual_machine_interface_back_refs()
for vmi_back_ref in vmi_back_refs or []:
try:
vmi_obj = self._vnc_lib.virtual_machine_interface_read(
id=vmi_back_ref['uuid'])
except NoIdError:
continue
iip_back_refs = vmi_obj.get_instance_ip_back_refs()
for iip_back_ref in iip_back_refs or []:
try:
self._vnc_lib.instance_ip_delete(id=iip_back_ref['uuid'])
except (NoIdError, RefsExistError):
continue
|
python
|
# Date : 03/31/2020
# Author : mcalyer
# Module : scanse_control.py
# Description : Code to explore Scanse LIDAR capabilites
# Python : 2.7
# Version : 0.2
# References :
#
# 1. Scanse User Manual V1.0 , 04/20/2017
# 2. Scanse sweep-ardunio source
# 3. sweep-sdk-1.30_10_27_2017
#
# Hardware : PC , Scanse Hardware version 1.0 , FW version 01 , 2016 KickStarter
# Not available today
#
# Notes:
# 0. Really ?! , Unit appears to wobble during spinning
# 1.
# 2. Power : 5V at 450 - 500 ma
# 3. Motor Speed : if setting is '0''0' motor off but when power cycled resets to 5HZ
# 4. Embedded use : power control scanse : control motor , power usage , fail safe device reset
# 5. There is python using driver example for Linux , see sweepy in SDK
# 6. Need to look at driver source
# 7. Scanse Status LED :
# Blinking Green = Start up OK , no ouput
# Solid Blue = Normal operation
# Solid Red = Internal communication error
# 8. Example Scan Settings :
# Speed : 5HZ
# Sample rate : 500 - 600 HZ
# Time required : .2 sec (approx)
# Number of samples : 60 (approx) for 1 rev (360) ? , see angle problem
# Angle Delta : Generally 3.XX degrees (approx)
# Angle problem : Seen in 0 - 120 degree range , large (10 degree) angle deltas
# Revolution : 1 rev , 360 degrees
# Zero Angle : One near zero reading in samples
# 9. Angular resolution : 1.4 - 7.2 degrees based on rotational speed (Other factors ?)
#
# Acknownledgements : None
#
# Releases:
# 03/28/2020 : First
# 03/31/2020 : Version 0.2
# 1. Fixed DX stop issue: Fixed scanse DX command return number of bytes ,
# added scanse flush routine , class Scanse_Control : scanse_flush()
# 2. Added get scan based on number of samples requested , also does not rely on large serial input buffer ,
# class Scanse_Control : rx_scan_samples().
# Times observed for 60 samples .150 - .23 seconds @ motor speed = 5HZ , LIDAR sample rate = 500 - 600 HZ
# 3. Added scan data to PGM file , helps visualize point cloud
#
###########################################################################
################################### Imports ###############################
import time
import serial
import sys
from scanse_pgm import *
################################## Scanse Serial Port #########################################
class Scanse_Control:
def __init__(self):
self.uart = None
self.port = None
def connect(self, port = None):
if self.uart:
return 0
if port is None : port = self.port
# Open serial port connection
# port is a string based on OS:
# Examples: Windows 'COM12' , Linux: '/dev/ttyACM0'
try:
self.uart = serial.Serial(port, baudrate=115200, timeout=1)
self.port = port
return 0 , None
except:
self.uart = None
self.port = None
return 1 , 'Serial port connection error !'
def disconnect(self):
if self.uart:
self.uart.close()
self.uart = None
def tx(self,cmd_list):
try:
#self.uart.write(''.join(chr(e) for e in cmd_list))
self.uart.write(cmd_list)
return 0 , None
except serial.SerialException:
return 1 ,'Command: Serial Port Failed'
def rx(self, n, delay = 0):
if delay != 0 : time.sleep(delay)
try:
nb = self.uart.inWaiting()
#print(nb)
if nb == 0: return 1 , 'RxBytes: Zero serial bytes'
if n == '!': n = nb
if n != nb:
self.uart.flush()
return 1 , 'RxBytes: Expected : ' + str(n) + ' Received : ' + str(nb)
data = self.uart.read(n)
return 0 , data
except serial.SerialException:
return 1, 'RxBytes: Serial Port Failed'
def rx_scan(self):
try:
nb = self.uart.inWaiting()
data = self.uart.read(nb)
except:
return None
return bytes(data)
def rx_scan_samples(self, nb):
data = b''
b = []
t = 0
try:
while(nb > 0):
t = t + 1
time.sleep(.001)
n = self.uart.inWaiting()
if n == 0 :
continue
b = self.uart.read(n)
data = data + b
nb = nb - n
except:
return 1 , t , 'rx_scan_sample error'
return 0 , t, data
def scanse_flush(self):
nb = self.uart.inWaiting()
t = 1000
while(nb != 0):
d = self.uart.read(nb)
time.sleep(.001)
nb = self.uart.inWaiting()
t = t - 1
if t == 0:
break;
return t
def flush(self):
self.uart.flush()
scanse_ctrl = Scanse_Control()
################################## Scanse Interface #########################################
class Scanse_IF():
def __init__ (self, IF , cmd , rx_bytes , decode = None):
self.IF = IF
self.cmd = cmd #['I', 'V'] + ['\n']
self.rx_nb = rx_bytes
self.data = None
self._decode = decode
self.delay = .050
def txrx(self, arg = None):
if arg is not None : self.cmd = self.cmd + arg
self.IF.tx(self.cmd + ['\n'])
if 0 == self.rx_nb : return 0, None
time.sleep(self.delay)
result, self.data = self.IF.rx(self.rx_nb)
if result : return 1, self.data
if self.data[0] != self.cmd[0] or self.data[1] != self.cmd[1] : return 1, None
return 0, self.data
def decode(self):
if self._decode is None : return self.data
return self._decode(self.data)
# IV Decode Model , Protocol , FWV , HWV , Serial Number
iv_decode = lambda x : (x[2:7] , x[7:9][::-1] , x[9:11][::-1] , x[11] , x[12:20])
scanse_iv = Scanse_IF(scanse_ctrl,['I' , 'V'] , 21 , iv_decode )
# Set Motor_Speed
# speed 0 - 10 hz , ['0','0'] - ['1','0']
scanse_ms = Scanse_IF(scanse_ctrl,['M' , 'S'] , 9)
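# Usage sketch: set the motor speed to 5 Hz (two ASCII digits, per the range noted above):
#   result, status = scanse_ms.txrx(['0', '5'])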
# Motor Info
mi_decode = lambda x : (x[2:4])
scanse_mi = Scanse_IF(scanse_ctrl,['M' , 'I'] , 5 , mi_decode)
# Motor Ready
mz_decode = lambda x : (x[2:4])
scanse_mz = Scanse_IF(scanse_ctrl,['M' , 'Z'] , 5 , mz_decode)
# Device Information
di_decode = lambda x : (x[2:8] , x[8] , x[9] , x[10] , x[11:13] , x[13:17])
scanse_di = Scanse_IF(scanse_ctrl,['I' , 'D'] , 18 , di_decode)
# LIDAR Get Sample Rate
lidar_decode = lambda x : (x[2:4])
scanse_lidar_get_sr = Scanse_IF(scanse_ctrl,['L' , 'I'] , 5 , lidar_decode)
# LIDAR , Set Sample Rate
# ['0','1'] = 500 - 600 HZ
# ['0','2'] = 750 - 800 HZ
# ['0','3'] = 1000 - 1075 HZ
lidar_sr_decode = lambda x : (x[5:7])
scanse_lidar_set_sr = Scanse_IF(scanse_ctrl,['L' , 'R'] , 9 , lidar_sr_decode)
# Reset Device
scanse_reset = Scanse_IF(scanse_ctrl,['R' , 'R'] , 0)
# Stop Data Aquisition
scanse_stop_data = Scanse_IF(scanse_ctrl,['D' , 'X'] , 6)
# Start Data Aquisition
scanse_start_data = Scanse_IF(scanse_ctrl,['D' , 'S'] , 7)
############################## Data Acquisition #############################################
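# Packet layout assumed from how measurement() and get_scan() index each 7-byte DS sample:
# byte 0 = sync/error flags (bit 0 marks the start of a new revolution),
# bytes 1-2 = angle * 16 (little endian), bytes 3-4 = distance (little endian);
# the remaining bytes are unused here (likely signal strength / checksum in the sweep protocol).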
def measurement(s):
d = (ord(s[4]) << 8) + ord(s[3])
a_int = (ord(s[2]) << 8) + ord(s[1])
return [d, a_int/16.0]
def get_scan(delay):
scanse_ctrl.flush()
# Send DS Command , start acquisition
scanse_ctrl.tx(['D' , 'S'] + ['\n'])
# Wait for data
time.sleep(delay)
# Get data
scan = scanse_ctrl.rx_scan()
if scan is None or len(scan) < 2 : return 1,0,0, 'No Scan Data'
# Check header bytes
if scan[0] != 'D' or scan[1] != 'S' : return 1, 0, 0, 'No Scan DS header'
# Create List of samples
scan_data = []
l = len(scan)
ns = ((l - 6)/7) - 1
s = scan[6:(l - 6)]
x = 0
z = None
n = ns
for i in range(0,n):
x = i * 7
q = s[x:x+7]
w = ord(q[0])
if w & 0x01 : z = i
if w & 0xFE : return 1, i, w, 'Scan Packet Error'
da = measurement(q)
# Filter out measurements with d == 1 , error
if da[0] == 1:
ns = ns - 1
continue
scan_data.append(da)
# Send DX Command , stop acquisition
scanse_stop_data.txrx()
# Flush scanse uart
scanse_ctrl.scanse_flush()
return 0, ns, z, scan_data
############################### Test ########################################################
def main(sys_argv):
if len(sys_argv) < 2: print("More Args Please !") ; exit(0)
port = sys_argv[1]
# Scanse Connect
result , message = scanse_ctrl.connect(port)
if result: print message ; exit(0)
print "\n"
# Scanse Flush
scanse_ctrl.scanse_flush()
# Get Version Information
scanse_ctrl.flush()
result , info = scanse_iv.txrx()
print(info if result else 'Version :' + str(scanse_iv.decode()))
#Get Device Information
scanse_ctrl.flush()
result , info = scanse_di.txrx()
print(info if result else 'Device Info : ' + str(scanse_di.decode()))
# Set LIDAR sample rate
# Lower sample rate , more light , range measurements more accurate
result , status = scanse_lidar_set_sr.txrx(['0','1'])
print(status if result else 'LIDAR Set Sample Rates Status : ' + str(scanse_lidar_set_sr.decode()))
# Get Motor Speed
result, motor_speed = scanse_mi.txrx()
ms = scanse_mi.decode()
print(motor_speed if result else 'Motor Speed : ' + str(ms))
#Get LIDAR Info
result , info = scanse_lidar_get_sr.txrx()
print(info if result else 'LIDAR Sample Rate : ' + str(scanse_lidar_get_sr.decode()))
# Get 10 Scans
data = []
for i in range(0,10):
r, n, z , data = get_scan(.225)
if r : print(data) ; break
if data != []:
print('Samples : ' + str(n) + ' Zero Index : ' + str(z))
for i in range(0,n):
print(i,data[i])
print('\n')
# Scan sorted by distance
ds = sorted(data,key = lambda data: data[0])
# Scan sorted by angle
ans = sorted(data,key = lambda data: data[1])
print('Distance Min :' + str(ds[0]))
print('Angle Min :' + str(ans[0]))
print('\n')
# PGM File
try:
scan_2_pgm(ds, int(ds[::-1][0][0]))
except:
pass
# Exit
scanse_ctrl.disconnect()
exit(0)
if __name__ == "__main__":
# one argument COM port , Example: Windows 'COM12' , Linux: '/dev/ttyACM0'
main(sys.argv)
|
python
|
from re import findall,IGNORECASE
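# For each word, collect its maximal consonant runs (case-insensitive) and sort them by length;
# a run of three or more consonants means the word is reported as "nao eh facil" (not easy).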
for _ in range(int(input())):
s=input()
f=sorted(findall(r'[bcdfghjklmnpqrstvwxyz]+',s,IGNORECASE),key=lambda x: len(x),reverse=True)
print(f'{s} nao eh facil') if len(f[0])>=3 else print(f'{s} eh facil')
|
python
|
__author__ = 'Wenju Sun'
import urllib2
"""
This script tries to download a given file via HTTP and gives a final status summary
"""
MAX_VALUE=10
MIN_VALUE=0
WARN_VALUE=0
CRITICAL_VALUE=0
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3
STATUS_TEXT='OK'
STATUS_CODE=STATE_OK
murl="http://dl-3m.svc.mcitech.cn/items/60/185/3F4CBC95EF6DA685D498CC2090DDE6FB.zip"
def download(url):
    # Minimal sketch of the unfinished body: fetch the URL with urllib2 and
    # return the HTTP status code and payload size.
    response = urllib2.urlopen(url)
    data = response.read()
    return response.getcode(), len(data)
if __name__ == '__main__':
    status, size = download(murl)
    print('%s: HTTP %d, %d bytes' % (STATUS_TEXT, status, size))
|
python
|
from pathlib import Path
import pytest
from seq2rel.training.callbacks.concatenation_augmentation import ConcatenationAugmentationCallback
class TestConcatenationAugmentationCallback:
def test_aug_frac_value_error(self) -> None:
with pytest.raises(ValueError):
_ = ConcatenationAugmentationCallback(
serialization_dir="", train_data_path="", aug_frac=1.1
)
with pytest.raises(ValueError):
_ = ConcatenationAugmentationCallback(
serialization_dir="", train_data_path="", aug_frac=-0.1
)
def test_on_start(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# Ensure that on object instantiation, there are two training examples.
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 2
# Ensure that on training start, there are two plus one training examples.
concatenation_augmentation.on_start(trainer="")
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 3
def test_on_epoch(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# Ensure that on object instantiation, there are two training examples.
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 2
# Ensure that on epoch end, there are two plus one training examples.
concatenation_augmentation.on_epoch(trainer="")
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 3
def test_on_end(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# This is the train data BEFORE any augmentation.
expected = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
# Purposefully modify the training data on disk, and check that `on_end` restores it
Path(concatenation_augmentation._train_data_path).write_text(expected[0].strip())
concatenation_augmentation.on_end(trainer="")
actual = Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
assert actual == expected
def test_format_instance(
self, concatenation_augmentation: ConcatenationAugmentationCallback
) -> None:
first_instance = "I am the first instance"
second_instance = "I am the second instance"
# Test with no sep_token provided
sep_token = " "
expected = first_instance + sep_token + second_instance
actual = concatenation_augmentation._format_instance(first_instance, second_instance)
assert actual == expected
# Test with sep_token provided
concatenation_augmentation._sep_token = "[SEP]"
expected = first_instance + f" {concatenation_augmentation._sep_token} " + second_instance
actual = concatenation_augmentation._format_instance(first_instance, second_instance)
assert actual == expected
def test_augment(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# Load the training data and create a concatenated example.
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
first_source, first_target = train_data[0].split("\t")
second_source, second_target = train_data[1].split("\t")
concatenated_one = f"{first_source} {second_source}\t{first_target} {second_target}"
concatenated_two = f"{second_source} {first_source}\t{second_target} {first_target}"
# This works because there are only two possible augmented examples given
# `concatenation_augmentation._train_data` and `concatenation_augmentation._aug_frac`.
expected_one = train_data + [concatenated_one]
expected_two = train_data + [concatenated_two]
actual = concatenation_augmentation._augment()
assert actual == expected_one or actual == expected_two
|
python
|
from fastai import *
from fastai.vision import *
path = Path('../data/')
tfms = get_transforms(flip_vert=True)
np.random.seed(352)
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=tfms, size=224).normalize(imagenet_stats)
data.show_batch(3, figsize=(15, 11))
# create a learner based on a pretrained densenet 121 model
learn = cnn_learner(data, models.densenet121, metrics=error_rate)
# use the learning rate finder to find the optimal learning rate
learn.lr_find()
learn.recorder.plot()
lr = 1e-2 # learning rate chosen based on the result of the learning rate finder
# train for 5 epochs
learn.fit_one_cycle(5, slice(lr))
# save the model
learn.save('stage-1-dn121')
# unfreeze and finetune
learn.load('stage-1-dn121');
learn.unfreeze()
learn.lr_find()
# use the learning rate finder again
learn.recorder.plot()
learn.fit_one_cycle(10, slice(1e-4, lr/10))
learn.save('stage-2-dn121')
# export as pickle file for deployment
learn.export('dn121.pkl')
# model interpretation
interp = ClassificationInterpretation.from_learner(learn)
# plot images where the model did not perform well
interp.plot_top_losses(4)
# plot confusion matrix
interp.plot_confusion_matrix(dpi=130)
|
python
|
from click import ClickException, echo
class ProfileBuilderException(ClickException):
"""Base exceptions for all Profile Builder Exceptions"""
class Abort(ProfileBuilderException):
"""Abort the build"""
def show(self, **kwargs):
echo(self.format_message())
class ConfigurationError(ProfileBuilderException):
"""Error in configuration"""
class BuildError(ProfileBuilderException):
"""Error during the build process"""
|
python
|
# -*- coding:utf-8 -*-
"""
目标:能够使用多线程实现同时接收多个客户端的多条信息
1.TCP服务器端
(1) 实现指定端口监听
(2) 实现服务器端地址重用,避免"Address in use"错误。
(3) 能够支持多个客户端连接.
(4) 能够支持支持不同的客户端同时收发消息(开启子线程)
(5) 服务器端主动关闭服务器,子线程随之结束.
"""
# 1. 该程序可以支持多客户端连接.
# 2. 该程序可以支持多客户端同时发送消息.
# 1. 导入模块
import socket
import threading
def recv_msg(new_client_socket,ip_port):
# 循环接收tcp 客户端的消息.
while True:
# 7. 接收客户端发送的信息。
recv_data = new_client_socket.recv(1024)
if recv_data:
# 8. 解码数据并且进行输出.
recv_text = recv_data.decode()
print("收到来自{i}的信息:{m}".format(i = str(ip_port),m = recv_text))
else:
break
# 9. 关闭和当前客户端的连接.
new_client_socket.close()
# 2. 创建套接字
tcp_serversocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# 3. 设置地址可以重用
tcp_serversocket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,True)
# 4. 绑定端口。
tcp_serversocket.bind(("",8888))
# 5. 设置监听,套接字由主动设置为被动.
tcp_serversocket.listen(128)
while True:
# 6. 接受客户端连接.
new_client_socket,ip_port = tcp_serversocket.accept()
print("新客户端连接:",ip_port)
# 创建线程
thre_recvmsg = threading.Thread(target=recv_msg,args=(new_client_socket,ip_port))
# 设置线程守护
thre_recvmsg.setDaemon(True)
# 启动线程
thre_recvmsg.start()
tcp_serversocket.close()
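# A minimal client sketch for manual testing (assumes the server above is running locally):
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(("127.0.0.1", 8888))
#   client.send("hello".encode())
#   client.close()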
|
python
|
from __future__ import unicode_literals
import datetime
import itertools
from django.test import TestCase
from django.db import IntegrityError
from django.db.models import Prefetch
from modelcluster.models import get_all_child_relations
from modelcluster.queryset import FakeQuerySet
from tests.models import Band, BandMember, Place, Restaurant, SeafoodRestaurant, Review, Album, \
Article, Author, Category, Person, Room, House, Log, Dish, MenuItem, Wine
class ClusterTest(TestCase):
def test_can_create_cluster(self):
beatles = Band(name='The Beatles')
self.assertEqual(0, beatles.members.count())
beatles.members = [
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
]
# we should be able to query this relation using (some) queryset methods
self.assertEqual(2, beatles.members.count())
self.assertEqual('John Lennon', beatles.members.all()[0].name)
self.assertEqual('Paul McCartney', beatles.members.filter(name='Paul McCartney')[0].name)
self.assertEqual('Paul McCartney', beatles.members.filter(name__exact='Paul McCartney')[0].name)
self.assertEqual('Paul McCartney', beatles.members.filter(name__iexact='paul mccartNEY')[0].name)
self.assertEqual(0, beatles.members.filter(name__lt='B').count())
self.assertEqual(1, beatles.members.filter(name__lt='M').count())
self.assertEqual('John Lennon', beatles.members.filter(name__lt='M')[0].name)
self.assertEqual(1, beatles.members.filter(name__lt='Paul McCartney').count())
self.assertEqual('John Lennon', beatles.members.filter(name__lt='Paul McCartney')[0].name)
self.assertEqual(2, beatles.members.filter(name__lt='Z').count())
self.assertEqual(0, beatles.members.filter(name__lte='B').count())
self.assertEqual(1, beatles.members.filter(name__lte='M').count())
self.assertEqual('John Lennon', beatles.members.filter(name__lte='M')[0].name)
self.assertEqual(2, beatles.members.filter(name__lte='Paul McCartney').count())
self.assertEqual(2, beatles.members.filter(name__lte='Z').count())
self.assertEqual(2, beatles.members.filter(name__gt='B').count())
self.assertEqual(1, beatles.members.filter(name__gt='M').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__gt='M')[0].name)
self.assertEqual(0, beatles.members.filter(name__gt='Paul McCartney').count())
self.assertEqual(2, beatles.members.filter(name__gte='B').count())
self.assertEqual(1, beatles.members.filter(name__gte='M').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__gte='M')[0].name)
self.assertEqual(1, beatles.members.filter(name__gte='Paul McCartney').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__gte='Paul McCartney')[0].name)
self.assertEqual(0, beatles.members.filter(name__gte='Z').count())
self.assertEqual(1, beatles.members.filter(name__contains='Cart').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__contains='Cart')[0].name)
self.assertEqual(1, beatles.members.filter(name__icontains='carT').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__icontains='carT')[0].name)
self.assertEqual(1, beatles.members.filter(name__in=['Paul McCartney', 'Linda McCartney']).count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__in=['Paul McCartney', 'Linda McCartney'])[0].name)
self.assertEqual(1, beatles.members.filter(name__startswith='Paul').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__startswith='Paul')[0].name)
self.assertEqual(1, beatles.members.filter(name__istartswith='pauL').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__istartswith='pauL')[0].name)
self.assertEqual(1, beatles.members.filter(name__endswith='ney').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__endswith='ney')[0].name)
self.assertEqual(1, beatles.members.filter(name__iendswith='Ney').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__iendswith='Ney')[0].name)
self.assertEqual('Paul McCartney', beatles.members.get(name='Paul McCartney').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__exact='Paul McCartney').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__iexact='paul mccartNEY').name)
self.assertEqual('John Lennon', beatles.members.get(name__lt='Paul McCartney').name)
self.assertEqual('John Lennon', beatles.members.get(name__lte='M').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__gt='M').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__gte='Paul McCartney').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__contains='Cart').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__icontains='carT').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__in=['Paul McCartney', 'Linda McCartney']).name)
self.assertEqual('Paul McCartney', beatles.members.get(name__startswith='Paul').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__istartswith='pauL').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__endswith='ney').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__iendswith='Ney').name)
self.assertEqual('John Lennon', beatles.members.get(name__regex=r'n{2}').name)
self.assertEqual('John Lennon', beatles.members.get(name__iregex=r'N{2}').name)
self.assertRaises(BandMember.DoesNotExist, lambda: beatles.members.get(name='Reginald Dwight'))
self.assertRaises(BandMember.MultipleObjectsReturned, lambda: beatles.members.get())
self.assertEqual([('Paul McCartney',)], beatles.members.filter(name='Paul McCartney').values_list('name'))
self.assertEqual(['Paul McCartney'], beatles.members.filter(name='Paul McCartney').values_list('name', flat=True))
# quick-and-dirty check that we can invoke values_list with empty args list
beatles.members.filter(name='Paul McCartney').values_list()
self.assertTrue(beatles.members.filter(name='Paul McCartney').exists())
self.assertFalse(beatles.members.filter(name='Reginald Dwight').exists())
self.assertEqual('John Lennon', beatles.members.first().name)
self.assertEqual('Paul McCartney', beatles.members.last().name)
        self.assertEqual('John Lennon', beatles.members.order_by('name').first().name)
        self.assertEqual('Paul McCartney', beatles.members.order_by('-name').first().name)
# these should not exist in the database yet
self.assertFalse(Band.objects.filter(name='The Beatles').exists())
self.assertFalse(BandMember.objects.filter(name='John Lennon').exists())
beatles.save()
# this should create database entries
self.assertTrue(Band.objects.filter(name='The Beatles').exists())
self.assertTrue(BandMember.objects.filter(name='John Lennon').exists())
john_lennon = BandMember.objects.get(name='John Lennon')
beatles.members = [john_lennon]
# reassigning should take effect on the in-memory record
self.assertEqual(1, beatles.members.count())
# but not the database
self.assertEqual(2, Band.objects.get(name='The Beatles').members.count())
beatles.save()
# now updated in the database
self.assertEqual(1, Band.objects.get(name='The Beatles').members.count())
self.assertEqual(1, BandMember.objects.filter(name='John Lennon').count())
# removed member should be deleted from the db entirely
self.assertEqual(0, BandMember.objects.filter(name='Paul McCartney').count())
# queries on beatles.members should now revert to SQL
self.assertTrue(beatles.members.extra(where=["tests_bandmember.name='John Lennon'"]).exists())
def test_related_manager_assignment_ops(self):
beatles = Band(name='The Beatles')
john = BandMember(name='John Lennon')
paul = BandMember(name='Paul McCartney')
beatles.members.add(john)
self.assertEqual(1, beatles.members.count())
beatles.members.add(paul)
self.assertEqual(2, beatles.members.count())
# ensure that duplicates are filtered
beatles.members.add(paul)
self.assertEqual(2, beatles.members.count())
beatles.members.remove(john)
self.assertEqual(1, beatles.members.count())
self.assertEqual(paul, beatles.members.all()[0])
george = beatles.members.create(name='George Harrison')
self.assertEqual(2, beatles.members.count())
self.assertEqual('George Harrison', george.name)
beatles.members.set([john])
self.assertEqual(1, beatles.members.count())
self.assertEqual(john, beatles.members.all()[0])
def test_can_pass_child_relations_as_constructor_kwargs(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
self.assertEqual(2, beatles.members.count())
self.assertEqual(beatles, beatles.members.all()[0].band)
def test_can_access_child_relations_of_superclass(self):
fat_duck = Restaurant(name='The Fat Duck', serves_hot_dogs=False, reviews=[
Review(author='Michael Winner', body='Rubbish.')
])
self.assertEqual(1, fat_duck.reviews.count())
self.assertEqual(fat_duck.reviews.first().author, 'Michael Winner')
self.assertEqual(fat_duck, fat_duck.reviews.all()[0].place)
fat_duck.save()
# ensure relations have been saved to the database
fat_duck = Restaurant.objects.get(id=fat_duck.id)
self.assertEqual(1, fat_duck.reviews.count())
self.assertEqual(fat_duck.reviews.first().author, 'Michael Winner')
def test_can_only_commit_on_saved_parent(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
self.assertRaises(IntegrityError, lambda: beatles.members.commit())
beatles.save()
beatles.members.commit()
def test_integrity_error_with_none_pk(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
beatles.save()
beatles.pk = None
self.assertRaises(IntegrityError, lambda: beatles.members.commit())
# this should work fine, as Django will end up cloning this entity
beatles.save()
self.assertEqual(Band.objects.get(pk=beatles.pk).name, 'The Beatles')
def test_model_with_zero_pk(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
beatles.save()
beatles.pk = 0
beatles.members.commit()
beatles.save()
self.assertEqual(Band.objects.get(pk=0).name, 'The Beatles')
def test_save_with_update_fields(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
], albums=[
Album(name='Please Please Me', sort_order=1),
Album(name='With The Beatles', sort_order=2),
Album(name='Abbey Road', sort_order=3),
])
beatles.save()
# modify both relations, but only commit the change to members
beatles.members.clear()
beatles.albums.clear()
beatles.name = 'The Rutles'
beatles.save(update_fields=['name', 'members'])
updated_beatles = Band.objects.get(pk=beatles.pk)
self.assertEqual(updated_beatles.name, 'The Rutles')
self.assertEqual(updated_beatles.members.count(), 0)
self.assertEqual(updated_beatles.albums.count(), 3)
def test_queryset_filtering(self):
beatles = Band(name='The Beatles', members=[
BandMember(id=1, name='John Lennon'),
BandMember(id=2, name='Paul McCartney'),
])
self.assertEqual('Paul McCartney', beatles.members.get(id=2).name)
self.assertEqual('Paul McCartney', beatles.members.get(id='2').name)
self.assertEqual(1, beatles.members.filter(name='Paul McCartney').count())
# also need to be able to filter on foreign fields that return a model instance
# rather than a simple python value
self.assertEqual(2, beatles.members.filter(band=beatles).count())
# and ensure that the comparison is not treating all unsaved instances as identical
rutles = Band(name='The Rutles')
self.assertEqual(0, beatles.members.filter(band=rutles).count())
# and the comparison must be on the model instance's ID where available,
# not by reference
beatles.save()
        beatles.members.add(BandMember(id=3, name='George Harrison'))  # modify the relation so that we're not working with a plain database-backed queryset
also_beatles = Band.objects.get(id=beatles.id)
self.assertEqual(3, beatles.members.filter(band=also_beatles).count())
def test_queryset_filtering_on_models_with_inheritance(self):
strawberry_fields = Restaurant.objects.create(name='Strawberry Fields')
the_yellow_submarine = SeafoodRestaurant.objects.create(name='The Yellow Submarine')
john = BandMember(name='John Lennon', favourite_restaurant=strawberry_fields)
ringo = BandMember(name='Ringo Starr', favourite_restaurant=Restaurant.objects.get(name='The Yellow Submarine'))
beatles = Band(name='The Beatles', members=[john, ringo])
# queried instance is less specific
self.assertEqual(
list(beatles.members.filter(favourite_restaurant=Place.objects.get(name='Strawberry Fields'))),
[john]
)
# queried instance is more specific
self.assertEqual(
list(beatles.members.filter(favourite_restaurant=the_yellow_submarine)),
[ringo]
)
def test_queryset_exclude_filtering(self):
beatles = Band(name='The Beatles', members=[
BandMember(id=1, name='John Lennon'),
BandMember(id=2, name='Paul McCartney'),
])
self.assertEqual(1, beatles.members.exclude(name='Paul McCartney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name='Paul McCartney').first().name)
self.assertEqual(1, beatles.members.exclude(name__exact='Paul McCartney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__exact='Paul McCartney').first().name)
self.assertEqual(1, beatles.members.exclude(name__iexact='paul mccartNEY').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__iexact='paul mccartNEY').first().name)
self.assertEqual(1, beatles.members.exclude(name__lt='M').count())
self.assertEqual('Paul McCartney', beatles.members.exclude(name__lt='M').first().name)
self.assertEqual(1, beatles.members.exclude(name__lt='Paul McCartney').count())
self.assertEqual('Paul McCartney', beatles.members.exclude(name__lt='Paul McCartney').first().name)
self.assertEqual(1, beatles.members.exclude(name__lte='John Lennon').count())
self.assertEqual('Paul McCartney', beatles.members.exclude(name__lte='John Lennon').first().name)
self.assertEqual(1, beatles.members.exclude(name__gt='M').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__gt='M').first().name)
self.assertEqual(1, beatles.members.exclude(name__gte='Paul McCartney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__gte='Paul McCartney').first().name)
self.assertEqual(1, beatles.members.exclude(name__contains='Cart').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__contains='Cart').first().name)
self.assertEqual(1, beatles.members.exclude(name__icontains='carT').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__icontains='carT').first().name)
self.assertEqual(1, beatles.members.exclude(name__in=['Paul McCartney', 'Linda McCartney']).count())
self.assertEqual('John Lennon', beatles.members.exclude(name__in=['Paul McCartney', 'Linda McCartney'])[0].name)
self.assertEqual(1, beatles.members.exclude(name__startswith='Paul').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__startswith='Paul').first().name)
self.assertEqual(1, beatles.members.exclude(name__istartswith='pauL').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__istartswith='pauL').first().name)
self.assertEqual(1, beatles.members.exclude(name__endswith='ney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__endswith='ney').first().name)
self.assertEqual(1, beatles.members.exclude(name__iendswith='Ney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__iendswith='Ney').first().name)
def test_queryset_filter_with_nulls(self):
tmbg = Band(name="They Might Be Giants", albums=[
Album(name="Flood", release_date=datetime.date(1990, 1, 1)),
Album(name="John Henry", release_date=datetime.date(1994, 7, 21)),
Album(name="Factory Showroom", release_date=datetime.date(1996, 3, 30)),
Album(name="", release_date=None),
Album(name=None, release_date=None),
])
self.assertEqual(tmbg.albums.get(name="Flood").name, "Flood")
self.assertEqual(tmbg.albums.get(name="").name, "")
self.assertEqual(tmbg.albums.get(name=None).name, None)
self.assertEqual(tmbg.albums.get(name__exact="Flood").name, "Flood")
self.assertEqual(tmbg.albums.get(name__exact="").name, "")
self.assertEqual(tmbg.albums.get(name__exact=None).name, None)
self.assertEqual(tmbg.albums.get(name__iexact="flood").name, "Flood")
self.assertEqual(tmbg.albums.get(name__iexact="").name, "")
self.assertEqual(tmbg.albums.get(name__iexact=None).name, None)
self.assertEqual(tmbg.albums.get(name__contains="loo").name, "Flood")
self.assertEqual(tmbg.albums.get(name__icontains="LOO").name, "Flood")
self.assertEqual(tmbg.albums.get(name__startswith="Flo").name, "Flood")
self.assertEqual(tmbg.albums.get(name__istartswith="flO").name, "Flood")
self.assertEqual(tmbg.albums.get(name__endswith="ood").name, "Flood")
self.assertEqual(tmbg.albums.get(name__iendswith="Ood").name, "Flood")
self.assertEqual(tmbg.albums.get(name__lt="A").name, "")
self.assertEqual(tmbg.albums.get(name__lte="A").name, "")
self.assertEqual(tmbg.albums.get(name__gt="J").name, "John Henry")
self.assertEqual(tmbg.albums.get(name__gte="J").name, "John Henry")
self.assertEqual(tmbg.albums.get(name__in=["Flood", "Mink Car"]).name, "Flood")
self.assertEqual(tmbg.albums.get(name__in=["", "Mink Car"]).name, "")
self.assertEqual(tmbg.albums.get(name__in=[None, "Mink Car"]).name, None)
self.assertEqual(tmbg.albums.filter(name__isnull=True).count(), 1)
self.assertEqual(tmbg.albums.filter(name__isnull=False).count(), 4)
self.assertEqual(tmbg.albums.get(name__regex=r'l..d').name, "Flood")
self.assertEqual(tmbg.albums.get(name__iregex=r'f..o').name, "Flood")
def test_date_filters(self):
tmbg = Band(name="They Might Be Giants", albums=[
Album(name="Flood", release_date=datetime.date(1990, 1, 1)),
Album(name="John Henry", release_date=datetime.date(1994, 7, 21)),
Album(name="Factory Showroom", release_date=datetime.date(1996, 3, 30)),
Album(name="The Complete Dial-A-Song", release_date=None),
])
logs = FakeQuerySet(Log, [
Log(time=datetime.datetime(1979, 7, 1, 1, 1, 1), data="nobody died"),
Log(time=datetime.datetime(1980, 2, 2, 2, 2, 2), data="one person died"),
Log(time=None, data="nothing happened")
])
self.assertEqual(
tmbg.albums.get(release_date__range=(datetime.date(1994, 1, 1), datetime.date(1994, 12, 31))).name,
"John Henry"
)
self.assertEqual(
logs.get(time__range=(datetime.datetime(1980, 1, 1, 1, 1, 1), datetime.datetime(1980, 12, 31, 23, 59, 59))).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__date=datetime.date(1994, 7, 21)).name,
"John Henry"
)
self.assertEqual(
logs.get(time__date=datetime.date(1980, 2, 2)).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__year='1994').name,
"John Henry"
)
self.assertEqual(
logs.get(time__year=1980).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__month=7).name,
"John Henry"
)
self.assertEqual(
logs.get(time__month='2').data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__day='21').name,
"John Henry"
)
self.assertEqual(
logs.get(time__day=2).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__week=29).name,
"John Henry"
)
self.assertEqual(
logs.get(time__week='5').data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__week_day=5).name,
"John Henry"
)
self.assertEqual(
logs.get(time__week_day=7).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__quarter=3).name,
"John Henry"
)
self.assertEqual(
logs.get(time__quarter=1).data,
"one person died"
)
self.assertEqual(
logs.get(time__time=datetime.time(2, 2, 2)).data,
"one person died"
)
self.assertEqual(
logs.get(time__hour=2).data,
"one person died"
)
self.assertEqual(
logs.get(time__minute='2').data,
"one person died"
)
self.assertEqual(
logs.get(time__second=2).data,
"one person died"
)
def test_prefetch_related(self):
Band.objects.create(name='The Beatles', members=[
BandMember(id=1, name='John Lennon'),
BandMember(id=2, name='Paul McCartney'),
])
with self.assertNumQueries(2):
lists = [list(band.members.all()) for band in Band.objects.prefetch_related('members')]
normal_lists = [list(band.members.all()) for band in Band.objects.all()]
self.assertEqual(lists, normal_lists)
def test_prefetch_related_with_custom_queryset(self):
from django.db.models import Prefetch
Band.objects.create(name='The Beatles', members=[
BandMember(id=1, name='John Lennon'),
BandMember(id=2, name='Paul McCartney'),
])
with self.assertNumQueries(2):
lists = [
list(band.members.all())
for band in Band.objects.prefetch_related(
Prefetch('members', queryset=BandMember.objects.filter(name__startswith='Paul'))
)
]
normal_lists = [list(band.members.filter(name__startswith='Paul')) for band in Band.objects.all()]
self.assertEqual(lists, normal_lists)
def test_order_by_with_multiple_fields(self):
beatles = Band(name='The Beatles', albums=[
Album(name='Please Please Me', sort_order=2),
Album(name='With The Beatles', sort_order=1),
Album(name='Abbey Road', sort_order=2),
])
albums = [album.name for album in beatles.albums.order_by('sort_order', 'name')]
self.assertEqual(['With The Beatles', 'Abbey Road', 'Please Please Me'], albums)
albums = [album.name for album in beatles.albums.order_by('sort_order', '-name')]
self.assertEqual(['With The Beatles', 'Please Please Me', 'Abbey Road'], albums)
def test_meta_ordering(self):
beatles = Band(name='The Beatles', albums=[
Album(name='Please Please Me', sort_order=2),
Album(name='With The Beatles', sort_order=1),
Album(name='Abbey Road', sort_order=3),
])
# in the absence of an explicit order_by clause, it should use the ordering as defined
# in Album.Meta, which is 'sort_order'
albums = [album.name for album in beatles.albums.all()]
self.assertEqual(['With The Beatles', 'Please Please Me', 'Abbey Road'], albums)
def test_parental_key_checks_clusterable_model(self):
from django.core import checks
from django.db import models
from modelcluster.fields import ParentalKey
class Instrument(models.Model):
# Oops, BandMember is not a Clusterable model
member = ParentalKey(BandMember, on_delete=models.CASCADE)
class Meta:
# Prevent Django from thinking this is in the database
# This shouldn't affect the test
abstract = True
# Check for error
errors = Instrument.check()
self.assertEqual(1, len(errors))
# Check the error itself
error = errors[0]
self.assertIsInstance(error, checks.Error)
self.assertEqual(error.id, 'modelcluster.E001')
self.assertEqual(error.obj, Instrument.member.field)
self.assertEqual(error.msg, 'ParentalKey must point to a subclass of ClusterableModel.')
self.assertEqual(error.hint, 'Change tests.BandMember into a ClusterableModel or use a ForeignKey instead.')
def test_parental_key_checks_related_name_is_not_plus(self):
from django.core import checks
from django.db import models
from modelcluster.fields import ParentalKey
class Instrument(models.Model):
# Oops, related_name='+' is not allowed
band = ParentalKey(Band, related_name='+', on_delete=models.CASCADE)
class Meta:
# Prevent Django from thinking this is in the database
# This shouldn't affect the test
abstract = True
# Check for error
errors = Instrument.check()
self.assertEqual(1, len(errors))
# Check the error itself
error = errors[0]
self.assertIsInstance(error, checks.Error)
self.assertEqual(error.id, 'modelcluster.E002')
self.assertEqual(error.obj, Instrument.band.field)
self.assertEqual(error.msg, "related_name='+' is not allowed on ParentalKey fields")
self.assertEqual(error.hint, "Either change it to a valid name or remove it")
def test_parental_key_checks_target_is_resolved_as_class(self):
from django.core import checks
from django.db import models
from modelcluster.fields import ParentalKey
class Instrument(models.Model):
banana = ParentalKey('Banana', on_delete=models.CASCADE)
class Meta:
# Prevent Django from thinking this is in the database
# This shouldn't affect the test
abstract = True
# Check for error
errors = Instrument.check()
self.assertEqual(1, len(errors))
# Check the error itself
error = errors[0]
self.assertIsInstance(error, checks.Error)
self.assertEqual(error.id, 'fields.E300')
self.assertEqual(error.obj, Instrument.banana.field)
self.assertEqual(error.msg, "Field defines a relation with model 'Banana', which is either not installed, or is abstract.")
class GetAllChildRelationsTest(TestCase):
def test_get_all_child_relations(self):
self.assertEqual(
set([rel.name for rel in get_all_child_relations(Restaurant)]),
set(['tagged_items', 'reviews', 'menu_items'])
)
class ParentalM2MTest(TestCase):
def setUp(self):
self.article = Article(title="Test Title")
self.author_1 = Author.objects.create(name="Author 1")
self.author_2 = Author.objects.create(name="Author 2")
self.article.authors = [self.author_1, self.author_2]
self.category_1 = Category.objects.create(name="Category 1")
self.category_2 = Category.objects.create(name="Category 2")
self.article.categories = [self.category_1, self.category_2]
def test_uninitialised_m2m_relation(self):
# Reading an m2m relation of a newly created object should return an empty queryset
new_article = Article(title="Test title")
self.assertEqual([], list(new_article.authors.all()))
self.assertEqual(new_article.authors.count(), 0)
# the manager should have a 'model' property pointing to the target model
self.assertEqual(Author, new_article.authors.model)
def test_parentalm2mfield(self):
# Article should not exist in the database yet
self.assertFalse(Article.objects.filter(title='Test Title').exists())
# Test lookup on parental M2M relation
self.assertEqual(
['Author 1', 'Author 2'],
[author.name for author in self.article.authors.order_by('name')]
)
self.assertEqual(self.article.authors.count(), 2)
# the manager should have a 'model' property pointing to the target model
self.assertEqual(Author, self.article.authors.model)
# Test adding to the relation
author_3 = Author.objects.create(name="Author 3")
self.article.authors.add(author_3)
self.assertEqual(
['Author 1', 'Author 2', 'Author 3'],
[author.name for author in self.article.authors.all().order_by('name')]
)
self.assertEqual(self.article.authors.count(), 3)
# Test removing from the relation
self.article.authors.remove(author_3)
self.assertEqual(
['Author 1', 'Author 2'],
[author.name for author in self.article.authors.order_by('name')]
)
self.assertEqual(self.article.authors.count(), 2)
# Test clearing the relation
self.article.authors.clear()
self.assertEqual(
[],
[author.name for author in self.article.authors.order_by('name')]
)
self.assertEqual(self.article.authors.count(), 0)
# Test the 'set' operation
self.article.authors.set([self.author_2])
self.assertEqual(self.article.authors.count(), 1)
self.assertEqual(
['Author 2'],
[author.name for author in self.article.authors.order_by('name')]
)
# Test saving to / restoring from DB
self.article.authors = [self.author_1, self.author_2]
self.article.save()
self.article = Article.objects.get(title="Test Title")
self.assertEqual(
['Author 1', 'Author 2'],
[author.name for author in self.article.authors.order_by('name')]
)
self.assertEqual(self.article.authors.count(), 2)
def test_constructor(self):
# Test passing values for M2M relations as kwargs to the constructor
article2 = Article(
title="Test article 2",
authors=[self.author_1],
categories=[self.category_2],
)
self.assertEqual(
['Author 1'],
[author.name for author in article2.authors.order_by('name')]
)
self.assertEqual(article2.authors.count(), 1)
def test_ordering(self):
# our fake querysets should respect the ordering defined on the target model
bela_bartok = Author.objects.create(name='Bela Bartok')
graham_greene = Author.objects.create(name='Graham Greene')
janis_joplin = Author.objects.create(name='Janis Joplin')
simon_sharma = Author.objects.create(name='Simon Sharma')
william_wordsworth = Author.objects.create(name='William Wordsworth')
article3 = Article(title="Test article 3")
article3.authors = [
janis_joplin, william_wordsworth, bela_bartok, simon_sharma, graham_greene
]
self.assertEqual(
list(article3.authors.all()),
[bela_bartok, graham_greene, janis_joplin, simon_sharma, william_wordsworth]
)
def test_save_m2m_with_update_fields(self):
self.article.save()
# modify both relations, but only commit the change to authors
self.article.authors.clear()
self.article.categories.clear()
self.article.title = 'Updated title'
self.article.save(update_fields=['title', 'authors'])
self.updated_article = Article.objects.get(pk=self.article.pk)
self.assertEqual(self.updated_article.title, 'Updated title')
self.assertEqual(self.updated_article.authors.count(), 0)
self.assertEqual(self.updated_article.categories.count(), 2)
def test_reverse_m2m_field(self):
# article is unsaved, so should not be returned by the reverse relation on author
self.assertEqual(self.author_1.articles_by_author.count(), 0)
self.article.save()
# should now be able to look up on the reverse relation
self.assertEqual(self.author_1.articles_by_author.count(), 1)
self.assertEqual(self.author_1.articles_by_author.get(), self.article)
article_2 = Article(title="Test Title 2")
article_2.authors = [self.author_1]
article_2.save()
self.assertEqual(self.author_1.articles_by_author.all().count(), 2)
self.assertEqual(
list(self.author_1.articles_by_author.order_by('title').values_list('title', flat=True)),
['Test Title', 'Test Title 2']
)
def test_value_from_object(self):
authors_field = Article._meta.get_field('authors')
self.assertEqual(
set(authors_field.value_from_object(self.article)),
set([self.author_1, self.author_2])
)
self.article.save()
self.assertEqual(
set(authors_field.value_from_object(self.article)),
set([self.author_1, self.author_2])
)
class ParentalManyToManyPrefetchTests(TestCase):
def setUp(self):
# Create 10 articles with 10 authors each.
authors = Author.objects.bulk_create(
Author(id=i, name=str(i)) for i in range(10)
)
authors = Author.objects.all()
for i in range(10):
article = Article(title=str(i))
article.authors = authors
article.save()
def get_author_names(self, articles):
return [
author.name
for article in articles
for author in article.authors.all()
]
def test_prefetch_related(self):
with self.assertNumQueries(11):
names = self.get_author_names(Article.objects.all())
with self.assertNumQueries(2):
prefetched_names = self.get_author_names(
Article.objects.prefetch_related('authors')
)
self.assertEqual(names, prefetched_names)
def test_prefetch_related_with_custom_queryset(self):
from django.db.models import Prefetch
with self.assertNumQueries(2):
names = self.get_author_names(
Article.objects.prefetch_related(
Prefetch('authors', queryset=Author.objects.filter(name__lt='5'))
)
)
self.assertEqual(len(names), 50)
def test_prefetch_from_fake_queryset(self):
article = Article(title='Article with related articles')
article.related_articles = list(Article.objects.all())
with self.assertNumQueries(10):
names = self.get_author_names(article.related_articles.all())
with self.assertNumQueries(1):
prefetched_names = self.get_author_names(
article.related_articles.prefetch_related('authors')
)
self.assertEqual(names, prefetched_names)
class PrefetchRelatedTest(TestCase):
def test_fakequeryset_prefetch_related(self):
person1 = Person.objects.create(name='Joe')
person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house1 = House.objects.create(name='House 1', address='123 Main St', owner=person1)
room1_1 = Room.objects.create(name='Dining room')
room1_2 = Room.objects.create(name='Lounge')
room1_3 = Room.objects.create(name='Kitchen')
house1.main_room = room1_1
house1.save()
house2 = House(name='House 2', address='45 Side St', owner=person1)
room2_1 = Room.objects.create(name='Eating room')
room2_2 = Room.objects.create(name='TV Room')
room2_3 = Room.objects.create(name='Bathroom')
house2.main_room = room2_1
person1.houses = itertools.chain(House.objects.all(), [house2])
houses = person1.houses.all()
with self.assertNumQueries(1):
qs = person1.houses.prefetch_related('main_room')
with self.assertNumQueries(0):
main_rooms = [ house.main_room for house in person1.houses.all() ]
self.assertEqual(len(main_rooms), 2)
def test_prefetch_related_with_lookup(self):
restaurant1 = Restaurant.objects.create(name='The Jolly Beaver')
restaurant2 = Restaurant.objects.create(name='The Prancing Rhino')
dish1 = Dish.objects.create(name='Goodies')
dish2 = Dish.objects.create(name='Baddies')
wine1 = Wine.objects.create(name='Chateau1')
wine2 = Wine.objects.create(name='Chateau2')
menu_item1 = MenuItem.objects.create(restaurant=restaurant1, dish=dish1, recommended_wine=wine1, price=1)
menu_item2 = MenuItem.objects.create(restaurant=restaurant2, dish=dish2, recommended_wine=wine2, price=10)
query = Restaurant.objects.all().prefetch_related(
Prefetch('menu_items', queryset=MenuItem.objects.only('price', 'recommended_wine').select_related('recommended_wine'))
)
res = list(query)
self.assertEqual(query[0].menu_items.all()[0], menu_item1)
self.assertEqual(query[1].menu_items.all()[0], menu_item2)
|
python
|
import functools
import tornado.options
def define_options(option_parser):
# Debugging
option_parser.define(
'debug', default=False, type=bool,
help="Turn on autoreload and log to stderr",
callback=functools.partial(enable_debug, option_parser),
group='Debugging')
def config_callback(path):
option_parser.parse_config_file(path, final=False)
option_parser.define(
"config", type=str, help="Path to config file",
callback=config_callback, group='Config file')
# Application
option_parser.define(
'autoreload', type=bool, default=False, group='Application')
option_parser.define('cookie_secret', type=str, group='Application')
option_parser.define('port', default=8888, type=int, help=(
"Server port"), group='Application')
# Startup
option_parser.define('ensure_indexes', default=False, type=bool, help=(
"Ensure collection indexes before starting"), group='Startup')
option_parser.define('rebuild_indexes', default=False, type=bool, help=(
"Drop all indexes and recreate before starting"), group='Startup')
# Identity
option_parser.define('host', default='localhost', type=str, help=(
"Server hostname"), group='Identity')
option_parser.define('blog_name', type=str, help=(
"Display name for the site"), group='Identity')
option_parser.define('base_url', type=str, help=(
"Base url, e.g. 'blog'"), group='Identity')
option_parser.define('author_display_name', type=str, help=(
"Author name to display in posts and titles"), group='Identity')
option_parser.define('author_email', type=str, help=(
"Author email to display in feed"), group='Identity')
option_parser.define('twitter_handle', type=str, help=(
"Author's Twitter handle (no @-sign)"), group='Identity')
option_parser.define('disqus_shortname', type=str, help=(
"Site's Disqus identifier"), group='Identity')
option_parser.define('description', type=str, help=(
"Site description"), group='Identity')
# Integrations
option_parser.define('google_analytics_id', type=str, help=(
"Like 'UA-123456-1'"), group='Integrations')
option_parser.define('google_analytics_rss_id', type=str, help=(
"Like 'UA-123456-1'"), group='Integrations')
# Admin
option_parser.define('user', type=str, group='Admin')
option_parser.define('password', type=str, group='Admin')
# Appearance
    option_parser.define('nav_menu', type=list, default=[], help=(
        "List of url, title, CSS-class triples (define this in your"
        " motor_blog.conf)"), group='Appearance')
option_parser.define('theme', type=str, default='theme', help=(
"Directory name of your theme files"), group='Appearance')
option_parser.define('home_page', type=str, group='Appearance', help=(
"Slug of a static home page (default: recent posts)"))
option_parser.define(
'timezone', type=str, default='America/New_York',
help="Your timezone name", group='Appearance')
option_parser.add_parse_callback(
functools.partial(check_required_options, option_parser))
def check_required_options(option_parser):
for required_option_name in (
'host', 'port', 'blog_name', 'base_url', 'cookie_secret', 'timezone',
):
if not getattr(option_parser, required_option_name, None):
message = (
'%s required. (Did you forget to pass'
' --config=CONFIG_FILE?)' % (
required_option_name))
raise tornado.options.Error(message)
def enable_debug(option_parser, debug):
if debug:
option_parser.log_to_stderr = True
option_parser.autoreload = True
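
# ------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): how these
# definitions are typically wired into Tornado's global option parser. The
# import path `motor_blog.options` is an assumption for illustration only.
#
#   import tornado.options
#   from motor_blog.options import define_options  # hypothetical path
#
#   define_options(tornado.options.options)
#   tornado.options.parse_command_line()  # runs the parse callbacks,
#                                         # including check_required_options
# ------------------------------------------------------------------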
|
python
|
from django.urls import path
from . import books_views
urlpatterns = [
path('books/', books_views.index, name='books'),
]
|
python
|
""" Game API for Pacman """
import random
from collections import defaultdict
from abc import ABC, abstractmethod
import math
import mcts
import copy
import torch as tr
import pacman_net as pn
import pacman_data as pd
import numpy as np
class Node(ABC):
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
@abstractmethod
#find all the successors of the state
def find_children(self):
return set()
@abstractmethod
def random_child(self):
return None
# return true if no child
@abstractmethod
def is_leaf(self):
return True
#score
@abstractmethod
def score(self):
return 0
@abstractmethod
#node must be hashable
def __hash__(self):
return 123456
@abstractmethod
#nodes should be comparable
def __eq__(node1, node2):
return True
class MazeGameBoard():
scores_to_win = 100
max_steps = 40
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
def __init__(self, L, ghosts, pos_i, pos_j, score):
self.board = L
self.ghosts = ghosts
self.pac_i = pos_i
self.pac_j = pos_j
self.score = score
self.current_steps = 0
# 0 for pacman 1 for ghost turn
def gameOver(self):
return self.isCaught() or self.isWon() or self.current_steps >= MazeGameBoard.max_steps
def isCaught(self):
for ghost in self.ghosts:
if self.pac_i == ghost.row and self.pac_j == ghost.col:
return True
return False
def isWon(self):
return self.score == MazeGameBoard.scores_to_win
def one_step_more(self):
self.current_steps += 1
class ghost:
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
initPos = [[2,3],[6,13]] #
currentIndex = 0
oldpoint = 0
def __init__(self, L):
# 0: i++(go down), 1: j++ (go right), 2:i--(go up), 3: j-- (go left)
self.dir = random.randint(0,3)
if self.currentIndex >= len(self.initPos):
raise RuntimeError("try to init too many ghosts")
m = self.initPos[self.currentIndex][0]
n = self.initPos[self.currentIndex][1]
L[m][n] = 'X'
self.row = m
self.col = n
ghost.currentIndex += 1
def move(self, go, L):
if self.oldpoint == 'X' :
L[self.row][self.col] = 0
else :
L[self.row][self.col] = self.oldpoint
self.row += self.directionsDic[go][0]
self.col += self.directionsDic[go][1]
self.oldpoint = L[self.row][self.col]
L[self.row][self.col] = 'X'
def smallMaze(ghost_num, slippery_num) :
L= [[2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2],
[2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2],
[2,0,0,0,1,1,0,1,0,0,1,0,1,1,0,2,0,2],
[2,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,0,2],
[2,0,0,0,1,1,0,1,0,0,1,0,1,1,0,2,0,2],
[2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2],
[2,0,1,1,0,2,0,1,1,1,1,0,2,0,1,1,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2]]
ghosts = []
ghost.currentIndex = 0
for i in range(ghost_num):
ghosts.append(ghost(L))
count = 0
while count < slippery_num:
m = random.randint(1,len(L)-1)
n = random.randint(1,len(L[0])-1)
if L[m][n] == 0:
L[m][n] = 3
count += 1
return L, ghosts
def bigMaze(ghost_num, slippery_num):
L= [[2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,1,0,2,0,2,0,2,0,2,1,1,1,2,0,0,0,2,0,0,0,2],
[2,0,0,0,0,2,0,2,0,0,0,2,0,0,0,0,0,2,0,2,0,2],
[2,0,0,0,0,2,0,2,0,0,0,2,0,0,0,0,0,0,0,2,0,2],
[2,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,2,0,2],
[2,0,0,2,0,2,0,0,0,2,0,0,0,0,0,0,0,2,0,2,0,2],
[2,0,0,2,0,2,1,2,0,2,0,0,0,2,0,2,0,2,0,2,0,2],
[2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,2,0,0,0,2,0,2],
[2,0,1,2,0,2,0,2,0,1,0,0,0,0,0,1,0,2,0,2,0,2],
[2,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,2,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2]]
ghosts = []
ghost.currentIndex = 0
for i in range(ghost_num):
ghosts.append(ghost(L))
count = 0
while count < slippery_num:
m = random.randint(1,len(L)-1)
n = random.randint(1,len(L[0])-1)
if L[m][n] == 0:
L[m][n] = 3
count += 1
return L, ghosts
# the ghost changes his direction
def randomGhostAction(L, ghost):
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
dir = ghost.dir
i = ghost.row
j = ghost.col
nextI = i + directionsDic[dir][0]
nextJ = j + directionsDic[dir][1]
if isValid(L, nextI, nextJ):
return dir
randomList =[]
for a in range(4):
if(a != dir):
randomList.append(a)
random.shuffle(randomList)
for a in range(len(randomList)):
dir = randomList[a]
nextI = i + directionsDic[dir][0]
nextJ = j + directionsDic[dir][1]
if isValid(L, nextI, nextJ):
return dir
print()
print("should never reach here, Reaching here means we have a poor ghost in a dead corner")
print()
def eclideanGhostAction(L, ghost, pos_i, pos_j):
i = ghost.row
j = ghost.col
dir = [[1,0], [0,1], [-1,0], [0,-1]]
distance = []
for n in range(4):
a = i + dir[n][0]
b = j + dir[n][1]
if isValid(L, a, b):
dis = ((pos_i - a)**2 + (pos_j - b)**2)**(1/2)
distance.append(dis)
else:
distance.append(float("inf"))
minDis = min(distance)
return distance.index(minDis)
def manhanttanGhostAction(L, ghost, pos_i, pos_j):
i = ghost.row
j = ghost.col
dir = [[1,0], [0,1], [-1,0], [0,-1]]
distance = []
for n in range(4):
a = i + dir[n][0]
b = j + dir[n][1]
if isValid(L, a, b):
dis = abs(pos_i - a) + abs(pos_j - b)
distance.append(dis)
else:
distance.append(float("inf"))
minDis = min(distance)
return distance.index(minDis)
def isValid(L, i, j):
if i<= 0 or j<=0 or i >= len(L) - 1 or j>= len(L[0]) - 1 or L[i][j] == 1 or L[i][j] == 2:
return False
return True
def instruction():
print()
print("""Instructions:
The AI Pacman will take his way to move up, down, left or right to eat more dots and avoid being caught by ghosts.
Wish him good luck!""")
print()
#function when bumping into a wall
def wall():
print()
print("Oops! Ran into a wall! Try again!")
print()
def win_game(score):
print()
print("Good! AI got enough scores and Won!")
print("Total scores:", score)
def lose_game(score):
print()
print("Sorry! AI got caught by ghost and Lost!")
print("Total scores:", score)
#function to show the maze
def maze(L, pos_i, pos_j):
for i in range(0, len(L)):
for j in range(0, len(L[0])):
if i == pos_i and j == pos_j:
print("#", end=' ')
elif L[i][j] == 0 :
print(".", end=' ')
elif L[i][j] == 1 :
print("-", end=' ')
elif L[i][j] == 2:
print("|", end=' ')
elif L[i][j] == 3:
print("*", end=' ')
else:
print(L[i][j], end=' ')
print()
def pacmanMove(action, pos_i, pos_j, score, L):
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
isGameover = False
nextI = pos_i + directionsDic[action][0]
nextJ = pos_j + directionsDic[action][1]
if not isValid(L, nextI, nextJ):
wall()
elif L[nextI][nextJ] == "X":
isGameover = True
elif L[nextI][nextJ] == 3:
        n = random.randint(0, 4)  # 1-in-5 (20%) chance that the action fails
if n == 0:
#print("Oops! Slipped and try again")
return isGameover, pos_i, pos_j, score
elif L[nextI][nextJ] == 0:
score += 10
# print(L[pos_i][pos_j])
L[pos_i][pos_j] = " "
# L[nextI][nextJ] = "#"
return isGameover, nextI, nextJ, score
# pass the maze and Pacman's current position explicitly so the ghosts chase the up-to-date position
def ghostMove(ghosts, L, pos_i, pos_j):
for ghost in ghosts:
if ghosts.index(ghost) % 3 == 0:
bestAction = eclideanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif ghosts.index(ghost) % 3 == 1:
bestAction = manhanttanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif ghosts.index(ghost) % 3 == 2:
bestAction = randomGhostAction(L, ghost)
ghost.move(bestAction, L)
# human player plays the game
def humanPlay(L, pos_i, pos_j):
score = 0
while True:
if L[pos_i][pos_j] == 0:
L[pos_i][pos_j] = " "
if L[pos_i][pos_j] == 3:
L[pos_i][pos_j] = "*"
move = input("Enter an action: ('w'=up, 's'=down, 'a'=left, 'd'=right, 'e'=exit)")
if move.lower() == "e":
print("Are you sure you want to leave the game?")
sure = input("Y/N")
if sure.lower() == "y":
print("Bye!")
break
else:
continue
if move.lower() == "s": action = 0
if move.lower() == "d": action = 1
if move.lower() == "w": action = 2
if move.lower() == "a": action = 3
isGameover, pos_i, pos_j, score = pacmanMove(action, pos_i, pos_j, score, L)
ghostMove(ghosts)
if score >= MazeGameBoard.scores_to_win:
maze(L, pos_i, pos_j)
win_game(score)
break
isOver = False
for ghost in ghosts:
if ghost.row == pos_i and ghost.col == pos_j:
maze(L, pos_i, pos_j)
lose_game(score)
isOver = True
break
if isOver: break
maze(L, pos_i, pos_j)
print("Scores:", score)
print()
# baseline AI which chooses actions uniformly at random
def randomAI(L, pos_i, pos_j):
score = 0
while True:
if L[pos_i][pos_j] == 0:
L[pos_i][pos_j] = " "
if L[pos_i][pos_j] == 3:
L[pos_i][pos_j] = "*"
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
action = random.randint(0, 3)
nextI = pos_i + directionsDic[action][0]
nextJ = pos_j + directionsDic[action][1]
while not isValid(L, nextI, nextJ):
action = random.randint(0, 3)
nextI = pos_i + directionsDic[action][0]
nextJ = pos_j + directionsDic[action][1]
if action == 0: nextaction = "down"
elif action == 1: nextaction = "right"
elif action == 2: nextaction = "up"
elif action == 3: nextaction = "left"
print("AI's next action:", nextaction)
input("Press Enter to continue...")
isGameover, pos_i, pos_j, score = pacmanMove(action, pos_i, pos_j, score, L)
        ghostMove(ghosts, L, pos_i, pos_j)
if score >= MazeGameBoard.scores_to_win:
maze(L, pos_i, pos_j)
win_game(score)
break
isOver = False
for ghost in ghosts:
if ghost.row == pos_i and ghost.col == pos_j:
maze(L, pos_i, pos_j)
lose_game(score)
isOver = True
break
if isOver: break
maze(L, pos_i, pos_j)
print("Scores:", score)
print()
def retriveInfoFromGameBoard(gameBoard):
return gameBoard.board, gameBoard.pac_i, gameBoard.pac_j, gameBoard.score
# MCTS AI play the game
def mctsAI(gameBoard, tree, enableHandEnter):
boardStateNode = mcts.pacmanNode(gameBoard, 0)
totalNodeCount = 0
while True:
nodesCount = 0
L0, pos_i0, pos_j0, score0 = retriveInfoFromGameBoard(boardStateNode.board)
for i in range(50):
nodesCount += tree.do_rollout(boardStateNode)
if enableHandEnter:
print("Current Turns:", boardStateNode.board.current_steps)
boardStateNode.board.one_step_more()
if boardStateNode.is_terminal():
break
boardStateNode, boardStateScoreForNN = tree.choose(boardStateNode)
L, pos_i, pos_j, score = retriveInfoFromGameBoard(boardStateNode.board)
if (pos_i - pos_i0) == 1: nextaction = "down"
elif (pos_j - pos_j0) == 1: nextaction = "right"
elif (pos_i0 - pos_i) == 1: nextaction = "up"
elif (pos_j0 - pos_j) == 1: nextaction = "left"
if enableHandEnter:
print("AI's next action:", nextaction)
input("Press Enter to continue...")
if L[pos_i][pos_j] != 3:
L[pos_i][pos_j] = " "
if boardStateNode.is_terminal() == True:
break
ghosts = boardStateNode.board.ghosts
for ghost in ghosts:
if(ghosts.index(ghost) % 3 == 0):
bestAction = eclideanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif(ghosts.index(ghost) % 3 == 1):
bestAction = manhanttanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif (ghosts.index(ghost) % 3 == 2):
bestAction = randomGhostAction(L, ghost)
ghost.move(bestAction, L)
if enableHandEnter:
maze(L, pos_i, pos_j)
print("The number of tree nodes processed:", nodesCount)
print("Scores:", score)
print()
totalNodeCount += nodesCount
# set the depth to 0 for the next round of AI search
boardStateNode = mcts.pacmanNode(boardStateNode.board, 0)
if boardStateNode.board.isWon():
if enableHandEnter:
maze(L, pos_i, pos_j)
win_game(score)
return totalNodeCount, score, True
elif boardStateNode.board.isCaught():
if enableHandEnter:
maze(L, pos_i, pos_j)
lose_game(score)
return totalNodeCount, score, False
else:
if enableHandEnter:
maze(L, pos_i, pos_j)
print("Total scores:", score)
print("The maximum steps pass, AI tied the game")
return totalNodeCount, score, False
def nn_puct(node, L, mode):
net = pn.BlockusNet3(L)
if mode == "big_1_3":
net.load_state_dict(tr.load("model_net3_big_1_3.pth" ))
elif mode == "big_2_3":
net.load_state_dict(tr.load("model_net3_big_2_3.pth" ))
elif mode == "big_2_5":
net.load_state_dict(tr.load("model_net3_big_2_5.pth" ))
elif mode == "small_1_3":
net.load_state_dict(tr.load("model_net3_small_1_3.pth" ))
elif mode == "small_2_5":
net.load_state_dict(tr.load("model_net3_small_2_5.pth" ))
with tr.no_grad():
children = list(node.find_children())
x = tr.stack(tuple(map(pd.encode, [child for child in children])))
y = net(x)
probs = tr.softmax(y.flatten(), dim=0)
a = np.random.choice(len(probs), p=probs.detach().numpy())
return list(node.find_children())[a]
def mcts_nnAI(gameBoard, mode, enableHandEnter):
tree = mcts.MCTS(choose_method = nn_puct, mode = mode)
boardStateNode = mcts.pacmanNode(gameBoard, 0)
totalNodeCount = 0
while True:
nodesCount = 0
L0, pos_i0, pos_j0, score0 = retriveInfoFromGameBoard(boardStateNode.board)
for i in range(15):
nodesCount += tree.do_rollout(boardStateNode)
if enableHandEnter:
print("Current Turns:", boardStateNode.board.current_steps)
boardStateNode.board.one_step_more()
if boardStateNode.is_terminal():
break
boardStateNode, boardStateScoreForNN = tree.choose(boardStateNode)
L, pos_i, pos_j, score = retriveInfoFromGameBoard(boardStateNode.board)
if (pos_i - pos_i0) == 1: nextaction = "down"
elif (pos_j - pos_j0) == 1: nextaction = "right"
elif (pos_i0 - pos_i) == 1: nextaction = "up"
elif (pos_j0 - pos_j) == 1: nextaction = "left"
if enableHandEnter:
print("AI's next action:", nextaction)
input("Press Enter to continue...")
if L[pos_i][pos_j] != 3:
L[pos_i][pos_j] = " "
if boardStateNode.is_terminal() == True:
break
ghosts = boardStateNode.board.ghosts
for ghost in ghosts:
if(ghosts.index(ghost) % 3 == 0):
bestAction = eclideanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif(ghosts.index(ghost) % 3 == 1):
bestAction = manhanttanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif (ghosts.index(ghost) % 3 == 2):
bestAction = randomGhostAction(L, ghost)
ghost.move(bestAction, L)
if enableHandEnter:
maze(L, pos_i, pos_j)
print("The number of tree nodes processed:", nodesCount)
print("Scores:", score)
print()
totalNodeCount += nodesCount
# set the depth to 0 for the next round of AI search
boardStateNode = mcts.pacmanNode(boardStateNode.board, 0)
if boardStateNode.board.isWon():
if enableHandEnter:
maze(L, pos_i, pos_j)
win_game(score)
return totalNodeCount, score, True
elif boardStateNode.board.isCaught():
if enableHandEnter:
maze(L, pos_i, pos_j)
lose_game(score)
return totalNodeCount, score, False
else:
if enableHandEnter:
maze(L, pos_i, pos_j)
print("Total scores:", score)
print("The maximum steps pass, AI tied the game")
return totalNodeCount, score, False
if __name__ == "__main__":
while True :
load = input("""Please choose the problem size:
1) Enter 1 to choose big maze with 1 ghost and 3 slippery positions
2) Enter 2 to choose small maze with 1 ghost and 3 slippery positions
3) Enter 3 to choose big maze with 2 ghosts and 3 slippery positions
4) Enter 4 to choose small maze with 2 ghosts and 5 slippery positions
5) Enter 5 to choose big maze with 2 ghosts and 5 slippery positions
""")
score = 0
if load == "1":
L, ghosts = bigMaze(1,3)
pos_i, pos_j = 3, 8
mode = "big_1_3"
break
elif load == "2":
L, ghosts = smallMaze(1,3)
pos_i, pos_j = 5, 10
mode = "small_1_3"
break
elif load == "3":
L, ghosts = bigMaze(2,3)
pos_i, pos_j = 3, 8
mode = "big_2_3"
break
elif load == "4":
L, ghosts = smallMaze(2,5)
pos_i, pos_j = 5, 10
mode = "small_2_5"
break
elif load == "5":
L, ghosts = bigMaze(2,5)
pos_i, pos_j = 5, 10
mode = "big_2_5"
break
else:
print("Please enter 1,2,3,4 or 5")
while True:
ai_chosen = input("""Please choose the control strategy:
1) Enter 1 to choose human player
2) Enter 2 to choose baseline AI
3) Enter 3 to choose tree-based AI (Enter 5 to run 100 times)
4) Enter 4 to choose tree+NN-based AI(Enter 6 to run 100 times)
""")
if ai_chosen == "1" :
gameMode = "human player"
break
elif ai_chosen == "2" :
gameMode = "baseline AI"
break
elif ai_chosen == "3" :
gameMode = "tree-based AI"
break
elif ai_chosen == "4" :
gameMode = "tree+NN-based AI"
break
elif ai_chosen == "5":
gameMode = "automatic"
break
elif ai_chosen == "6":
gameMode = "automatic tree+NN-based AI"
break
else:
print("Please enter 1,2,3 4, 5, 6")
instruction()
print("Game mode:", gameMode)
print()
maze(L,pos_i,pos_j)
print()
initBoard = MazeGameBoard(L, ghosts, pos_i, pos_j, 0)
tree = mcts.MCTS()
if ai_chosen == "1":
humanPlay(L, pos_i, pos_j)
elif ai_chosen == "2":
randomAI(L, pos_i, pos_j)
elif ai_chosen == "3":
totalnodescount, finalscore, aiWon = mctsAI(copy.deepcopy(initBoard), tree, True)
print("The total number of tree nodes processed in this game is", totalnodescount)
elif ai_chosen == "4":
totalnodescount, finalscore, aiWon = mcts_nnAI(copy.deepcopy(initBoard), mode, True)
print("The total number of tree nodes processed in this game is", totalnodescount)
elif ai_chosen == "5":
nodes_list = [0]
scores_list = [0]
col = ['white']
for i in range(100):
totalnodescount = 0
totalnodescount, finalscore, aiWon = mctsAI(copy.deepcopy(initBoard), tree, False)
print("Game", i+1, ":", totalnodescount, " Score:", finalscore)
nodes_list.append(totalnodescount)
scores_list.append(finalscore)
if aiWon: col.append('#87CEFA')
else: col.append('#FFA500')
import matplotlib.pyplot as plt
plt.bar(range(len(nodes_list)), nodes_list, width=1.0, color=col)
plt.xlabel("Games")
plt.ylabel("Number of tree nodes processed")
plt.title("Efficiency")
plt.show()
plt.bar(range(len(scores_list)), scores_list, width=1.0, color=col)
plt.xlabel("Games")
plt.ylabel("Final scores")
plt.title("Performance")
plt.show()
elif ai_chosen == "6":
nodes_list = [0]
scores_list = [0]
col = ['white']
for i in range(100):
totalnodescount = 0
            totalnodescount, finalscore, aiWon = mcts_nnAI(copy.deepcopy(initBoard), mode, False)
# print("Game", i+1, ":", totalnodescount, " Score:", finalscore)
nodes_list.append(totalnodescount)
scores_list.append(finalscore)
if aiWon: col.append('#87CEFA')
else: col.append('#FFA500')
import matplotlib.pyplot as plt
plt.bar(range(len(nodes_list)), nodes_list, width=1.0, color=col)
plt.xlabel("Games")
plt.ylabel("Number of tree nodes processed")
plt.title("Efficiency")
plt.show()
plt.bar(range(len(scores_list)), scores_list, width=1.0, color=col)
plt.xlabel("Games")
plt.ylabel("Final scores")
plt.title("Performance")
plt.show()
|
python
|
import streamlit as st
import pandas as pd
from tfidf import get_vocab_idf
st.title('Binary Classification')
st.write("This app shows the featurization created from delta tf-idf for binary classification.")
# Sidebar
with st.sidebar.header('1. Upload your CSV data'):
uploaded_file = st.sidebar.file_uploader("Upload your input CSV file", type=["csv"],
help="The labels must be 0 or 1 and the column names must be 'text' and 'label'",
)
with st.sidebar.header("Display parameters"):
idf_range = st.sidebar.slider(label="IDF Range", min_value=-7., max_value=7., step=.5, value=(-7., 7.))
with st.sidebar.header("Feature parameters"):
df_range = st.sidebar.slider(label="Document frequency range", min_value=0.,
max_value=1., step=.0001, value=(0., 1.),
help="Vocabulary outside this range will not be considered")
# Main page
st.subheader('1. Dataset')
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
if st.checkbox(label="View dataset"):
st.write(df)
cnt_0 = df.loc[df['label'] == 0].shape[0]
cnt_1 = df.loc[df['label'] == 1].shape[0]
st.write(f"There are {cnt_0} samples from class 0 and {cnt_1} from class 1.")
if cnt_0 > cnt_1:
st.write(f"Class 0 is the majority class with {cnt_0/df.shape[0]*100:.4f}%")
else:
st.write(f"Class 1 is the majority class with {cnt_1 / df.shape[0]*100:.4f}%")
vocab = get_vocab_idf(df, min_df=df_range[0], max_df=df_range[1])
top_n = vocab.loc[vocab['Delta-Idf'].between(idf_range[0], idf_range[1])]\
.sort_values('Delta-Idf', ascending=False).head(10)
bottom_n = vocab.loc[vocab['Delta-Idf'].between(idf_range[0], idf_range[1])]\
.sort_values('Delta-Idf', ascending=True).head(10)
st.subheader("2. Most relevant words")
right_col, left_col = st.columns(2)
right_col.write("Top 10 most relevant words for negative (0) class")
right_col.dataframe(top_n)
left_col.write("Top 10 most relevant words for positive (1) class")
left_col.dataframe(bottom_n)
st.subheader("3. Word search")
search_word = st.text_input("Input word to search:", )
right_word, left_idf = st.columns(2)
right_word.markdown("#### Word")
left_idf.markdown("#### Delta-Idf")
right_word.write(search_word)
if vocab['Word'].isin([search_word]).any():
found_idf = vocab.loc[vocab['Word'] == search_word, 'Delta-Idf'].values[0]
left_idf.write(found_idf)
else:
if search_word != '': left_idf.write("Word not found.")
else:
st.write('Awaiting Dataset...')
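
# ------------------------------------------------------------------
# Illustrative sketch (not part of the original app): one common way the
# imported `get_vocab_idf` helper could compute the per-word "Delta-Idf"
# score, i.e. a smoothed log ratio of document frequencies between the two
# classes (positive values lean towards class 0, matching how the app labels
# the top/bottom words). The real tfidf module may differ.
#
#   import numpy as np
#   import pandas as pd
#   from sklearn.feature_extraction.text import CountVectorizer
#
#   def get_vocab_idf_sketch(df, min_df=0.0, max_df=1.0):
#       vec = CountVectorizer(binary=True, min_df=min_df, max_df=max_df)
#       counts = vec.fit_transform(df['text'])
#       labels = df['label'].to_numpy()
#       df0 = np.asarray(counts[labels == 0].sum(axis=0)).ravel()
#       df1 = np.asarray(counts[labels == 1].sum(axis=0)).ravel()
#       delta = np.log((df0 + 0.5) / (df1 + 0.5))
#       return pd.DataFrame({'Word': vec.get_feature_names_out(),
#                            'Delta-Idf': delta})
# ------------------------------------------------------------------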
|
python
|
from country_assignment import assign_countries_by_priority
from data_processing.player_data import PlayerData
from flask import (Flask, redirect, render_template, request, url_for, flash)
import os
from pathlib import Path
import argparse
import uuid
app = Flask(__name__)
app.secret_key = os.urandom(24)
unique_country_tags = ["GB", "FR", "GE", "IT", "AH", "RU", "OE"]
country_names = [
"Great Britain", "France", "German Empire", "Italy", "Austria-Hungary",
"Russia", "Ottoman Empire"
]
@app.route('/result/<id>')
def result(id):
''' The result page is shown only once countries have been assigned.
It tells the players which country has been assigned to them.
'''
with PlayerData(players_file) as player_data:
player_name = player_data.get_players_by_id()[id]["name"]
# check if assignment really over, i.e. all players submitted
all_submited = all(p["submitted"] for p in player_data.get_players())
if not all_submited:
return redirect(url_for('country_selection', id=id))
with open(output_file, "r") as file:
for line in file.readlines():
# remove player number, then separate name from tag
player_country = line.split(":")[-1]
p_name, country_tag = player_country.split()
country_ind = unique_country_tags.index(country_tag)
if p_name == player_name:
return render_template("result.html",
player_name=player_name,
country=country_names[country_ind])
return 'ERROR: Unknown player in results'
@app.route("/<id>")
def country_selection(id):
    ''' Country selection screen only accessible for each individual player.
Here, they can submit their priorities.
'''
with PlayerData(players_file) as player_data:
# check if player id correct
player = player_data.get_players_by_id().get(id)
if player is None:
return 'ERROR: Unknown player in country selection'
# load priorities
priorities = [player["prio1"], player["prio2"], player["prio3"]]
already_submitted = player["submitted"]
if already_submitted:
# check if assignment already over, i.e. all players submitted
all_submited = all(p["submitted"]
for p in player_data.get_players())
if all_submited:
return redirect(url_for('result', id=id))
return render_template("country_selection.html",
id=id,
player_name=player["name"],
tags=unique_country_tags,
country_names=country_names,
priorities=priorities,
submitted=already_submitted,
submission_count=sum(
p["submitted"]
for p in player_data.get_players()),
zip=zip)
@app.route("/")
def home():
return "Please use your unique access link."
@app.route('/search', methods=['GET'])
def priorities_submitted():
''' Redirection link that processes the country selection and passes to
either the result page or the selection screen.
'''
prio1 = request.args.get('prio1')
prio2 = request.args.get('prio2')
prio3 = request.args.get('prio3')
id = request.args.get('id')
# check for empty or duplicate entries
priorities = [prio1, prio2, prio3]
for p in priorities:
if p == "":
flash(
"No country selected, please choose one country for each priority!"
)
return redirect(url_for('country_selection', id=id))
if priorities.count(p) > 1:
flash(
"Duplicate entries, please select different countries for each priority!"
)
return redirect(url_for('country_selection', id=id))
with PlayerData(players_file) as player_data:
players = player_data.get_players_by_id()
# set status to submitted
players[id]["submitted"] = True
players[id]["prio1"] = prio1
players[id]["prio2"] = prio2
players[id]["prio3"] = prio3
        players = list(players.values())
# check if all players have submitted
for p in players:
if not p["submitted"]:
return redirect(url_for('country_selection', id=id))
# country assignment
assign_countries_by_priority(players_file)
print("Countries have been assigned.")
return redirect(url_for('result', id=id))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
'Starts a local webserver for the diplomacy game country selection.')
parser.add_argument(
'--json',
help='Storage json file for the player data (default: %(default)s)',
type=str,
default="player_priorities.json")
parser.add_argument(
'--out',
help='Text file to store the result (default: %(default)s)',
type=str,
default="result.txt")
parser.add_argument('--port',
help='Webserver port (default: %(default)s)',
type=int,
default=5000)
parser.add_argument('--id-gen',
help='Generate new player IDs (default: %(default)s)',
action='store_true',
default=False)
parser.add_argument(
'--reset',
help=
'Delete all player selections, make empty country slots instead (default: %(default)s)',
action='store_true',
default=False)
args = parser.parse_args()
players_file = Path(args.json)
output_file = Path(args.out)
with PlayerData(players_file) as player_data:
# create player ids in json
players = player_data.get_players()
for p in players:
if args.reset:
# reset player choices
p["prio1"] = ""
p["prio2"] = ""
p["prio3"] = ""
p["submitted"] = False
if len(p["id"]) == 0 or args.id_gen:
# generate new player id
p["id"] = str(uuid.uuid4())
print("Starting webserver ...")
app.run(port=args.port, threaded=False, processes=1, host="::")
|
python
|
from pynterviews.mutants import mutants
def test_positive():
dna = ["CTGAGA",
"CTGAGC",
"TATTGT",
"AGAGAG",
"CCCCTA",
"TCACTG"]
result = mutants(dna)
assert result
def test_negative():
dna = ["CTGAGA",
"CTGAGC",
"TATTGT",
"AGAGAG",
"CCCATA",
"TCACTG"]
result = mutants(dna)
assert not result
def test_empty():
dna = []
result = mutants(dna)
assert not result
def test_none():
dna = None
result = mutants(dna)
assert not result
def test_large():
dna = ["CTGAGADSFFGAGACTGAGACTGAGACTGAGACTGAGAGAGAC",
"CTGAGCTGAGACTGAGACTGAGACTGAGACTGAGACTGAGACT",
"CTGAGACTGAGACTGAGCTGAGACTGAGACTGAGCTGAGACTG",
"AGACTGAGACTGAGACTGCTGAGACTGAGACTCTGAGACTGAG",
"CTGAGACTGAGCCCCTGAGACTGAGACTGCTGAGACTGAGACD",
"TCACTGCTGAGACTGAGACTGAGCTGAGACTGAGACTGACTGA",
"CTGAGACTGAGACTGAGACTGAGACTGAGACTGAGACTGAGAC",
"ETGAGCTGAGACTGAGACTGAGACTGAGACTGAGACTGAGACT",
"CTGAGACTGAGACTGAGCTGAGACTGAGACTGAGCTGAGACTG",
"AGACTGAGACTGAGACTGCTGAGACTGAGACTCTGAGACTGAG",
"CTGAGACTGAGACTGCTGAGACTGAGACTGCTGAGACTGAGAC",
"TCACTGCTGAGACTGAGACTGAGCTGAGACTGAGACTGACTGA"]
result = mutants(dna)
#TODO: assert not result
|
python
|
def Bisection_Method(equation, a, b, given_error): # function, boundaries, Es
li_a = deque() # a
li_b = deque() # b
li_c = deque() # x root -> c
li_fc = deque() # f(xr)
li_fa = deque() # f(a)
li_fb = deque() # f(b)
li_Ea = deque() # estimated error
data = {
'Xl': li_a,
'Xu': li_b,
'Xr': li_c,
'f(Xl)': li_fa,
'f(Xu)': li_fb,
'f(Xr)': li_fc,
'Ea%': li_Ea,
}
global c
def f(x):
F = eval(equation) # the x here when we f(a) a will be instead of x
return F
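    # Stopping criterion used below: the approximate relative error
    #   Ea = |(c_new - c_old) / c_new| * 100   (in percent)
    # and the bisection keeps halving the bracket until Ea/100 drops below given_error.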
# substitute boundaries in function
if f(a)*f(b) >= 0:
        print('Error', 'Bisection method failed: f(a) and f(b) must have opposite signs')
quit()
# elif we have a different sign
else:
Estimated_Error = 0
while Estimated_Error/100 <= given_error:
c = (a + b) / 2
if Estimated_Error == 0:
li_a.append(a)
li_b.append(b)
li_c.append(c)
li_fa.append(f(a))
li_fb.append(f(b))
li_fc.append(f(c))
li_Ea.append(None)
pass
if f(a)*f(c) < 0:
b = c
c1 = (a + b)/2
                Estimated_Error = abs((c1 - c)/c1) * 100  # c is the old root, c1 the new root: ((current - previous)/current) * 100
elif f(b)*f(c) < 0:
a = c
c1 = (a + b) / 2
Estimated_Error = abs((c1 - c) / c1) * 100
else:
print('Error', 'something is wrong!')
else:
while Estimated_Error/100 >= given_error:
c = (a + b) / 2
                # append data to the list
li_a.append(a)
li_b.append(b)
li_c.append(c)
li_fa.append(f(a))
li_fb.append(f(b))
li_fc.append(f(c))
li_Ea.append('%.5f' % Estimated_Error+'%')
if f(a) * f(c) < 0:
b = c
c1 = (a + b) / 2
                    Estimated_Error = abs((c1 - c) / c1) * 100  # c is the old root, c1 the new root: ((current - previous)/current) * 100
elif f(b) * f(c) < 0:
a = c
c1 = (a + b) / 2
Estimated_Error = abs((c1 - c) / c1) * 100
else:
print('Error', 'something is wrong!')
else:
c = (b + a)/2
li_a.append(a)
li_b.append(b)
li_c.append(c)
li_fa.append(f(a))
li_fb.append(f(b))
li_fc.append(f(c))
li_Ea.append('%.5f' % Estimated_Error+'%')
print(tabulate(data, headers='keys', tablefmt='fancy_grid', showindex=True))
if __name__ == '__main__':
from tabulate import tabulate
from collections import deque
print('\n The first case👇 \n')
Bisection_Method('(5 * x ** 3) - (5 * x ** 2) + 6 * x - 2', 0, 1, 10/100)
print('\n The second case👇 \n')
Bisection_Method('(5 * x ** 3) - (5 * x ** 2) + 6 * x - 2', 0, 5, 10 / 100)
|
python
|
from django.urls import path
app_name = 'profiles'
urlpatterns = []
|
python
|
import os
if os.getenv('HEROKU') is not None:
from .prod import *
elif os.getenv('TRAVIS') is not None:
    from .test import *
else:
    from .base import *
|
python
|
"""
1. Outerwear
1.1. #куртки (jackets)
1.2. #кофты (sweaters)
1.3. #майки (sleeveless tops)
1.4. #футболки (t-shirts)
1.5. #рубашки (shirts)
1.6. #шапки (hats)
1.7. #кепки (caps)
2. Bottoms
2.1. #брюки (trousers)
2.2. #шорты (shorts)
2.3. #ремни (belts)
2.4. #болье (underwear)
2.5. #носки (socks)
3. Suits
3.1. #спортивные (sport)
3.2. #класические (classic)
4. Footwear
4.1. #красовки (sneakers)
4.2. #кеды (canvas shoes)
4.3. #ботинки (boots)
4.4. #туфли (dress shoes)
5. Accessories
5.1. #рюкзаки (backpacks)
5.2. #сумки (bags)
5.3. #очки (glasses)
5.4. #духи (perfume)
5.5. #зонты (umbrellas)
"""
|
python
|
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic system module, executing statements on local node
"""
from subprocess import check_output
from ovs.plugin.provider.configuration import Configuration
class System(object):
"""
Generic helper class
"""
my_machine_id = ''
my_storagerouter_guid = ''
my_storagedriver_id = ''
def __init__(self):
"""
Dummy init method
"""
_ = self
@staticmethod
def get_my_machine_id(client=None):
"""
Returns unique machine id based on mac address
"""
if not System.my_machine_id:
ip_path = Configuration.get('ovs.core.ip.path')
if ip_path is None:
ip_path = "`which ip`"
cmd = """{0} a | grep link/ether | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | sed 's/://g' | sort""".format(ip_path)
if client is None:
output = check_output(cmd, shell=True).strip()
else:
output = client.run(cmd).strip()
for mac in output.split('\n'):
if mac.strip() != '000000000000':
System.my_machine_id = mac.strip()
break
return System.my_machine_id
@staticmethod
def get_my_storagerouter():
"""
Returns unique machine storagerouter id
"""
from ovs.dal.hybrids.storagerouter import StorageRouter
from ovs.dal.lists.storagerouterlist import StorageRouterList
if not System.my_storagerouter_guid:
for storagerouter in StorageRouterList.get_storagerouters():
if storagerouter.machine_id == System.get_my_machine_id():
System.my_storagerouter_guid = storagerouter.guid
return StorageRouter(System.my_storagerouter_guid)
@staticmethod
def get_my_storagedriver_id(vpool_name):
"""
Returns unique machine storagedriver_id based on vpool_name and machineid
"""
return vpool_name + System.get_my_machine_id()
@staticmethod
def update_hosts_file(hostname, ip):
"""
Update/add entry for hostname ip in /etc/hosts
"""
import re
with open('/etc/hosts', 'r') as hosts_file:
contents = hosts_file.read()
if isinstance(hostname, list):
hostnames = ' '.join(hostname)
else:
hostnames = hostname
        result = re.search(r'^{0}\s.*\n'.format(ip), contents, re.MULTILINE)
if result:
contents = contents.replace(result.group(0), '{0} {1}\n'.format(ip, hostnames))
else:
contents += '{0} {1}\n'.format(ip, hostnames)
        with open('/etc/hosts', 'w') as hosts_file:
hosts_file.write(contents)
@staticmethod
def exec_remote_python(client, script):
"""
Executes a python script on a client
"""
return client.run('python -c """{0}"""'.format(script))
@staticmethod
def read_remote_config(client, key):
"""
Reads remote configuration key
"""
read = """
from ovs.plugin.provider.configuration import Configuration
print Configuration.get('{0}')
""".format(key)
return System.exec_remote_python(client, read)
@staticmethod
def ports_in_use(client=None):
"""
Returns the ports in use
"""
cmd = """netstat -ln4 | sed 1,2d | sed 's/\s\s*/ /g' | cut -d ' ' -f 4 | cut -d ':' -f 2"""
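        # the pipeline drops the two netstat header lines, collapses whitespace,
        # keeps the local-address column and reduces it to the bare port number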
if client is None:
output = check_output(cmd, shell=True).strip()
else:
output = client.run(cmd).strip()
for found_port in output.split('\n'):
yield int(found_port.strip())
|
python
|
import os
import pandas as pd
os.chdir('/Users/forrestbadgley/Documents/DataScience/git/NUCHI201801DATA4-Class-Repository-DATA/MWS/Homework/03-Python/Instructions/PyPoll/raw_data')
csv_path = "election_data_1.csv"
csv_path2 = "election_data_2.csv"
elect1_df = pd.read_csv(csv_path)
elect2_df = pd.read_csv(csv_path2)
#vertical stack of two dataframes
elect3_df = pd.concat([elect1_df, elect2_df], axis=0)
total_votes_cast = elect3_df['Voter ID'].value_counts(dropna=True)
elect3_df['Candidate']= elect3_df['Candidate']
candidates_list = elect3_df['Candidate'].unique()
elect3_group = elect3_df.groupby(['Candidate']).count()
total_votes_cast2=elect3_group['Voter ID'].sum()
elect3_group['Decimal']=((elect3_group['Voter ID']/total_votes_cast2)*100).round(2)
print("Election Results")
print("-----------------")
print("Total Votes: " + (str(total_votes_cast2)))
print("-----------------")
print(elect3_group[['Voter ID', 'Decimal']])
|
python
|
import re
from pyhanlp import *
# def Tokenizer(sent, stopwords=None):
# pat = re.compile(r'[0-9!"#$%&\'()*+,-./:;<=>?@—,。:★、¥…【】()《》?“”‘’!\[\\\]^_`{|}~\u3000]+')
# tokens = [t.word for t in HanLP.segment(sent)]
# tokens = [re.sub(pat, r'', t).strip() for t in tokens]
# tokens = [t for t in tokens if t != '']
#
# if stopwords is not None:
# tokens = [t for t in tokens if not (t in stopwords)]
# return tokens
def Tokenizer(sent, stopwords=None):
tokens = sent.split()
del tokens[0]
tokens = list(filter(lambda token: token != '', tokens))
#tokens = list(filter(lambda token: len(tokens) > 3, tokens))
if stopwords is not None:
tokens = [t for t in tokens if not (t in stopwords)]
return tokens
# def Tokenizer(sent,stopwords=None):
# # Tokenizer for English.
# pat = re.compile(r'[0-9!"#$%&\'()*+,-./:;<=>?@—,。:★、¥…【】()《》?“”‘’!\[\\\]^_`{|}~\u3000]+')
# tokens = [re.sub(pat,r'',t).strip() for t in sent.split(' ')]
# tokens = [t for t in tokens if t != '']
# from nltk.stem import WordNetLemmatizer
# wnl = WordNetLemmatizer()
# tokens = [wnl.lemmatize(t).lower() for t in tokens]
# if stopwords is not None:
# tokens = [t for t in tokens if not (t in stopwords)]
# return tokens
if __name__ == '__main__':
print(Tokenizer('他拿的是《红楼梦》?!我还以为他是个Foreigner———'))
|
python
|
import numpy as np
from seisflows.tools.array import uniquerows
from seisflows.tools.code import Struct
from seisflows.tools.io import BinaryReader, mychar, mysize
from seisflows.seistools.shared import SeisStruct
from seisflows.seistools.segy.headers import \
SEGY_TAPE_LABEL, SEGY_BINARY_HEADER, SEGY_TRACE_HEADER
NMAX = 100000
FIXEDLENGTH = True
SAVEHEADERS = True
COORDSCALAR = 1.
DEPTHSCALAR = 1.
FIELDS = [
'TraceSequenceLine',
'SourceWaterDepth',
'GroupWaterDepth',
'ElevationOrDepthScalar',
'CoordinateScalar',
'SourceX',
'SourceY',
'GroupX',
'GroupY',
'RecordingDelay_ms',
'NumberSamples',
'SampleInterval_ms']
# cull header fields
_tmp = []
for field in SEGY_TRACE_HEADER:
if field[-1] in FIELDS:
_tmp.append(field)
SEGY_TRACE_HEADER = _tmp
class SeismicReader(BinaryReader):
""" Base class used by both SegyReader and SuReader
"""
def ReadSeismicData(self):
nsamples = int(self.read('int16', 1, self.offset + 114)[0])
nbytes = int(nsamples*self.dsize + 240)
ntraces = int((self.size - self.offset)/nbytes)
# prepare offset pointers
if FIXEDLENGTH:
tracelen = [nsamples]*ntraces
traceptr = [nbytes*i + self.offset for i in range(ntraces)]
else:
ntraces = 1
tracelen = []
traceptr = [self.offset]
while 1:
ntraces += 1
nsamples = int(self.read('int16', 1, traceptr[-1] + 114)[0])
nbytes = nsamples*self.dsize + 240
tracelen.append(nsamples)
traceptr.append(traceptr[-1] + nbytes)
if ntraces > NMAX:
raise Exception
elif traceptr[-1] >= self.size:
raise Exception
traceptr = traceptr[:-1]
tracelen = tracelen[:-1]
# preallocate trace headers
if SAVEHEADERS:
h = [self.scan(SEGY_TRACE_HEADER, traceptr[0], contiguous=False)]
h = h*ntraces
else:
h = []
# preallocate data array
if FIXEDLENGTH:
d = np.zeros((nsamples, ntraces))
else:
            d = np.zeros((max(tracelen), len(traceptr)))
# read trace headers and data
for k in range(ntraces):
if SAVEHEADERS:
h[k] = self.scan(SEGY_TRACE_HEADER, traceptr[k],
contiguous=False)
d[:, k] = self.read(self.dtype, nsamples, traceptr[k] + 240)
# store results
self.ntraces = ntraces
self.hdrs = h
self.data = d
def getstruct(self):
nr = self.ntraces
# collect scalars
nt = self.getscalar('NumberSamples')
ts = self.getscalar('RecordingDelay_ms')
dt = self.getscalar('SampleInterval_ms')
# collect arrays
sx = self.getarray('SourceX')
sy = self.getarray('SourceY')
sz = self.getarray('SourceWaterDepth')
rx = self.getarray('GroupX')
ry = self.getarray('GroupY')
rz = self.getarray('GroupWaterDepth')
# apply scaling factors
if COORDSCALAR and DEPTHSCALAR:
c1 = COORDSCALAR
c2 = DEPTHSCALAR
c3 = 1.e-6
else:
c1 = self.getscalar('CoordinateScalar')
c2 = self.getscalar('ElevationOrDepthScalar')
c3 = 1.e-6
sxyz = np.column_stack([sx, sy, sz])
rxyz = np.column_stack([rx, ry, rz])
nsrc = len(uniquerows(sxyz))
nrec = len(uniquerows(rxyz))
return SeisStruct(nr, nt, dt, ts,
c1*sx, c1*sy, c2*sz,
c1*rx, c1*ry, c2*rz,
nsrc, nrec)
def getarray(self, key):
# collect array
        values = [hdr[key] for hdr in self.hdrs]
        return np.array(values)
def getscalar(self, key):
# collect scalar
array = self.getarray(key)
return array[0]
class SegyReader(SeismicReader):
""" SEGY reader
"""
def __init__(self, fname, endian=None):
SeismicReader.__init__(self, fname, endian)
self.dtype = 'float'
self.dsize = mysize(self.dtype)
self.offset = 0
# check byte order
if endian:
self.endian = endian
else:
            raise ValueError("SegyReader should specify the endianness")
def ReadSegyHeaders(self):
# read in tape label header if present
code = self.read('char', 2, 4)
if code == 'SY':
            tapelabel = self.scan(SEGY_TAPE_LABEL, self.offset)
self.offset += 128
else:
tapelabel = 'none'
# read textual file header
self.segyTxtHeader = self.read('char', 3200, self.offset)
self.offset += 3200
# read binary file header
self.segyBinHeader = self.scan(SEGY_BINARY_HEADER, self.offset)
self.offset += 400
# read in extended textual headers if present
self.CheckSegyHeaders()
def CheckSegyHeaders(self):
# check revision number
self.segyvers = '1.0'
# check format code
self.segycode = 5
# check trace length
if FIXEDLENGTH:
assert bool(self.segyBinHeader.FixedLengthTraceFlag) == bool(
FIXEDLENGTH)
class SuReader(SeismicReader):
""" Seismic Unix file reader
"""
def __init__(self, fname, endian=None):
SeismicReader.__init__(self, fname, endian)
self.dtype = 'float'
self.dsize = mysize(self.dtype)
self.offset = 0
# check byte order
if endian:
self.endian = endian
else:
raise ValueError("SU Reader should specify the endianness")
def readsegy(filename):
""" SEGY convenience function
"""
obj = SegyReader(filename, endian='>')
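    # endianness is hard-coded: SEGY data are read as big-endian ('>') here,
    # while readsu() below assumes little-endian ('<')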
obj.ReadSegyHeaders()
obj.ReadSeismicData()
d = obj.data
h = obj.getstruct()
return d, h
def readsu(filename):
""" SU convenience function
"""
obj = SuReader(filename, endian='<')
obj.ReadSeismicData()
d = obj.data
h = obj.getstruct()
return d, h
|
python
|
import sys
import torch
import numpy as np
sys.path.append('../')
from models import networks
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--model-in-file',help='file path to generator model to export (.pth file)',required=True)
parser.add_argument('--model-out-file',help='file path to exported model (.pt file)')
parser.add_argument('--model-type',default='mobile_resnet_9blocks',help='model type, e.g. mobile_resnet_9blocks')
parser.add_argument('--img-size',default=256,type=int,help='square image size')
parser.add_argument('--cpu',action='store_true',help='whether to export for CPU')
parser.add_argument('--bw',action='store_true',help='whether input/output is bw')
args = parser.parse_args()
if not args.model_out_file:
model_out_file = args.model_in_file.replace('.pth','.pt')
else:
model_out_file = args.model_out_file
if args.bw:
input_nc = output_nc = 1
else:
input_nc = output_nc = 3
ngf = 64
use_dropout = False
decoder = True
img_size = args.img_size
model = networks.define_G(input_nc,output_nc,ngf,args.model_type,'instance',use_dropout,
decoder=decoder,
img_size=args.img_size,
img_size_dec=args.img_size)
if not args.cpu:
model = model.cuda()
model.eval()
model.load_state_dict(torch.load(args.model_in_file))
if args.cpu:
device = 'cpu'
else:
device = 'cuda'
dummy_input = torch.randn(1, input_nc, args.img_size, args.img_size, device=device)
jit_model = torch.jit.trace(model, dummy_input)
jit_model.save(model_out_file)
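# A minimal sketch (standard TorchScript usage, not part of the original script) of how
# the exported file could be loaded and run afterwards:
#   loaded = torch.jit.load(model_out_file)
#   out = loaded(torch.randn(1, input_nc, args.img_size, args.img_size, device=device))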
|
python
|
def parse_line(line, extraction_map):
print("---------parsing line------")
key = get_key_for_line(line)
extraction_guide = extraction_map[key]
obj = get_blank_line_object()
flag = special_line_case(key)
answer_flag = special_answer_case(key)
if (flag == True):
line = escape_underscore(key, line)
if (answer_flag == True or key == "answerQuestion.userClick.NA"):
line = escape_parenth(line)
if (answer_flag == True):
semi_final_line = replace_all_delimeters_with_commas_after_field_6_and_answer_field(key, line)
else:
semi_final_line = replace_all_delimeters_with_commas_after_field_6(line)
# get rid of ignored data at end of line so can compare field counts.
almost_final_line = semi_final_line.replace(",false,false,false,false,false,false\n", "")
final_line = almost_final_line.replace(",false,false,false,false,false,false", "")
guide_parts = extraction_guide.split(",")
final_line_parts = final_line.split(",")
if (len(guide_parts) != len(final_line_parts)):
print("ERROR - guide field count {} line field count {}".format(len(guide_parts),len(final_line_parts)))
print("original line : {}".format(line))
print("all commas line : {}".format(final_line))
print("extraction guide : {}".format(extraction_guide))
raise SystemExit
field_count = len(guide_parts)
for i in range(field_count):
col_name = guide_parts[i]
if ("NOTE_PRESENCE" in col_name):
col_name_parts = col_name.split(">")
true_col_name = col_name_parts[1]
obj[true_col_name] = "yes"
elif (col_name == "OMIT"):
# skip this one
continue
else:
unescaped_value = unescape_all(final_line_parts[i])
print("colname {} gets val {}".format(col_name, unescaped_value))
obj[col_name] = unescaped_value
return obj
def replace_all_delimeters_with_commas_after_field_6(line):
fields = line.split(",")
# go through each field, after the first 6
new_string = ""
for i in range(len(fields)):
if (i == 0):
new_string = "{}".format(fields[i])
elif (i < 6):
# copy without changing
new_string = "{},{}".format(new_string, fields[i])
else:
# replace delims
new_string = "{},{}".format(new_string, replace_all_delimeters_with_commas(fields[i]))
return new_string
def replace_all_delimeters_with_commas(line):
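    # illustrative example: "a:b;c_(d)" becomes "a,b,c,d"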
no_under_and_left_parens = line.replace("_(", ",")
no_colons = no_under_and_left_parens.replace(":",",")
no_semicolons = no_colons.replace(";", ",")
no_underscores = no_semicolons.replace("_",",")
no_left_parens = no_underscores.replace("(",",")
no_right_parens = no_left_parens.replace(")","")
return no_right_parens
def get_key_for_line(line):
key = "UNKNOWN"
fields = line.split(',')
print("{}".format(line))
if ("userClick" in line):
key = get_key_for_user_click_line(line)
elif ("startMouseOverSaliencyMap" in line):
key = "startMouseOverSaliencyMap"
elif ("endMouseOverSaliencyMap" in line):
key = "endMouseOverSaliencyMap"
elif ("waitForResearcherStart" in line):
key = "waitForResearcherStart"
elif ("waitForResearcherEnd" in line):
key = "waitForResearcherEnd"
else:
# uses primary discriminator as key
field = fields[6]
subfields = field.split(';')
subfield0 = subfields[0]
subsubfields = subfield0.split(':')
key = subsubfields[0]
return key
def get_key_for_user_click_line(line):
key = "UNKNOWN"
if ("answerQuestion" in line):
#need to look into the saved off click
if ("(NA)" in line):
key = "answerQuestion.userClick.NA"
elif ("clickEntity" in line):
key = "answerQuestion.userClick.clickEntity"
elif ("selectedRewardBar" in line):
key = "answerQuestion.userClick.selectedRewardBar"
elif ("clickSaliencyMap" in line):
key = "answerQuestion.userClick.clickSaliencyMap"
else:
# use secondary discriminator as key
fields = line.split(',')
field = fields[6]
subfields = field.split(';')
subfield3 = subfields[3]
subsubfields = subfield3.split(':')
key = subsubfields[0]
if (key == "NA"):
key = "userClick"
return key
def special_line_case(key):
if (key == "clickSaliencyMap" or key == "startMouseOverSaliencyMap" or key == "endMouseOverSaliencyMap"):
return True
else:
return False
def special_answer_case(key):
if (key == "answerQuestion.userClick.clickEntity" or key == "answerQuestion.userClick.selectedRewardBar" or key == "answerQuestion.userClick.clickSaliencyMap"):
return True
else:
return False
def unescape_all(s):
#with_comma = with_underscore.replace("ESCAPED-COMMA", ",")
#with_newline = with_comma.replace("ESCAPED-NEWLINE", "\n")
with_underscore = s.replace("ESCAPED-UNDERSCORE", "_")
with_colon = with_underscore.replace("ESCAPED-COLON", ":")
with_semicolon = with_colon.replace("ESCAPED-SEMICOLON", ";")
with_left_parenth = with_semicolon.replace("ESCAPED-LEFT-PARENTH", "(")
with_right_parenth = with_left_parenth.replace("ESCAPED-RIGHT-PARENTH", ")")
return with_right_parenth
def escape_underscore(key, line):
if (key == "clickSaliencyMap"):
fields = line.split(',')
field = fields[6]
subfields = field.split(';')
subfield2 = subfields[2]
subsubfields = subfield2.split(':')
target_replace = subsubfields[1]
new_target_replace = target_replace.replace("_", "ESCAPED-UNDERSCORE")
subsubfields[1] = new_target_replace
new_subsubfields = ':'.join([str(i) for i in subsubfields])
subfields[2] = new_subsubfields
new_subfields = ';'.join([str(j) for j in subfields])
fields[6] = new_subfields
new_line = ','.join([str(k) for k in fields])
return new_line
else:
new_line = line.replace("_", "ESCAPED-UNDERSCORE")
return new_line
def escape_parenth (line):
fields = line.split(",")
field = fields[6]
subfields = field.split(';')
subfield3 = subfields[3]
subsubfields = subfield3.split(':')
answer_fields = subsubfields[1]
answer_subfields = answer_fields.split('_')
answer_one = answer_subfields[1]
answer_two = answer_subfields[2]
new_answer_one = answer_one.replace("(", "ESCAPED-LEFT-PARENTH")
new_answer_two = answer_two.replace("(", "ESCAPED-LEFT-PARENTH")
new_new_answer_one = new_answer_one.replace(")", "ESCAPED-RIGHT-PARENTH")
new_new_answer_two = new_answer_two.replace(")", "ESCAPED-RIGHT-PARENTH")
answer_subfields[1] = new_new_answer_one
answer_subfields[2] = new_new_answer_two
new_answer_fields = '_'.join([str(h) for h in answer_subfields])
subsubfields[1] = new_answer_fields
new_subfield3 = ':'.join([str(i) for i in subsubfields])
subfields[3] = new_subfield3
new_field = ';'.join([str(j) for j in subfields])
fields[6] = new_field
new_line = ','.join([str(k) for k in fields])
return new_line
def replace_all_delimeters_with_commas_after_field_6_and_answer_field(key, line):
entries = line.split('_(', 1)
start_of_click_answer_entry = entries[1]
find_end_of_click_answer = start_of_click_answer_entry.split(')')
answer_entry = find_end_of_click_answer[0]
button_save_info = entries[0]
if (key == "answerQuestion.userClick.clickSaliencyMap"):
answer_entry = escape_underscore("clickSaliencyMap", answer_entry)
new_string = replace_all_delimeters_with_commas_after_field_6(button_save_info)
new_answer_string = replace_all_delimeters_with_commas_after_field_6(answer_entry)
new_new_string = new_string + ',' + new_answer_string
return new_new_string
def get_blank_line_object():
obj = {}
obj["fileName"] = "NA"
obj["date"] = "NA"
obj["time"] = "NA"
obj["1970Sec"] = "NA"
obj["decisionPoint"] = "NA"
obj["questionId"] = "NA"
obj["stepIntoDecisionPoint"] = "NA"
obj["showQuestion"] = "NA"
obj["hideEntityTooltips"] = "NA"
obj["showEntityTooltip.entityInfo"] = "NA"
obj["showEntityTooltip.tipQuadrant"] = "NA"
obj["startMouseOverSaliencyMap"] = "NA"
obj["endMouseOverSaliencyMap"] = "NA"
obj["waitForResearcherStart"] = "NA"
obj["waitForResearcherEnd"] = "NA"
obj["userClick"] = "NA"
obj["userClick.coordX"] = "NA"
obj["userClick.coordY"] = "NA"
obj["userClick.region"] = "NA"
obj["userClick.target"] = "NA"
obj["userClick.answerQuestion.clickStep"] = "NA"
obj["userClick.answerQuestion.questionId"] = "NA"
obj["userClick.answerQuestion.answer1"] = "NA"
obj["userClick.answerQuestion.answer2"] = "NA"
obj["userClick.answerQuestion.userClick"] = "NA"
obj["userClick.answerQuestion.userClick.fileName"] = "NA"
obj["userClick.answerQuestion.userClick.date"] = "NA"
obj["userClick.answerQuestion.userClick.time"] = "NA"
obj["userClick.answerQuestion.userClick.1970Sec"] = "NA"
obj["userClick.answerQuestion.userClick.decisionPoint"] = "NA"
obj["userClick.answerQuestion.userClick.questionId"] = "NA"
obj["userClick.answerQuestion.userClick.coordX"] = "NA"
obj["userClick.answerQuestion.userClick.coordY"] = "NA"
obj["userClick.answerQuestion.userClick.region"] = "NA"
obj["userClick.answerQuestion.userClick.target"] = "NA"
obj["userClick.answerQuestion.userClick.clickEntity.clickGameEntity"] = "NA"
obj["userClick.answerQuestion.userClick.clickEntity.clickQuadrant"] = "NA"
obj["userClick.answerQuestion.userClick.clickEntity.coordX"] = "NA"
obj["userClick.answerQuestion.userClick.clickEntity.coordY"] = "NA"
obj["userClick.answerQuestion.userClick.selectedRewardBar"] = "NA"
obj["userClick.answerQuestion.userClick.clickSaliencyMap"] = "NA"
obj["userClick.answerQuestion.userClick.clickSaliencyMap.clickGameEntity"] = "NA"
obj["userClick.answerQuestion.userClick.clickSaliencyMap.clickQuadrant"] = "NA"
obj["userClick.timelineClick"] = "NA"
obj["userClick.jumpToDecisionPoint"] = "NA"
obj["userClick.clickTimeLineBlocker"] = "NA"
obj["userClick.play"] = "NA"
obj["userClick.pause"] = "NA"
obj["userClick.touchStepProgressLabel"] = "NA"
obj["userClick.clickGameQuadrant"] = "NA"
obj["userClick.clickEntity.clickGameEntity"] = "NA"
obj["userClick.clickEntity.clickQuadrant"] = "NA"
obj["userClick.clickEntity.coordX"] = "NA"
obj["userClick.clickEntity.coordY"] = "NA"
obj["userClick.clickActionLabel"] = "NA"
obj["userClick.clickActionLabelDenied"] = "NA"
obj["userClick.selectedRewardBar"] = "NA"
obj["userClick.clickSaliencyMap"] = "NA"
obj["userClick.clickSaliencyMap.clickGameEntity"] = "NA"
obj["userClick.clickSaliencyMap.clickQuadrant"] = "NA"
obj["userClick.touchCumRewardLabel"] = "NA"
obj["userClick.touchCumRewardValueFor"] = "NA"
return obj
|
python
|
from matplotlib.offsetbox import AnchoredText
import numpy as np
import matplotlib.pyplot as plt
from iminuit import Minuit, describe
from iminuit.util import make_func_code
class Chi2Reg:  # Plain chi-square regression: dx is stored for plotting but only dy enters the chi2 (see EffVarChi2Reg below)
# this part defines the variables the class will use
def __init__(self, model, x, y, dx, dy):
self.model = model # model predicts y value for given x value
self.x = np.array(x) # the x values
self.y = np.array(y) # the y values
self.dx = np.array(dx) # the x-axis uncertainties
self.dy = np.array(dy) # the y-axis uncertainties
self.func_code = make_func_code(describe(self.model)[1:])
# this part defines the calculations when the function is called
def __call__(self, *par): # par are a variable number of model parameters
self.ym = self.model(self.x, *par)
chi2 = sum(((self.y - self.ym) ** 2) / (self.dy ** 2)) # chi2 is now Sum of: f(x)-y)^2/(uncert_y^2)
return chi2
# this part defines a function called "show" which will make a nice plot when invoked
def show(self, optimizer, x_title="X", y_title="Y", goodness_loc=2):
self.par = optimizer.parameters
self.fit_arg = optimizer.fitarg
self.chi2 = optimizer.fval
self.ndof = len(self.x) - len(self.par)
self.chi_ndof = self.chi2 / self.ndof
self.par_values = []
self.par_error = []
text = ""
for _ in (self.par):
self.par_values.append(self.fit_arg[_])
self.par_error.append(self.fit_arg["error_" + _])
text += "%s = %0.4f \u00B1 %0.4f \n" % (_, self.fit_arg[_], self.fit_arg["error_" + _])
text = text + "\u03C7\u00B2 /ndof = %0.4f(%0.4f/%d)" % (self.chi_ndof, self.chi2, self.ndof)
self.func_x = np.linspace(self.x[0], self.x[-1], 10000) # 10000 linearly spaced numbers
self.y_fit = self.model(self.func_x, *self.par_values)
plt.rc("font", size=16, family="Times New Roman")
fig = plt.figure(figsize=(8, 6))
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(self.func_x, self.y_fit) # plot the function over 10k points covering the x axis
ax.scatter(self.x, self.y, c="red")
# ax.errorbar(self.x, self.y, self.dy, self.dy,fmt='none',ecolor='red', capsize=3) typo here I think! dy twice instead of dy, dx
ax.errorbar(self.x, self.y, self.dy, self.dx, fmt='none', ecolor='red', capsize=3)
ax.set_xlabel(x_title, fontdict={"size": 21})
ax.set_ylabel(y_title, fontdict={"size": 21})
anchored_text = AnchoredText(text, loc=goodness_loc)
ax.add_artist(anchored_text)
plt.grid(True)
class EffVarChi2Reg: # This class is like Chi2Regression but takes into account dx
# this part defines the variables the class will use
def __init__(self, model, x, y, dx, dy):
self.model = model # model predicts y value for given x value
self.x = np.array(x) # the x values
self.y = np.array(y) # the y values
self.dx = np.array(dx) # the x-axis uncertainties
self.dy = np.array(dy) # the y-axis uncertainties
self.func_code = make_func_code(describe(self.model)[1:])
        # step size for the numerical derivative df/dx: (last x value - first x value) / 10000
        self.h = (x[-1] - x[0]) / 10000
# this part defines the calculations when the function is called
def __call__(self, *par): # par are a variable number of model parameters
self.ym = self.model(self.x, *par)
df = (self.model(self.x + self.h, *par) - self.ym) / self.h # the derivative df/dx at point x is taken as [f(x+h)-f(x)]/h
chi2 = sum(((self.y - self.ym) ** 2) / (self.dy ** 2 + (df * self.dx) ** 2)) # chi2 is now Sum of: f(x)-y)^2/(uncert_y^2+(df/dx*uncert_x)^2)
return chi2
# this part defines a function called "show" which will make a nice plot when invoked
def show(self, optimizer, x_title="X", y_title="Y", goodness_loc=2):
self.par = optimizer.parameters
self.fit_arg = optimizer.fitarg
self.chi2 = optimizer.fval
self.ndof = len(self.x) - len(self.par)
self.chi_ndof = self.chi2 / self.ndof
self.par_values = []
self.par_error = []
text = ""
for _ in (self.par):
self.par_values.append(self.fit_arg[_])
self.par_error.append(self.fit_arg["error_" + _])
text += "%s = %0.4f \u00B1 %0.4f \n" % (_, self.fit_arg[_], self.fit_arg["error_" + _])
text = text + "\u03C7\u00B2 /ndof = %0.4f(%0.4f/%d)" % (self.chi_ndof, self.chi2, self.ndof)
self.func_x = np.linspace(self.x[0], self.x[-1], 10000) # 10000 linearly spaced numbers
self.y_fit = self.model(self.func_x, *self.par_values)
plt.rc("font", size=16, family="Times New Roman")
fig = plt.figure(figsize=(8, 6))
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(self.func_x, self.y_fit) # plot the function over 10k points covering the x axis
ax.scatter(self.x, self.y, c="red")
# ax.errorbar(self.x, self.y, self.dy, self.dy,fmt='none',ecolor='red', capsize=3) typo here I think! dy twice instead of dy, dx
ax.errorbar(self.x, self.y, self.dy, self.dx, fmt='none', ecolor='red', capsize=3)
ax.set_xlabel(x_title, fontdict={"size": 21})
ax.set_ylabel(y_title, fontdict={"size": 21})
anchored_text = AnchoredText(text, loc=goodness_loc)
ax.add_artist(anchored_text)
plt.grid(True)
if __name__ == "__main__":
np.random.seed(42)
X = np.linspace(1,6,5)
dX = 0.1 * np.ones(len(X))
y = 2*X + np.random.randn(len(X))
dy = abs(np.random.randn(len(X)))
fun = lambda X,a,b: a*X + b
reg = Chi2Reg(fun,X,y,dX,dy)
opt = Minuit(reg)
opt.migrad()
reg.show(opt)
plt.show()
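    # A short sketch (same assumptions about the iminuit version as the example above)
    # exercising the effective-variance fitter on the same data:
    reg_ev = EffVarChi2Reg(fun, X, y, dX, dy)
    opt_ev = Minuit(reg_ev)
    opt_ev.migrad()
    reg_ev.show(opt_ev)
    plt.show()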
|
python
|
import socket
HOST, PORT = "localhost", 9999
msg = b'\x16\x04\x04\x01\xfd 94193A04010020B8'
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("localhost", 33000))
s.sendto(msg, (HOST, PORT))
|
python
|
from ..model_tests_utils import (
status_codes,
DELETE,
PUT,
POST,
GET,
ERROR,
random_model_dict,
check_status_code,
compare_data
)
from core.models import (
Inventory,
Actor,
Status
)
inventory_test_data = {}
inventory_tests = [
##----TEST 0----##
# creates 6 actors
# creates 2 statuses
# creates an inventory with 3 of the actors and a status
# gets it
# updates inventory with 3 other actors and the other status
# gets it
# deletes it
# gets it (should error)
[
*[{
'name': name,
'method': POST,
'endpoint': 'actor-list',
'body': random_model_dict(Actor),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST,
}
}
} for name in ['owner0','operator0','lab0','owner1','operator1','lab1']
],
*[{
'name': name,
'method': POST,
'endpoint': 'status-list',
'body': random_model_dict(Status),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST,
}
}
} for name in ['status0','status1']
],
{
'name': 'inventory',
'method': POST,
'endpoint': 'inventory-list',
'body': (request_body := random_model_dict(Inventory,
owner='owner0__url',
operator='operator0__url',
lab='lab0__url',
status='status0__url')),
'args': [],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': POST,
'request_body': request_body,
}
}
},
{
'name': 'inventory_get',
'method': GET,
'endpoint': 'inventory-detail',
'body': {},
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET,
}
}
},
{
'name': 'inventory_update',
'method': PUT,
'endpoint': 'inventory-detail',
'body': (request_body := random_model_dict(Inventory,
owner='owner1__url',
operator='operator1__url',
lab='lab1__url',
status='status1__url')),
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': PUT,
'request_body': request_body
}
}
},
{
'name': 'inventory_update_get',
'method': GET,
'endpoint': 'inventory-detail',
'body': {},
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET,
}
}
},
{
'name': 'inventory_update_del',
'method': DELETE,
'endpoint': 'inventory-detail',
'body': {},
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': DELETE,
}
}
},
{
'name': 'inventory_update_del_get',
'method': GET,
'endpoint': 'inventory-detail',
'body': {},
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': ERROR,
}
}
},
],
]
|
python
|
from utils import utils
day = 18
tD = """
2 * 3 + (4 * 5)
5 + (8 * 3 + 9 + 3 * 4 * 3)
5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))
((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2
"""
tA1 = 26 + 437 + 12240 + 13632
tA2 = 46 + 1445 + 669060 + 23340
class Calculator:
def __init__(self, pattern):
self.pattern = pattern
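        # 'pattern' is a list of operator sets applied in order: each pass evaluates only
        # those operators left-to-right, so ["+*"] gives + and * equal precedence (part one)
        # while ["+", "*"] resolves every addition before any multiplication (part two)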
def findFirstBrackets(self, line):
# Find those brackets
bracketStack = list()
for i in range(len(line)):
c = line[i]
if c == "(":
bracketStack.append(i)
elif c == ")":
j = bracketStack.pop()
return (j, i)
return None
def findSum(self, line, charSet):
found = False
startingPointer = 0
for i in range(len(line)):
c = line[i]
# Is this the end?
if not c.isdigit() and found:
return (startingPointer, i)
# Is this the start?
elif c in charSet and not found:
found = True
elif not c.isdigit():
startingPointer = i + 1
# if we found a digit, but reached the end, we still have maths to do
return (startingPointer, len(line)) if found else None
def solve(self, line, charset):
sumRange = self.findSum(line, charset)
while sumRange != None:
result = eval(line[sumRange[0]:sumRange[1]])
line = str(result).join([line[:sumRange[0]], line[sumRange[1]:]])
sumRange = self.findSum(line, charset)
return line
def calculate(self, line, charset):
line = line.strip().replace(" ", "")
brackets = self.findFirstBrackets(line)
while brackets != None:
partialLine = line[brackets[0]+1:brackets[1]]
partial = self.calculateLine(partialLine)
line = str(partial).join([line[:brackets[0]], line[brackets[1]+1:]])
brackets = self.findFirstBrackets(line)
return self.solve(line, charset)
def calculateLine(self, line):
for charset in self.pattern:
line = self.calculate(line, charset)
return int(line)
def sumData(self, data):
return sum(self.calculateLine(l) for l in data)
def test():
assert Calculator(["+*"]).sumData(utils.load_test_data(tD)) == tA1
assert Calculator(["+", "*"]).sumData(utils.load_test_data(tD)) == tA2
return "Pass!"
if __name__ == "__main__":
def process_data(d): return d
def partOne(d): return Calculator(["+*"]).sumData(d)
def partTwo(d): return Calculator(["+", "*"]).sumData(d)
utils.run(day, process_data, test, partOne, partTwo)
|
python
|
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
# Core idea:
# This problem is a special case of add_two_numbers_II_q445.py, so the approach is simpler.
# Also, nothing forbids modifying the original list, so one could reverse it first
# (least significant digit in front), do the addition, then reverse it back.
class Solution(object):
def plusOne(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
dummy = ListNode(0)
dummy.next = head
pnc = p = dummy
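        # pnc tracks the last node (so far) whose value is not 9; if adding one to the
        # final digit carries, pnc absorbs the carry and every 9 after it becomes 0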
while p.next:
if p.val != 9:
pnc = p
p = p.next
val = p.val + 1
if val > 9:
p.val = 0
pnc.val += 1
while pnc.next != p:
pnc.next.val = 0
pnc = pnc.next
else:
p.val = val
return dummy.next if dummy.val == 0 else dummy
|
python
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete a job store used by a previous Toil workflow invocation."""
import logging
from toil.common import Toil, parser_with_common_options
from toil.jobStores.abstractJobStore import NoSuchJobStoreException
from toil.statsAndLogging import set_logging_from_options
logger = logging.getLogger(__name__)
def main():
parser = parser_with_common_options(jobstore_option=True)
options = parser.parse_args()
set_logging_from_options(options)
try:
jobstore = Toil.getJobStore(options.jobStore)
jobstore.resume()
jobstore.destroy()
logger.info(f"Successfully deleted the job store: {options.jobStore}")
except NoSuchJobStoreException:
logger.info(f"Failed to delete the job store: {options.jobStore} is non-existent.")
except:
logger.info(f"Failed to delete the job store: {options.jobStore}")
raise
|
python
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import time
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.clip import GradientClipByGlobalNorm
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from seq2seq_dygraph_model import BaseModel, AttentionModel
from seq2seq_utils import Seq2SeqModelHyperParams
from seq2seq_utils import get_data_iter
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
program_translator = ProgramTranslator()
STEP_NUM = 10
PRINT_STEP = 2
def prepare_input(batch):
src_ids, src_mask, tar_ids, tar_mask = batch
src_ids = src_ids.reshape((src_ids.shape[0], src_ids.shape[1]))
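    # teacher forcing: the decoder input drops the last target token while the labels
    # drop the first one, so position t is trained to predict token t + 1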
in_tar = tar_ids[:, :-1]
label_tar = tar_ids[:, 1:]
in_tar = in_tar.reshape((in_tar.shape[0], in_tar.shape[1]))
label_tar = label_tar.reshape((label_tar.shape[0], label_tar.shape[1], 1))
inputs = [src_ids, in_tar, label_tar, src_mask, tar_mask]
return inputs, np.sum(tar_mask)
def train(args, attn_model=False):
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = 2020
fluid.default_main_program().random_seed = 2020
if attn_model:
model = AttentionModel(
args.hidden_size,
args.src_vocab_size,
args.tar_vocab_size,
args.batch_size,
num_layers=args.num_layers,
init_scale=args.init_scale,
dropout=args.dropout)
else:
model = BaseModel(
args.hidden_size,
args.src_vocab_size,
args.tar_vocab_size,
args.batch_size,
num_layers=args.num_layers,
init_scale=args.init_scale,
dropout=args.dropout)
        global_norm_clip = GradientClipByGlobalNorm(args.max_grad_norm)
        optimizer = fluid.optimizer.SGD(args.learning_rate,
                                        parameter_list=model.parameters(),
                                        grad_clip=global_norm_clip)
model.train()
train_data_iter = get_data_iter(args.batch_size)
batch_times = []
for batch_id, batch in enumerate(train_data_iter):
total_loss = 0
word_count = 0.0
batch_start_time = time.time()
input_data_feed, word_num = prepare_input(batch)
input_data_feed = [
fluid.dygraph.to_variable(np_inp) for np_inp in input_data_feed
]
word_count += word_num
loss = model(input_data_feed)
loss.backward()
optimizer.minimize(loss)
model.clear_gradients()
total_loss += loss * args.batch_size
batch_end_time = time.time()
batch_time = batch_end_time - batch_start_time
batch_times.append(batch_time)
if batch_id % PRINT_STEP == 0:
print(
"Batch:[%d]; Time: %.5f s; loss: %.5f; total_loss: %.5f; word num: %.5f; ppl: %.5f"
% (batch_id, batch_time, loss.numpy(), total_loss.numpy(),
word_count, np.exp(total_loss.numpy() / word_count)))
if attn_model:
# NOTE: Please see code of AttentionModel.
# Because diff exits if call while_loop in static graph, only run 4 batches to pass the test temporarily.
if batch_id + 1 >= 4:
break
else:
if batch_id + 1 >= STEP_NUM:
break
model_path = args.attn_model_path if attn_model else args.base_model_path
model_dir = os.path.join(model_path)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
fluid.save_dygraph(model.state_dict(), model_dir)
return loss.numpy()
def infer(args, attn_model=False):
with fluid.dygraph.guard(place):
if attn_model:
model = AttentionModel(
args.hidden_size,
args.src_vocab_size,
args.tar_vocab_size,
args.batch_size,
beam_size=args.beam_size,
num_layers=args.num_layers,
init_scale=args.init_scale,
dropout=0.0,
mode='beam_search')
else:
model = BaseModel(
args.hidden_size,
args.src_vocab_size,
args.tar_vocab_size,
args.batch_size,
beam_size=args.beam_size,
num_layers=args.num_layers,
init_scale=args.init_scale,
dropout=0.0,
mode='beam_search')
model_path = args.attn_model_path if attn_model else args.base_model_path
state_dict, _ = fluid.dygraph.load_dygraph(model_path)
model.set_dict(state_dict)
model.eval()
train_data_iter = get_data_iter(args.batch_size, mode='infer')
for batch_id, batch in enumerate(train_data_iter):
input_data_feed, word_num = prepare_input(batch)
input_data_feed = [
fluid.dygraph.to_variable(np_inp) for np_inp in input_data_feed
]
outputs = model.beam_search(input_data_feed)
break
return outputs.numpy()
class TestSeq2seq(unittest.TestCase):
def setUp(self):
self.args = Seq2SeqModelHyperParams
self.temp_dir = tempfile.TemporaryDirectory()
self.args.base_model_path = os.path.join(self.temp_dir.name,
self.args.base_model_path)
self.args.attn_model_path = os.path.join(self.temp_dir.name,
self.args.attn_model_path)
self.args.reload_model = os.path.join(self.temp_dir.name,
self.args.reload_model)
def tearDown(self):
self.temp_dir.cleanup()
def run_dygraph(self, mode="train", attn_model=False):
program_translator.enable(False)
if mode == "train":
return train(self.args, attn_model)
else:
return infer(self.args, attn_model)
def run_static(self, mode="train", attn_model=False):
program_translator.enable(True)
if mode == "train":
return train(self.args, attn_model)
else:
return infer(self.args, attn_model)
def _test_train(self, attn_model=False):
dygraph_loss = self.run_dygraph(mode="train", attn_model=attn_model)
static_loss = self.run_static(mode="train", attn_model=attn_model)
result = np.allclose(dygraph_loss, static_loss)
self.assertTrue(
result,
msg="\ndygraph_loss = {} \nstatic_loss = {}".format(dygraph_loss,
static_loss))
def _test_predict(self, attn_model=False):
pred_dygraph = self.run_dygraph(mode="test", attn_model=attn_model)
pred_static = self.run_static(mode="test", attn_model=attn_model)
result = np.allclose(pred_static, pred_dygraph)
self.assertTrue(
result,
msg="\npred_dygraph = {} \npred_static = {}".format(pred_dygraph,
pred_static))
def test_base_model(self):
self._test_train(attn_model=False)
self._test_predict(attn_model=False)
def test_attn_model(self):
self._test_train(attn_model=True)
# TODO(liym27): add predict
# self._test_predict(attn_model=True)
if __name__ == '__main__':
# switch into new eager mode
with fluid.framework._test_eager_guard():
unittest.main()
|
python
|
#! /bin/false
import weblogic
import javax.xml
import java.io.FileInputStream as fis
import java.io.FileOutputStream as fos
import os
import shutil
import java.io.BufferedReader as BR
import java.lang.System.in as Sin
import java.io.InputStreamReader as isr
import java.lang.System.out.print as jprint
import weblogic.security
#Standards are defined here
class ConfigStore:
def __init__(self, fileLocation):
factory=javax.xml.parsers.DocumentBuilderFactory.newInstance()
builder=factory.newDocumentBuilder()
input=fis(fileLocation)
self.document=builder.parse(input)
self.DOM=self.document.getDocumentElement()
def write(self, newFileLocation):
xmlFrom=javax.xml.transform.dom.DOMSource(self.document)
xmlTo=javax.xml.transform.stream.StreamResult(fos(newFileLocation))
Transformer=javax.xml.transform.TransformerFactory.newInstance().newTransformer()
Transformer.transform(xmlFrom, xmlTo)
configxml=ConfigStore("/home/andresaquino/Downloads/config/config.xml")
es=weblogic.security.internal.SerializedSystemIni.getEncryptionService("/home/andresaquino/Downloads/security")
ces=weblogic.security.internal.encryption.ClearOrEncryptedService(es)
numServers=configxml.DOM.getElementsByTagName("server").getLength()
domainName=configxml.DOM.getAttribute("name")
print "The domain found: %s has %s servers." % (domainName, numServers)
print '## Servers'
for i in range(configxml.DOM.getElementsByTagName("server").getLength()):
serverNode=configxml.DOM.getElementsByTagName("server").item(i)
name=serverNode.getAttribute("name")
print 'Server: ' + name
print '## Decrypt the JDBC passwords'
for j in range(configxml.DOM.getElementsByTagName("JDBCConnectionPool").getLength()):
poolNode=configxml.DOM.getElementsByTagName("JDBCConnectionPool").item(j)
print 'Name: ' + poolNode.getAttribute("Name")
print '\tURL: ' + poolNode.getAttribute("URL")
print '\tDriverName: ' + poolNode.getAttribute("DriverName")
print '\tUser: ' + poolNode.getAttribute("Properties")
print '\tPassword: ' + ces.decrypt(poolNode.getAttribute("PasswordEncrypted"))
print '\tTargets: ' + poolNode.getAttribute("Targets")
print '## Decrypt the EmbeddedLDAP'
for j in range(configxml.DOM.getElementsByTagName("EmbeddedLDAP").getLength()):
poolNode=configxml.DOM.getElementsByTagName("EmbeddedLDAP").item(j)
print 'Name: ' + poolNode.getAttribute("Name")
print '\tCredential: ' + ces.decrypt(poolNode.getAttribute("CredentialEncrypted"))
print '## Decrypt the Security Configuration'
for j in range(configxml.DOM.getElementsByTagName("SecurityConfiguration").getLength()):
poolNode=configxml.DOM.getElementsByTagName("SecurityConfiguration").item(j)
print 'Name: ' + poolNode.getAttribute("Name")
print '\tCredential: ' + ces.decrypt(poolNode.getAttribute("CredentialEncrypted"))
print '## Decrypt the ServerStart'
for j in range(configxml.DOM.getElementsByTagName("ServerStart").getLength()):
poolNode=configxml.DOM.getElementsByTagName("ServerStart").item(j)
print 'Name: ' + poolNode.getAttribute("Name")
print '\tUserName: ' + poolNode.getAttribute("Username")
print '\tPassword: ' + ces.decrypt(poolNode.getAttribute("PasswordEncrypted"))
|
python
|
from niaaml.classifiers.classifier import Classifier
from niaaml.utilities import MinMax
from niaaml.utilities import ParameterDefinition
from sklearn.tree import DecisionTreeClassifier as DTC
import numpy as np
import warnings
from sklearn.exceptions import ChangedBehaviorWarning, ConvergenceWarning, DataConversionWarning, DataDimensionalityWarning, EfficiencyWarning, FitFailedWarning, NonBLASDotWarning, UndefinedMetricWarning
__all__ = ['DecisionTree']
class DecisionTree(Classifier):
r"""Implementation of decision tree classifier.
Date:
2020
Author:
Luka Pečnik
License:
MIT
Reference:
L. Breiman, J. Friedman, R. Olshen, and C. Stone, “Classification and Regression Trees”, Wadsworth, Belmont, CA, 1984.
Documentation:
https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier
See Also:
* :class:`niaaml.classifiers.Classifier`
"""
Name = 'Decision Tree Classifier'
def __init__(self, **kwargs):
r"""Initialize DecisionTree instance.
"""
warnings.filterwarnings(action='ignore', category=ChangedBehaviorWarning)
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings(action='ignore', category=DataDimensionalityWarning)
warnings.filterwarnings(action='ignore', category=EfficiencyWarning)
warnings.filterwarnings(action='ignore', category=FitFailedWarning)
warnings.filterwarnings(action='ignore', category=NonBLASDotWarning)
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
self._params = dict(
criterion = ParameterDefinition(['gini', 'entropy']),
splitter = ParameterDefinition(['best', 'random'])
)
self.__decision_tree_classifier = DTC()
def set_parameters(self, **kwargs):
r"""Set the parameters/arguments of the algorithm.
"""
self.__decision_tree_classifier.set_params(**kwargs)
def fit(self, x, y, **kwargs):
r"""Fit DecisionTree.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
y (pandas.core.series.Series): n classes of the samples in the x array.
Returns:
None
"""
self.__decision_tree_classifier.fit(x, y)
def predict(self, x, **kwargs):
r"""Predict class for each sample (row) in x.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
Returns:
pandas.core.series.Series: n predicted classes.
"""
return self.__decision_tree_classifier.predict(x)
def to_string(self):
r"""User friendly representation of the object.
Returns:
str: User friendly representation of the object.
"""
return Classifier.to_string(self).format(name=self.Name, args=self._parameters_to_string(self.__decision_tree_classifier.get_params()))
|
python
|
#!/usr/bin/python
#
# Script implementing the multiplicative rules from the following
# article:
#
# J.-L. Durrieu, G. Richard, B. David and C. Fevotte
# Source/Filter Model for Unsupervised Main Melody
# Extraction From Polyphonic Audio Signals
# IEEE Transactions on Audio, Speech and Language Processing
# Vol. 18, No. 3, March 2010
#
# with more details and new features explained in my PhD thesis:
#
# J.-L. Durrieu,
# Automatic Extraction of the Main Melody from Polyphonic Music Signals,
# EDITE
# Institut TELECOM, TELECOM ParisTech, CNRS LTCI
# copyright (C) 2010 Jean-Louis Durrieu
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import time, os
from numpy.random import randn
from string import join
def db(positiveValue):
"""
db(positiveValue)
Returns the decibel value of the input positiveValue
"""
return 10 * np.log10(np.abs(positiveValue))
def ISDistortion(X,Y):
"""
value = ISDistortion(X, Y)
Returns the value of the Itakura-Saito (IS) divergence between
matrix X and matrix Y. X and Y should be two NumPy arrays with
same dimension.
"""
return np.sum((-np.log(X / Y) + (X / Y) - 1))
def SIMM(# the data to be fitted to:
SX,
# the basis matrices for the spectral combs
WF0,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=4, numberOfAccompanimentSpectralShapes=10,
# if any, initial amplitude matrices for
HGAMMA0=None, HPHI0=None,
HF00=None,
WM0=None, HM0=None,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=1000, updateRulePower=1.0,
stepNotes=4,
lambdaHF0=0.00,alphaHF0=0.99,
displayEvolution=False, verbose=True, makeMovie=False):
"""
HGAMMA, HPHI, HF0, HM, WM, recoError =
SIMM(SX, WF0, WGAMMA, numberOfFilters=4,
numberOfAccompanimentSpectralShapes=10, HGAMMA0=None, HPHI0=None,
HF00=None, WM0=None, HM0=None, numberOfIterations=1000,
updateRulePower=1.0, stepNotes=4,
lambdaHF0=0.00, alphaHF0=0.99, displayEvolution=False,
verbose=True)
Implementation of the Smooth-filters Instantaneous Mixture Model
(SIMM). This model can be used to estimate the main melody of a
song, and separate the lead voice from the accompaniment, provided
that the basis WF0 is constituted of elements associated to
particular pitches.
Inputs:
SX
the F x N power spectrogram to be approximated.
F is the number of frequency bins, while N is the number of
analysis frames
WF0
the F x NF0 basis matrix containing the NF0 source elements
WGAMMA
the F x P basis matrix of P smooth elementary filters
numberOfFilters
the number of filters K to be considered
numberOfAccompanimentSpectralShapes
the number of spectral shapes R for the accompaniment
HGAMMA0
the P x K decomposition matrix of WPHI on WGAMMA
HPHI0
the K x N amplitude matrix of the filter part of the lead
instrument
HF00
the NF0 x N amplitude matrix for the source part of the lead
instrument
WM0
the F x R the matrix for spectral shapes of the
accompaniment
HM0
the R x N amplitude matrix associated with each of the R
accompaniment spectral shapes
numberOfIterations
        the number of iterations for the estimation algorithm
updateRulePower
the power to which the multiplicative gradient is elevated to
stepNotes
the number of elements in WF0 per semitone. stepNotes=4 means
that there are 48 elements per octave in WF0.
lambdaHF0
Lagrangian multiplier for the octave control
alphaHF0
parameter that controls how much influence a lower octave
can have on the upper octave's amplitude.
Outputs:
HGAMMA
the estimated P x K decomposition matrix of WPHI on WGAMMA
HPHI
the estimated K x N amplitude matrix of the filter part
HF0
the estimated NF0 x N amplitude matrix for the source part
HM
the estimated R x N amplitude matrix for the accompaniment
WM
        the estimated F x R spectral shapes for the accompaniment
recoError
the successive values of the Itakura Saito divergence
between the power spectrogram and the spectrogram
        computed from the updated estimates of the matrices.
Please also refer to the following article for more details about
the algorithm within this function, as well as the meaning of the
different matrices that are involved:
J.-L. Durrieu, G. Richard, B. David and C. Fevotte
Source/Filter Model for Unsupervised Main Melody
Extraction From Polyphonic Audio Signals
IEEE Transactions on Audio, Speech and Language Processing
Vol. 18, No. 3, March 2010
"""
eps = 10 ** (-20)
if displayEvolution:
import matplotlib.pyplot as plt
from imageMatlab import imageM
plt.ion()
print "Is the display interactive? ", plt.isinteractive()
# renamed for convenience:
K = numberOfFilters
R = numberOfAccompanimentSpectralShapes
omega = updateRulePower
F, N = SX.shape
Fwf0, NF0 = WF0.shape
Fwgamma, P = WGAMMA.shape
# Checking the sizes of the matrices
if Fwf0 != F:
        return False  # TODO: to be reviewed ("A REVOIR!!!"): WF0 and SX have mismatched numbers of frequency bins
if HGAMMA0 is None:
HGAMMA0 = np.abs(randn(P, K))
else:
if not(isinstance(HGAMMA0,np.ndarray)): # default behaviour
HGAMMA0 = np.array(HGAMMA0)
Phgamma0, Khgamma0 = HGAMMA0.shape
if Phgamma0 != P or Khgamma0 != K:
print "Wrong dimensions for given HGAMMA0, \n"
print "random initialization used instead"
HGAMMA0 = np.abs(randn(P, K))
HGAMMA = np.copy(HGAMMA0)
if HPHI0 is None: # default behaviour
HPHI = np.abs(randn(K, N))
else:
Khphi0, Nhphi0 = np.array(HPHI0).shape
if Khphi0 != K or Nhphi0 != N:
print "Wrong dimensions for given HPHI0, \n"
print "random initialization used instead"
HPHI = np.abs(randn(K, N))
else:
HPHI = np.copy(np.array(HPHI0))
if HF00 is None:
HF00 = np.abs(randn(NF0, N))
else:
if np.array(HF00).shape[0] == NF0 and np.array(HF00).shape[1] == N:
HF00 = np.array(HF00)
else:
print "Wrong dimensions for given HF00, \n"
print "random initialization used instead"
HF00 = np.abs(randn(NF0, N))
HF0 = np.copy(HF00)
if HM0 is None:
HM0 = np.abs(randn(R, N))
else:
if np.array(HM0).shape[0] == R and np.array(HM0).shape[1] == N:
HM0 = np.array(HM0)
else:
print "Wrong dimensions for given HM0, \n"
print "random initialization used instead"
HM0 = np.abs(randn(R, N))
HM = np.copy(HM0)
if WM0 is None:
WM0 = np.abs(randn(F, R))
else:
if np.array(WM0).shape[0] == F and np.array(WM0).shape[1] == R:
WM0 = np.array(WM0)
else:
print "Wrong dimensions for given WM0, \n"
print "random initialization used instead"
WM0 = np.abs(randn(F, R))
WM = np.copy(WM0)
# Iterations to estimate the SIMM parameters:
WPHI = np.dot(WGAMMA, HGAMMA)
SF0 = np.dot(WF0, HF0)
SPHI = np.dot(WPHI, HPHI)
SM = np.dot(WM, HM)
hatSX = SF0 * SPHI + SM
## SX = SX + np.abs(randn(F, N)) ** 2
# should not need this line
# which ensures that data is not
# 0 everywhere.
# temporary matrices
tempNumFbyN = np.zeros([F, N])
tempDenFbyN = np.zeros([F, N])
# Array containing the reconstruction error after the update of each
# of the parameter matrices:
recoError = np.zeros([numberOfIterations * 5 * 2 + NF0 * 2 + 1])
recoError[0] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error at beginning: ", recoError[0]
counterError = 1
if displayEvolution:
h1 = plt.figure(1)
if makeMovie:
dirName = 'tmp%s/' %time.strftime("%Y%m%d%H%M%S")
os.system('mkdir %s' %dirName)
# Main loop for multiplicative updating rules:
for n in np.arange(numberOfIterations):
# order of re-estimation: HF0, HPHI, HM, HGAMMA, WM
if verbose:
print "iteration ", n, " over ", numberOfIterations
if displayEvolution:
h1.clf();imageM(db(HF0));
plt.clim([np.amax(db(HF0))-100, np.amax(db(HF0))]);plt.draw();
## h1.clf();
## imageM(HF0 * np.outer(np.ones([NF0, 1]),
## 1 / (HF0.max(axis=0))));
if makeMovie:
filename = dirName + '%04d' % n + '.png'
plt.savefig(filename, dpi=100)
# updating HF0:
tempNumFbyN = (SPHI * SX) / np.maximum(hatSX ** 2, eps)
tempDenFbyN = SPHI / np.maximum(hatSX, eps)
# This to enable octave control
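        # (added note) Rows at least one octave above the lowest F0 candidate
        # (row offset 12 * stepNotes) receive an extra term in the denominator,
        # weighted by lambdaHF0, which grows with the amplitude estimated one
        # octave below: a strong lower octave therefore damps its upper octave.
        # With the default lambdaHF0 = 0.0 this reduces to the plain update rule.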
HF0[np.arange(12 * stepNotes, NF0), :] \
= HF0[np.arange(12 * stepNotes, NF0), :] \
* (np.dot(WF0[:, np.arange(12 * stepNotes,
NF0)].T, tempNumFbyN) \
/ np.maximum(
np.dot(WF0[:, np.arange(12 * stepNotes, NF0)].T,
tempDenFbyN) \
+ lambdaHF0 * (- (alphaHF0 - 1.0) \
/ np.maximum(HF0[
np.arange(12 * stepNotes, NF0), :], eps) \
+ HF0[
np.arange(NF0 - 12 * stepNotes), :]),
eps)) ** omega
HF0[np.arange(12 * stepNotes), :] \
= HF0[np.arange(12 * stepNotes), :] \
* (np.dot(WF0[:, np.arange(12 * stepNotes)].T,
tempNumFbyN) /
np.maximum(
np.dot(WF0[:, np.arange(12 * stepNotes)].T,
tempDenFbyN), eps)) ** omega
## # normal update rules:
## HF0 = HF0 * (np.dot(WF0.T, tempNumFbyN) /
## np.maximum(np.dot(WF0.T, tempDenFbyN), eps)) ** omega
SF0 = np.maximum(np.dot(WF0, HF0),eps)
hatSX = np.maximum(SF0 * SPHI + SM,eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after HF0 : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HPHI
tempNumFbyN = (SF0 * SX) / np.maximum(hatSX ** 2, eps)
tempDenFbyN = SF0 / np.maximum(hatSX, eps)
HPHI = HPHI * (np.dot(WPHI.T, tempNumFbyN) / np.maximum(np.dot(WPHI.T, tempDenFbyN), eps)) ** omega
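        # (added note) The columns of HPHI are normalised to sum to one and the
        # removed scale is pushed into HF0; the product SF0 * SPHI is unchanged,
        # which fixes the multiplicative ambiguity between source and filter parts.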
sumHPHI = np.sum(HPHI, axis=0)
HPHI[:, sumHPHI>0] = HPHI[:, sumHPHI>0] / np.outer(np.ones(K), sumHPHI[sumHPHI>0])
HF0 = HF0 * np.outer(np.ones(NF0), sumHPHI)
SF0 = np.maximum(np.dot(WF0, HF0), eps)
SPHI = np.maximum(np.dot(WPHI, HPHI), eps)
hatSX = np.maximum(SF0 * SPHI + SM, eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after HPHI : ", recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HM
tempNumFbyN = SX / np.maximum(hatSX ** 2, eps)
tempDenFbyN = 1 / np.maximum(hatSX, eps)
HM = np.maximum(HM * (np.dot(WM.T, tempNumFbyN) / np.maximum(np.dot(WM.T, tempDenFbyN), eps)) ** omega, eps)
SM = np.maximum(np.dot(WM, HM), eps)
hatSX = np.maximum(SF0 * SPHI + SM, eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after HM : ", recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HGAMMA
tempNumFbyN = (SF0 * SX) / np.maximum(hatSX ** 2, eps)
tempDenFbyN = SF0 / np.maximum(hatSX, eps)
HGAMMA = np.maximum(HGAMMA * (np.dot(WGAMMA.T, np.dot(tempNumFbyN, HPHI.T)) / np.maximum(np.dot(WGAMMA.T, np.dot(tempDenFbyN, HPHI.T)), eps)) ** omega, eps)
sumHGAMMA = np.sum(HGAMMA, axis=0)
HGAMMA[:, sumHGAMMA>0] = HGAMMA[:, sumHGAMMA>0] / np.outer(np.ones(P), sumHGAMMA[sumHGAMMA>0])
HPHI = HPHI * np.outer(sumHGAMMA, np.ones(N))
sumHPHI = np.sum(HPHI, axis=0)
HPHI[:, sumHPHI>0] = HPHI[:, sumHPHI>0] / np.outer(np.ones(K), sumHPHI[sumHPHI>0])
HF0 = HF0 * np.outer(np.ones(NF0), sumHPHI)
WPHI = np.maximum(np.dot(WGAMMA, HGAMMA), eps)
SF0 = np.maximum(np.dot(WF0, HF0), eps)
SPHI = np.maximum(np.dot(WPHI, HPHI), eps)
hatSX = np.maximum(SF0 * SPHI + SM, eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after HGAMMA: ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating WM, after a certain number of iterations (here, after 1 iteration)
if n > -1: # this test can be used such that WM is updated only
# after a certain number of iterations
tempNumFbyN = SX / np.maximum(hatSX ** 2, eps)
tempDenFbyN = 1 / np.maximum(hatSX, eps)
WM = np.maximum(WM * (np.dot(tempNumFbyN, HM.T) /
np.maximum(np.dot(tempDenFbyN, HM.T),
eps)) ** omega, eps)
sumWM = np.sum(WM, axis=0)
WM[:, sumWM>0] = (WM[:, sumWM>0] /
np.outer(np.ones(F),sumWM[sumWM>0]))
HM = HM * np.outer(sumWM, np.ones(N))
SM = np.maximum(np.dot(WM, HM), eps)
hatSX = np.maximum(SF0 * SPHI + SM, eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after WM : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
return HGAMMA, HPHI, HF0, HM, WM, recoError
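# Illustrative usage sketch (not from the original code): the dimensions and the
# random "spectrogram" below are hypothetical, chosen only so that the call runs;
# real use would pass an STFT power spectrogram and pitch/filter dictionaries as
# WF0/WGAMMA.
def _example_simm_usage():
    F, N, NF0, P = 129, 20, 60, 5  # NF0 must exceed 12 * stepNotes for octave control
    SX = np.abs(randn(F, N)) ** 2
    WF0 = np.abs(randn(F, NF0))
    WGAMMA = np.abs(randn(F, P))
    HGAMMA, HPHI, HF0, HM, WM, recoError = SIMM(
        SX, WF0, WGAMMA,
        numberOfFilters=4, numberOfAccompanimentSpectralShapes=10,
        numberOfIterations=2, stepNotes=4,
        displayEvolution=False, verbose=False)
    return recoError[:5]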
def Stereo_SIMM(# the data to be fitted to:
SXR, SXL,
# the basis matrices for the spectral combs
WF0,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=4, numberOfAccompanimentSpectralShapes=10,
# if any, initial amplitude matrices for
HGAMMA0=None, HPHI0=None,
HF00=None,
WM0=None, HM0=None,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=1000, updateRulePower=1.0,
stepNotes=4,
lambdaHF0=0.00,alphaHF0=0.99,
displayEvolution=False, verbose=True,
updateHGAMMA=True):
"""
HGAMMA, HPHI, HF0, HM, WM, recoError =
    Stereo_SIMM(SXR, SXL, WF0, WGAMMA, numberOfFilters=4,
numberOfAccompanimentSpectralShapes=10, HGAMMA0=None, HPHI0=None,
HF00=None, WM0=None, HM0=None, numberOfIterations=1000,
updateRulePower=1.0, stepNotes=4,
lambdaHF0=0.00, alphaHF0=0.99, displayEvolution=False,
verbose=True)
Implementation of the Smooth-filters Instantaneous Mixture Model
(SIMM). This model can be used to estimate the main melody of a
song, and separate the lead voice from the accompaniment, provided
    that the basis WF0 is made up of elements associated with
    particular pitches.
Inputs:
    SXR, SXL
        the two F x N power spectrograms (right and left channels) to be approximated.
F is the number of frequency bins, while N is the number of
analysis frames
WF0
the F x NF0 basis matrix containing the NF0 source elements
WGAMMA
the F x P basis matrix of P smooth elementary filters
numberOfFilters
the number of filters K to be considered
numberOfAccompanimentSpectralShapes
the number of spectral shapes R for the accompaniment
HGAMMA0
the P x K decomposition matrix of WPHI on WGAMMA
HPHI0
the K x N amplitude matrix of the filter part of the lead
instrument
HF00
the NF0 x N amplitude matrix for the source part of the lead
instrument
WM0
        the F x R matrix of spectral shapes for the
accompaniment
HM0
the R x N amplitude matrix associated with each of the R
accompaniment spectral shapes
numberOfIterations
        the number of iterations for the estimation algorithm
updateRulePower
        the power to which the multiplicative gradient is raised
stepNotes
the number of elements in WF0 per semitone. stepNotes=4 means
that there are 48 elements per octave in WF0.
lambdaHF0
Lagrangian multiplier for the octave control
alphaHF0
parameter that controls how much influence a lower octave
can have on the upper octave's amplitude.
Outputs:
HGAMMA
the estimated P x K decomposition matrix of WPHI on WGAMMA
HPHI
the estimated K x N amplitude matrix of the filter part
HF0
the estimated NF0 x N amplitude matrix for the source part
HM
the estimated R x N amplitude matrix for the accompaniment
WM
        the estimated F x R spectral shapes for the accompaniment
recoError
the successive values of the Itakura Saito divergence
between the power spectrogram and the spectrogram
        computed from the updated estimates of the matrices.
Please also refer to the following article for more details about
the algorithm within this function, as well as the meaning of the
different matrices that are involved:
J.-L. Durrieu, G. Richard, B. David and C. Fevotte
Source/Filter Model for Unsupervised Main Melody
Extraction From Polyphonic Audio Signals
IEEE Transactions on Audio, Speech and Language Processing
Vol. 18, No. 3, March 2010
"""
eps = 10 ** (-20)
if displayEvolution:
import matplotlib.pyplot as plt
from imageMatlab import imageM
plt.ion()
print "Is the display interactive? ", plt.isinteractive()
# renamed for convenience:
K = numberOfFilters
R = numberOfAccompanimentSpectralShapes
omega = updateRulePower
F, N = SXR.shape
if (F, N) != SXL.shape:
print "The input STFT matrices do not have the same dimension.\n"
print "Please check what happened..."
raise ValueError("Dimension of STFT matrices must be the same.")
Fwf0, NF0 = WF0.shape
Fwgamma, P = WGAMMA.shape
# Checking the sizes of the matrices
if Fwf0 != F:
        return False  # TODO: to be reviewed ("A REVOIR!!!"): WF0 and the input spectrograms have mismatched numbers of frequency bins
if HGAMMA0 is None:
HGAMMA0 = np.abs(randn(P, K))
else:
if not(isinstance(HGAMMA0,np.ndarray)): # default behaviour
HGAMMA0 = np.array(HGAMMA0)
Phgamma0, Khgamma0 = HGAMMA0.shape
if Phgamma0 != P or Khgamma0 != K:
print "Wrong dimensions for given HGAMMA0, \n"
print "random initialization used instead"
HGAMMA0 = np.abs(randn(P, K))
HGAMMA = np.copy(HGAMMA0)
if HPHI0 is None: # default behaviour
HPHI = np.abs(randn(K, N))
else:
Khphi0, Nhphi0 = np.array(HPHI0).shape
if Khphi0 != K or Nhphi0 != N:
print "Wrong dimensions for given HPHI0, \n"
print "random initialization used instead"
HPHI = np.abs(randn(K, N))
else:
HPHI = np.copy(np.array(HPHI0))
if HF00 is None:
HF00 = np.abs(randn(NF0, N))
else:
if np.array(HF00).shape[0] == NF0 and np.array(HF00).shape[1] == N:
HF00 = np.array(HF00)
else:
print "Wrong dimensions for given HF00, \n"
print "random initialization used instead"
HF00 = np.abs(randn(NF0, N))
HF0 = np.copy(HF00)
if HM0 is None:
HM0 = np.abs(randn(R, N))
else:
if np.array(HM0).shape[0] == R and np.array(HM0).shape[1] == N:
HM0 = np.array(HM0)
else:
print "Wrong dimensions for given HM0, \n"
print "random initialization used instead"
HM0 = np.abs(randn(R, N))
HM = np.copy(HM0)
if WM0 is None:
WM0 = np.abs(randn(F, R))
else:
if np.array(WM0).shape[0] == F and np.array(WM0).shape[1] == R:
WM0 = np.array(WM0)
else:
print "Wrong dimensions for given WM0, \n"
print "random initialization used instead"
WM0 = np.abs(randn(F, R))
WM = np.copy(WM0)
alphaR = 0.5
alphaL = 0.5
betaR = np.diag(np.random.rand(R))
betaL = np.eye(R) - betaR
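    # (added note) alphaR/alphaL are the scalar gains of the lead voice in the
    # right/left channels (kept normalised so that alphaR + alphaL = 1), while the
    # diagonal matrices betaR/betaL pan each accompaniment component between the
    # two channels (betaR + betaL = I); all four are re-estimated in the main loop.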
# Iterations to estimate the SIMM parameters:
WPHI = np.dot(WGAMMA, HGAMMA)
SF0 = np.dot(WF0, HF0)
SPHI = np.dot(WPHI, HPHI)
# SM = np.dot(WM, HM)
hatSXR = (alphaR**2) * SF0 * SPHI + np.dot(np.dot(WM, betaR**2),HM)
hatSXL = (alphaL**2) * SF0 * SPHI + np.dot(np.dot(WM, betaL**2),HM)
# SX = SX + np.abs(randn(F, N)) ** 2
# should not need this line
# which ensures that data is not
# 0 everywhere.
# temporary matrices
tempNumFbyN = np.zeros([F, N])
tempDenFbyN = np.zeros([F, N])
# Array containing the reconstruction error after the update of each
# of the parameter matrices:
recoError = np.zeros([numberOfIterations * 5 * 2 + NF0 * 2 + 1])
recoError[0] = ISDistortion(SXR, hatSXR) + ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error at beginning: ", recoError[0]
counterError = 1
if displayEvolution:
h1 = plt.figure(1)
# Main loop for multiplicative updating rules:
for n in np.arange(numberOfIterations):
# order of re-estimation: HF0, HPHI, HM, HGAMMA, WM
if verbose:
print "iteration ", n, " over ", numberOfIterations
if displayEvolution:
h1.clf();imageM(db(HF0));
plt.clim([np.amax(db(HF0))-100, np.amax(db(HF0))]);plt.draw();
# h1.clf();
# imageM(HF0 * np.outer(np.ones([NF0, 1]),
# 1 / (HF0.max(axis=0))));
# updating HF0:
tempNumFbyN = ((alphaR**2) * SPHI * SXR) / np.maximum(hatSXR ** 2, eps)\
+ ((alphaL**2) * SPHI * SXL) / np.maximum(hatSXL ** 2, eps)
tempDenFbyN = (alphaR**2) * SPHI / np.maximum(hatSXR, eps)\
+ (alphaL**2) * SPHI / np.maximum(hatSXL, eps)
# This to enable octave control
HF0[np.arange(12 * stepNotes, NF0), :] \
= HF0[np.arange(12 * stepNotes, NF0), :] \
* (np.dot(WF0[:, np.arange(12 * stepNotes,
NF0)].T, tempNumFbyN) \
/ np.maximum(
np.dot(WF0[:, np.arange(12 * stepNotes, NF0)].T,
tempDenFbyN) \
+ lambdaHF0 * (- (alphaHF0 - 1.0) \
/ np.maximum(HF0[
np.arange(12 * stepNotes, NF0), :], eps) \
+ HF0[
np.arange(NF0 - 12 * stepNotes), :]),
eps)) ** omega
HF0[np.arange(12 * stepNotes), :] \
= HF0[np.arange(12 * stepNotes), :] \
* (np.dot(WF0[:, np.arange(12 * stepNotes)].T,
tempNumFbyN) /
np.maximum(
np.dot(WF0[:, np.arange(12 * stepNotes)].T,
tempDenFbyN), eps)) ** omega
## # normal update rules:
## HF0 = HF0 * (np.dot(WF0.T, tempNumFbyN) /
## np.maximum(np.dot(WF0.T, tempDenFbyN), eps)) ** omega
SF0 = np.maximum(np.dot(WF0, HF0), eps)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM),
eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM),
eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HF0 : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HPHI
        if updateHGAMMA or True:  # NB: as written this condition is always True, so HPHI is always updated
tempNumFbyN = ((alphaR**2) * SF0 * SXR) / np.maximum(hatSXR ** 2, eps)\
+ ((alphaL**2) * SF0 * SXL) / np.maximum(hatSXL ** 2, eps)
tempDenFbyN = (alphaR**2) * SF0 / np.maximum(hatSXR, eps)\
+ (alphaL**2) * SF0 / np.maximum(hatSXL, eps)
HPHI = HPHI * (np.dot(WPHI.T, tempNumFbyN) / np.maximum(np.dot(WPHI.T, tempDenFbyN), eps)) ** omega
sumHPHI = np.sum(HPHI, axis=0)
HPHI[:, sumHPHI>0] = HPHI[:, sumHPHI>0] / np.outer(np.ones(K), sumHPHI[sumHPHI>0])
HF0 = HF0 * np.outer(np.ones(NF0), sumHPHI)
SF0 = np.maximum(np.dot(WF0, HF0), eps)
SPHI = np.maximum(np.dot(WPHI, HPHI), eps)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM),
eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM),
eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HPHI : ", recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HM
# tempNumFbyN = SXR / np.maximum(hatSXR ** 2, eps)\
# + SXL / np.maximum(hatSXL ** 2, eps)
# tempDenFbyN = 1 / np.maximum(hatSXR, eps)\
# + 1 / np.maximum(hatSXL, eps)
# HM = np.maximum(HM * (np.dot(WM.T, tempNumFbyN) / np.maximum(np.dot(WM.T, tempDenFbyN), eps)) ** omega, eps)
HM = HM * \
((np.dot(np.dot((betaR**2), WM.T), SXR /
np.maximum(hatSXR ** 2, eps)) +
np.dot(np.dot((betaL**2), WM.T), SXL /
np.maximum(hatSXL ** 2, eps))
) /
np.maximum(np.dot(np.dot((betaR**2), WM.T), 1 /
np.maximum(hatSXR, eps)) +
np.dot(np.dot((betaL**2), WM.T), 1 /
np.maximum(hatSXL, eps)),
eps)) ** omega
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HM : ", recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HGAMMA
if updateHGAMMA:
tempNumFbyN = ((alphaR ** 2) * SF0 * SXR) / np.maximum(hatSXR ** 2, eps)\
+ ((alphaL ** 2) * SF0 * SXL) / np.maximum(hatSXL ** 2, eps)
tempDenFbyN = (alphaR ** 2) * SF0 / np.maximum(hatSXR, eps) \
+ (alphaL ** 2) * SF0 / np.maximum(hatSXL, eps)
HGAMMA = np.maximum(HGAMMA * (np.dot(WGAMMA.T, np.dot(tempNumFbyN, HPHI.T)) / np.maximum(np.dot(WGAMMA.T, np.dot(tempDenFbyN, HPHI.T)), eps)) ** omega, eps)
sumHGAMMA = np.sum(HGAMMA, axis=0)
HGAMMA[:, sumHGAMMA>0] = HGAMMA[:, sumHGAMMA>0] / np.outer(np.ones(P), sumHGAMMA[sumHGAMMA>0])
HPHI = HPHI * np.outer(sumHGAMMA, np.ones(N))
sumHPHI = np.sum(HPHI, axis=0)
HPHI[:, sumHPHI>0] = HPHI[:, sumHPHI>0] / np.outer(np.ones(K), sumHPHI[sumHPHI>0])
HF0 = HF0 * np.outer(np.ones(NF0), sumHPHI)
WPHI = np.maximum(np.dot(WGAMMA, HGAMMA), eps)
SF0 = np.maximum(np.dot(WF0, HF0), eps)
SPHI = np.maximum(np.dot(WPHI, HPHI), eps)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HGAMMA: ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating WM, after a certain number of iterations (here, after 1 iteration)
if n > -1: # this test can be used such that WM is updated only
# after a certain number of iterations
## tempNumFbyN = SX / np.maximum(hatSX ** 2, eps)
## tempDenFbyN = 1 / np.maximum(hatSX, eps)
## WM = np.maximum(WM * (np.dot(tempNumFbyN, HM.T) /
## np.maximum(np.dot(tempDenFbyN, HM.T),
## eps)) ** omega, eps)
WM = WM * \
((np.dot(SXR / np.maximum(hatSXR ** 2, eps),
np.dot(HM.T, betaR ** 2)) +
np.dot(SXL / np.maximum(hatSXL ** 2, eps),
np.dot(HM.T, betaL ** 2))
) /
(np.dot(1 / np.maximum(hatSXR, eps),
np.dot(HM.T, betaR ** 2)) +
np.dot(1 / np.maximum(hatSXL, eps),
np.dot(HM.T, betaL ** 2))
)) ** omega
sumWM = np.sum(WM, axis=0)
WM[:, sumWM>0] = (WM[:, sumWM>0] /
np.outer(np.ones(F),sumWM[sumWM>0]))
HM = HM * np.outer(sumWM, np.ones(N))
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after WM : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating alphaR and alphaL:
tempNumFbyN = SF0 * SPHI * SXR / np.maximum(hatSXR ** 2, eps)
tempDenFbyN = SF0 * SPHI / np.maximum(hatSXR, eps)
alphaR = np.maximum(alphaR *
(np.sum(tempNumFbyN) /
np.sum(tempDenFbyN)) ** (omega*.1), eps)
tempNumFbyN = SF0 * SPHI * SXL / np.maximum(hatSXL ** 2, eps)
tempDenFbyN = SF0 * SPHI / np.maximum(hatSXL, eps)
alphaL = np.maximum(alphaL *
(np.sum(tempNumFbyN) /
np.sum(tempDenFbyN)) ** (omega*.1), eps)
alphaR = alphaR / np.maximum(alphaR + alphaL, .001)
alphaL = np.copy(1 - alphaR)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after ALPHA : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating betaR and betaL
betaR = np.diag(np.diag(np.maximum(betaR *
((np.dot(np.dot(WM.T, SXR / np.maximum(hatSXR ** 2, eps)), HM.T)) /
(np.dot(np.dot(WM.T, 1 / np.maximum(hatSXR, eps)), HM.T))) ** (omega*.1), eps)))
betaL = np.diag(np.diag(np.maximum(betaL *
((np.dot(np.dot(WM.T, SXL / np.maximum(hatSXL ** 2, eps)), HM.T)) /
(np.dot(np.dot(WM.T, 1 / np.maximum(hatSXL, eps)), HM.T))) ** (omega*.1), eps)))
betaR = betaR / np.maximum(betaR + betaL, eps)
betaL = np.copy(np.eye(R) - betaR)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after BETA : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
return alphaR, alphaL, HGAMMA, HPHI, HF0, betaR, betaL, HM, WM, recoError
def stereo_NMF(SXR, SXL,
numberOfAccompanimentSpectralShapes,
WM0=None, HM0=None,
numberOfIterations=50, updateRulePower=1.0,
verbose=False, displayEvolution=False):
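    """
    betaR, betaL, HM, WM = stereo_NMF(SXR, SXL, ...)
    (Docstring added for clarity.) Stereo NMF decomposition of the right/left power
    spectrograms SXR and SXL under the Itakura-Saito divergence: both channels share
    the spectral shapes WM and the activations HM, while the diagonal matrices betaR
    and betaL (kept such that betaR + betaL = I) encode the panning of each component.
    """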
eps = 10 ** (-20)
if displayEvolution:
import matplotlib.pyplot as plt
from imageMatlab import imageM
plt.ion()
print "Is the display interactive? ", plt.isinteractive()
R = numberOfAccompanimentSpectralShapes
omega = updateRulePower
F, N = SXR.shape
if (F, N) != SXL.shape:
print "The input STFT matrices do not have the same dimension.\n"
print "Please check what happened..."
raise ValueError("Dimension of STFT matrices must be the same.")
if HM0 is None:
HM0 = np.abs(randn(R, N))
else:
if np.array(HM0).shape[0] == R and np.array(HM0).shape[1] == N:
HM0 = np.array(HM0)
else:
print "Wrong dimensions for given HM0, \n"
print "random initialization used instead"
HM0 = np.abs(randn(R, N))
HM = np.copy(HM0)
if WM0 is None:
WM0 = np.abs(randn(F, R))
else:
if np.array(WM0).shape[0] == F and np.array(WM0).shape[1] == R:
WM0 = np.array(WM0)
else:
print "Wrong dimensions for given WM0, \n"
print "random initialization used instead"
WM0 = np.abs(randn(F, R))
WM = np.copy(WM0)
betaR = np.diag(np.random.rand(R))
betaL = np.eye(R) - betaR
hatSXR = np.maximum(np.dot(np.dot(WM, betaR**2), HM), eps)
hatSXL = np.maximum(np.dot(np.dot(WM, betaL**2), HM), eps)
# temporary matrices
tempNumFbyN = np.zeros([F, N])
tempDenFbyN = np.zeros([F, N])
recoError = np.zeros([numberOfIterations * 3 + 1])
recoError[0] = ISDistortion(SXR, hatSXR) + ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error at beginning: ", recoError[0]
counterError = 1
if displayEvolution:
h1 = plt.figure(1)
for n in np.arange(numberOfIterations):
# order of re-estimation: HF0, HPHI, HM, HGAMMA, WM
if verbose:
print "iteration ", n, " over ", numberOfIterations
if displayEvolution:
h1.clf()
imageM(db(hatSXR))
plt.clim([np.amax(db(hatSXR))-100, np.amax(db(hatSXR))])
plt.draw()
# updating HM
HM = HM * \
((np.dot(np.dot((betaR**2), WM.T), SXR /
np.maximum(hatSXR ** 2, eps)) +
np.dot(np.dot((betaL**2), WM.T), SXL /
np.maximum(hatSXL ** 2, eps))
) /
np.maximum(np.dot(np.dot((betaR**2), WM.T), 1 /
np.maximum(hatSXR, eps)) +
np.dot(np.dot((betaL**2), WM.T), 1 /
np.maximum(hatSXL, eps)),
eps)) ** omega
hatSXR = np.maximum(np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum(np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HM : ",\
recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating WM
WM = WM * \
((np.dot(SXR / np.maximum(hatSXR ** 2, eps),
np.dot(HM.T, betaR ** 2)) +
np.dot(SXL / np.maximum(hatSXL ** 2, eps),
np.dot(HM.T, betaL ** 2))
) /
(np.dot(1 / np.maximum(hatSXR, eps),
np.dot(HM.T, betaR ** 2)) +
np.dot(1 / np.maximum(hatSXL, eps),
np.dot(HM.T, betaL ** 2))
)) ** omega
sumWM = np.sum(WM, axis=0)
WM[:, sumWM>0] = (WM[:, sumWM>0] /
np.outer(np.ones(F),sumWM[sumWM>0]))
HM = HM * np.outer(sumWM, np.ones(N))
hatSXR = np.maximum(np.dot(np.dot(WM, betaR**2), HM), eps)
hatSXL = np.maximum(np.dot(np.dot(WM, betaL**2), HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after WM : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating betaR and betaL
betaR = np.diag(np.diag(np.maximum(betaR *
((np.dot(np.dot(WM.T, SXR / np.maximum(hatSXR ** 2,
eps)),
HM.T)) /
(np.dot(np.dot(WM.T, 1 / np.maximum(hatSXR,
eps)),
HM.T))) ** (omega*.1), eps)))
betaL = np.diag(np.diag(np.maximum(betaL *
((np.dot(np.dot(WM.T, SXL / np.maximum(hatSXL ** 2,
eps)),
HM.T)) /
(np.dot(np.dot(WM.T, 1 / np.maximum(hatSXL,
eps)),
HM.T))) ** (omega*.1), eps)))
betaR = betaR / np.maximum(betaR + betaL, eps)
betaL = np.copy(np.eye(R) - betaR)
hatSXR = np.maximum(np.dot(np.dot(WM, betaR**2), HM), eps)
hatSXL = np.maximum(np.dot(np.dot(WM, betaL**2), HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after BETA : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
return betaR, betaL, HM, WM
|
python
|
import sys
import os
import torch
from helen.modules.python.TextColor import TextColor
from helen.modules.python.models.predict_cpu import predict_cpu
from helen.modules.python.models.predict_gpu import predict_gpu
from helen.modules.python.FileManager import FileManager
from os.path import isfile, join
from os import listdir
"""
The call_consensus method generates base predictions for images generated by MarginPolish. This script reads
HDF5 files produced by MarginPolish and writes another HDF5 file that holds all predictions. The generated HDF5 file
is then given to stitch.py, which stitches the segments together using an alignment, yielding a polished sequence.
The algorithm is described here:
1) INPUTS:
- directory path to the image files generated by MarginPolish
- model path pointing to a trained model
- batch size for mini-batch prediction
- number of workers for mini-batch processing threads
- output directory path where the output HDF5 will be saved
- gpu mode indicating whether the GPU will be used
2) METHOD:
- Call the predict function, which loads the neural network, generates base predictions and saves them into an HDF5 file
- Loads the model
- Iterates over the input images in mini-batches
- For each image, uses a sliding window to move over the image sequence
- Aggregates the predictions to get a sequence prediction for the entire image sequence
- Saves all the predictions to a file
3) OUTPUT:
- An HDF5 file containing all the base predictions
"""
def get_file_paths_from_directory(directory_path):
"""
Returns all paths of files given a directory path
:param directory_path: Path to the directory
:return: A list of paths of files
"""
file_paths = [os.path.abspath(join(directory_path, file)) for file in listdir(directory_path)
if isfile(join(directory_path, file)) and file[-2:] == 'h5']
return file_paths
def call_consensus(image_dir, model_path, batch_size, num_workers, threads, output_dir, output_prefix, gpu_mode,
device_ids, callers):
"""
    This method provides an interface to call the predict method that generates the prediction HDF5 file
:param image_dir: Path to directory where all MarginPolish images are saved
:param model_path: Path to a trained model
:param batch_size: Batch size for minibatch processing
:param num_workers: Number of workers for minibatch processing
:param threads: Number of threads for pytorch
:param output_dir: Path to the output directory
:param output_prefix: Prefix of the output HDF5 file
:param gpu_mode: If true, predict method will use GPU.
:param device_ids: List of CUDA devices to use.
:param callers: Total number of callers.
:return:
"""
# check the model file
if not os.path.isfile(model_path):
sys.stderr.write(TextColor.RED + "ERROR: CAN NOT LOCATE MODEL FILE.\n" + TextColor.END)
exit(1)
# check the input directory
if not os.path.isdir(image_dir):
sys.stderr.write(TextColor.RED + "ERROR: CAN NOT LOCATE IMAGE DIRECTORY.\n" + TextColor.END)
exit(1)
# check batch_size
if batch_size <= 0:
sys.stderr.write(TextColor.RED + "ERROR: batch_size NEEDS TO BE >0.\n" + TextColor.END)
exit(1)
# check num_workers
if num_workers < 0:
sys.stderr.write(TextColor.RED + "ERROR: num_workers NEEDS TO BE >=0.\n" + TextColor.END)
exit(1)
# check number of threads
if threads <= 0:
sys.stderr.write(TextColor.RED + "ERROR: THREAD NEEDS TO BE >=0.\n" + TextColor.END)
exit(1)
output_dir = FileManager.handle_output_directory(output_dir)
# create a filename for the output file
output_filename = os.path.join(output_dir, output_prefix)
# inform the output directory
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "OUTPUT FILE: " + output_filename + "\n")
if gpu_mode:
        # Make sure that CUDA is available before attempting to use the GPU
if not torch.cuda.is_available():
sys.stderr.write(TextColor.RED + "ERROR: TORCH IS NOT BUILT WITH CUDA.\n" + TextColor.END)
sys.stderr.write(TextColor.RED + "SEE TORCH CAPABILITY:\n$ python3\n"
">>> import torch \n"
">>> torch.cuda.is_available()\n If true then cuda is avilable"
+ TextColor.END)
exit(1)
# Now see which devices to use
if device_ids is None:
total_gpu_devices = torch.cuda.device_count()
sys.stderr.write(TextColor.GREEN + "INFO: TOTAL GPU AVAILABLE: " + str(total_gpu_devices) + "\n" + TextColor.END)
device_ids = [i for i in range(0, total_gpu_devices)]
callers = total_gpu_devices
else:
device_ids = [int(i) for i in device_ids.split(',')]
for device_id in device_ids:
major_capable, minor_capable = torch.cuda.get_device_capability(device=device_id)
if major_capable < 0:
sys.stderr.write(TextColor.RED + "ERROR: GPU DEVICE: " + str(device_id) + " IS NOT CUDA CAPABLE.\n" + TextColor.END)
sys.stderr.write(TextColor.GREEN + "Try running: $ python3\n"
">>> import torch \n"
">>> torch.cuda.get_device_capability(device="
+ str(device_id) + ")\n" + TextColor.END)
else:
sys.stderr.write(TextColor.GREEN + "INFO: CAPABILITY OF GPU#" + str(device_id)
+ ":\t" + str(major_capable) + "-" + str(minor_capable) + "\n" + TextColor.END)
callers = len(device_ids)
sys.stderr.write(TextColor.GREEN + "INFO: AVAILABLE GPU DEVICES: " + str(device_ids) + "\n" + TextColor.END)
threads_per_caller = 0
else:
# calculate how many threads each caller can use
threads_per_caller = int(threads / callers)
device_ids = []
# chunk the inputs
input_files = get_file_paths_from_directory(image_dir)
# generate file chunks to process in parallel
file_chunks = [[] for i in range(callers)]
for i in range(0, len(input_files)):
file_chunks[i % callers].append(input_files[i])
# get the file chunks
file_chunks = [file_chunks[i] for i in range(len(file_chunks)) if len(file_chunks[i]) > 0]
callers = len(file_chunks)
if gpu_mode:
# Distributed GPU setup
predict_gpu(file_chunks, output_filename, model_path, batch_size, callers, device_ids, num_workers)
else:
# distributed CPU setup, call the prediction function
predict_cpu(file_chunks, output_filename, model_path, batch_size,
callers, threads_per_caller, num_workers)
# notify the user that process has completed successfully
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "PREDICTION GENERATED SUCCESSFULLY.\n")
|
python
|
from dpconverge.data_set import DataSet
import numpy as np
n_features = 2
points_per_feature = 100
centers = [[2, 2], [4, 4]]
ds = DataSet(parameter_count=2)
n_samples = 500
outer_circ_x = 1.0 + np.cos(np.linspace(0, np.pi, n_samples)) / 2
outer_circ_y = 0.5 + np.sin(np.linspace(0, np.pi, n_samples))
X = np.vstack((outer_circ_x, outer_circ_y)).T
np.random.seed(1)
X[:, 0] += (np.random.rand(500) - 0.5) / 16
X[:, 1] += (np.random.rand(500) - 0.5) / 16
X[:, 0] += (np.random.rand(500) - 0.5) / 16
X[:, 1] += (np.random.rand(500) - 0.5) / 16
ds.add_blob(1, X)
ds.plot_blobs(ds.classifications, x_lim=[0, 6], y_lim=[0, 6])
component_count = 32
ds.cluster(
component_count=component_count,
burn_in=1000,
iteration_count=200,
random_seed=123
)
valid_components = ds.get_valid_components()
print "Recommended component count: ", len(valid_components)
for i in range(component_count):
if i in valid_components:
ds.plot_iteration_traces(i)
for i in range(component_count):
if i not in valid_components:
print "Possible invalid Component"
ds.plot_iteration_traces(i)
ds.plot_animated_trace()
|
python
|
# Generated by Django 3.1.1 on 2020-09-16 15:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('erp', '0091_auto_20200914_1720'),
('erp', '0091_auto_20200914_1638'),
]
operations = [
]
|
python
|
"""Montrer dans le widget
Revision ID: 8b4768bb1336
Revises: dc85620e95c3
Create Date: 2021-04-12 17:24:31.906506
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8b4768bb1336'
down_revision = 'dc85620e95c3'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('recommandation', sa.Column('montrer_dans_le_widget', sa.Boolean(), nullable=True))
def downgrade():
op.drop_column('recommandation', 'montrer_dans_le_widget')
|
python
|
def helper(s, k, maxstr, ctr):
    # At position ctr, find the largest digit to its right; if it beats s[ctr], spend
    # one of the k remaining swaps on each occurrence of that digit (scanning from the
    # right), record the best number seen so far in maxstr[0], recurse on the next
    # position and then undo the swap (backtracking). Otherwise just move on.
if k == 0 or ctr == len(s):
return
n = len(s)
maxx = s[ctr]
for i in range(ctr+1, n):
if int(maxx) < int(s[i]):
maxx = s[i]
if maxx != s[ctr]:
k -= 1
for j in range(n-1, ctr, -1):
if int(s[j]) == int(maxx):
s[j], s[ctr] = s[ctr], s[j]
if int(maxstr[0]) < int("".join(map(str, s))):
maxstr[0] = "".join(map(str, s))
helper(s, k, maxstr, ctr+1)
s[j], s[ctr] = s[ctr], s[j]
else:
helper(s, k, maxstr, ctr+1)
class Solution:
#Function to find the largest number after k swaps.
def findMaximumNum(self, s, k):
#code here
maxx = [s]
s = list(map(str, s.strip()))
helper(s, k, maxx, 0)
return maxx[0]
#{
# Driver Code Starts
#Initial Template for Python 3
if __name__ == "__main__":
for _ in range(1):
k = 3
s = "3435335"
ob = Solution()
print(ob.findMaximumNum(s, k))
# } Driver Code Ends
|
python
|
# # -*- coding: utf-8 -*-
# import scrapy
#
# import re
#
# class A55Spider(scrapy.Spider):
# name = '55'
# allowed_domains = ['fsx.sxxz.gov.cn']
# start_urls = ['http://fsx.sxxz.gov.cn/fsxzw/zwgk/xxgkzn/']
#
# def parse(self, response):
# navi_list = response.xpath('//ul[@class="item-nav"]//@href').extract()
# web_domain = "http://fsx.sxxz.gov.cn/fsxzw/zwgk"
# for navi in navi_list:
# complete_url = web_domain + navi[2:]
# yield scrapy.Request(url=complete_url, callback=self.extract_table)
#
# def extract_table(self, response):
# web_url = response.url
# url_rule = re.compile(r'/\d+/t\d+_\d+\.html$')
# if url_rule.match(web_url):
# yield scrapy.Request(url=web_url, callback=self.table_url)
|
python
|
# pop() -> removes (and returns) the element at the given index.
lista_4 = [10, 9, 8, 7, 5, 6, 4, 2, 3, 1, 2, 3]
print(lista_4)
lista_4.pop(2)   # removes the element at index 2 (the value 8)
print(lista_4)
lista_4.pop(-1)  # removes the last element
print(lista_4)
|
python
|
from typing import List, Union
import numpy as np
def get_test_function_method_min(n: int, a: List[List[float]], c: List[List[float]],
p: List[List[float]], b: List[float]):
"""
    Closure that builds and returns a test function using Feldbaum's method,
    i.e. applying the minimum operator to a set of single-extremum power functions.
    :param n: number of extrema, an integer >= 1
    :param a: list (length n) of steepness coefficients for the extrema; the larger the values,
    the faster the function decreases/increases and the narrower the extremum region, List[List[float]]
    :param c: list (length n) of extremum coordinates, List[List[float]]
    :param p: list of smoothness exponents around each extremum;
    if 0 < p[i][j] <= 1 the function has a corner (is non-smooth) at the extremum point
    :param b: list (length n) of function values at the extrema, List[float], len(b) = n
    :return: a function that takes a one-dimensional list of point coordinates and
    returns the value of the test function at that point
"""
def func(x):
l = []
for i in range(n):
res = 0
for j in range(len(x)):
res = res + a[i][j] * np.abs(x[j] - c[i][j]) ** p[i][j]
res = res + b[i]
l.append(res)
res = np.array(l)
return np.min(res)
return func
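def _example_method_min():
    """Illustrative example (not part of the original module): a two-extremum
    Feldbaum-type test function of two variables; all numbers are hypothetical."""
    f = get_test_function_method_min(
        n=2,
        a=[[1.0, 1.0], [2.0, 2.0]],
        c=[[0.0, 0.0], [3.0, 3.0]],
        p=[[2.0, 2.0], [1.0, 1.0]],
        b=[0.0, -1.0])
    # at each extremum the minimum over the two branches equals the corresponding b[i]
    return f([0.0, 0.0]), f([3.0, 3.0])  # -> (0.0, -1.0)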
def get_tf_hyperbolic_potential_abs(n: int, a: List[float], c: List[List[float]],
p: List[List[float]], b: List[float]):
"""
    Closure. Builds and returns a test function based on hyperbolic potentials
    with additive absolute-value terms in the denominator.
    :param n: number of extrema, an integer >= 1
    :param a: one-dimensional list (length n) of coefficients controlling the steepness of the function near each extremum
    :param c: two-dimensional list (length n) of extremum coordinates, List[List[float]]
    :param p: two-dimensional list of smoothness exponents near each extremum, List[List[float]]
    :param b: one-dimensional list (length n) of coefficients defining the function values at the extrema
    :return: a function that takes a one-dimensional list of point coordinates and
    returns the value of the test function at that point
"""
def func(x):
value = 0
for i in range(n):
res = 0
for j in range(len(x)):
res = res + np.abs(x[j] - c[i][j]) ** p[i][j]
res = a[i] * res + b[i]
res = -(1 / res)
value = value + res
return value
return func
def get_tf_hyperbolic_potential_sqr(n: int, a: List[List[float]], c: List[List[float]], b):
"""
    Closure. Builds and returns a test function based on hyperbolic potentials
    with additive quadratic terms in the denominator.
    :param n: number of extrema, an integer >= 1
    :param a: two-dimensional list of coefficients controlling the steepness of the function near each extremum
    :param c: two-dimensional list (length n) of extremum coordinates,
    List[List[float]], shape n * m, where m is the problem dimension
    :param b: one-dimensional list (length n) of coefficients defining the function values at the extrema
    :return: a function that takes a one-dimensional list of point coordinates and
    returns the value of the test function at that point
"""
def func(x):
value = 0
for i in range(n):
res = 0
for j in range(len(x)):
                res = res + a[i][j] * (x[j] - c[i][j]) ** 2  # TODO: check whether the coefficient a belongs here
res = res + b[i]
res = -(1 / res)
value = value + res
return value
return func
def get_tf_exponential_potential(n: int, a: List[float], c: List[List[float]],
p: List[List[float]], b: List[float]):
"""
    Closure. Builds and returns a test function based on exponential potentials
    with additive absolute-value terms in the exponent.
    :param n: number of extrema, an integer >= 1
    :param a: one-dimensional list (length n) of coefficients controlling the steepness of the function near each extremum
    :param c: two-dimensional list of extremum coordinates, List[List[float]], shape n * m, where m is the problem dimension
    :param p: two-dimensional list of smoothness exponents near each extremum, List[List[float]], shape n * m
    :param b: one-dimensional list (length n) of coefficients defining the function values at the extrema
    :return: a function that takes a one-dimensional list of point coordinates and
    returns the value of the test function at that point
"""
def func(x):
value = 0
for i in range(n):
res = 0
for j in range(len(x)):
res = res + np.abs(x[j] - c[i][j]) ** p[i][j]
res = (-b[i]) * np.exp((-a[i]) * res)
value = value + res
return value
return func
def get_test_func(type_func: str, n: int,
a: List[Union[List[float], float]], c: List[List[float]], p: List[List[float]], b: List[float]):
"""Возвращает необходимую функцию в зависимости от переданного типа"""
if type_func == "feldbaum_function":
func = get_test_function_method_min(n, a, c, p, b)
elif type_func == "hyperbolic_potential_abs":
func = get_tf_hyperbolic_potential_abs(n, a, c, p, b)
elif type_func == "exponential_potential":
func = get_tf_exponential_potential(n, a, c, p, b)
else:
func = None
return func
|
python
|
from typing import cast, Mapping, Any, List, Tuple
from .models import PortExpenses, Port
def parse_port_expenses(json: Mapping[str, Any]) -> PortExpenses:
return PortExpenses(
cast(int, json.get("PortId")),
cast(int, json.get("PortCanal")),
cast(int, json.get("Towage")),
cast(int, json.get("Berth")),
cast(int, json.get("PortDues")),
cast(int, json.get("Lighthouse")),
cast(int, json.get("Mooring")),
cast(int, json.get("Pilotage")),
cast(int, json.get("Quay")),
cast(int, json.get("Anchorage")),
cast(int, json.get("AgencyFees")),
cast(int, json.get("Other")),
cast(int, json.get("SuezDues")),
cast(int, json.get("TotalCost")),
cast(int, json.get("MiscellaneousDues")),
cast(bool, json.get("IsEstimated")),
cast(int, json.get("CanalDues")),
cast(int, json.get("BerthDues")),
cast(int, json.get("LighthouseDues")),
cast(int, json.get("MooringUnmooring")),
cast(int, json.get("QuayDues")),
cast(int, json.get("AnchorageDues")),
cast(List[int], json.get("PortAgents")),
)
def parse_ports(json: Mapping[str, Any]) -> Tuple[Port, ...]:
ports: List[Port] = []
json_ports = json.get("Ports")
if json_ports is not None and isinstance(json_ports, list):
for port_json in json_ports:
port = Port(
cast(int, port_json.get("PortId")),
cast(str, port_json.get("PortName")),
)
ports.append(port)
return tuple(ports)
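# Illustrative example (added; the key names follow the fields read above, the
# numbers are made up, and Port comes from .models as imported at the top):
def _example_parse_ports():
    sample = {"Ports": [{"PortId": 1, "PortName": "Piraeus"},
                        {"PortId": 2, "PortName": "Rotterdam"}]}
    return parse_ports(sample)  # a tuple with two Port instances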
|
python
|
from typing import Callable
from rx import operators as ops
from rx.core import Observable, pipe
from rx.core.typing import Predicate
def _all(predicate: Predicate) -> Callable[[Observable], Observable]:
filtering = ops.filter(lambda v: not predicate(v))
mapping = ops.map(lambda b: not b)
some = ops.some()
return pipe(
filtering,
some,
mapping
)
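# Minimal usage sketch (assuming RxPY 3.x; not part of the original module):
#
#   import rx
#   rx.of(2, 4, 6).pipe(_all(lambda v: v % 2 == 0)).subscribe(print)  # prints True
#   rx.of(2, 3, 6).pipe(_all(lambda v: v % 2 == 0)).subscribe(print)  # prints False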
|
python
|
import uuid
from datetime import datetime
from os import path
from sqlalchemy.orm.scoping import scoped_session
import factory
import factory.fuzzy
from app.extensions import db
from tests.status_code_gen import *
from app.api.applications.models.application import Application
from app.api.document_manager.models.document_manager import DocumentManager
from app.api.documents.expected.models.mine_expected_document import MineExpectedDocument
from app.api.documents.mines.models.mine_document import MineDocument
from app.api.documents.variances.models.variance import VarianceDocumentXref
from app.api.mines.location.models.mine_location import MineLocation
from app.api.mines.mine.models.mine import Mine
from app.api.mines.mine.models.mine_type import MineType
from app.api.mines.mine.models.mine_type_detail import MineTypeDetail
from app.api.mines.mine.models.mine_verified_status import MineVerifiedStatus
from app.api.mines.incidents.models.mine_incident import MineIncident
from app.api.mines.status.models.mine_status import MineStatus
from app.api.mines.subscription.models.subscription import Subscription
from app.api.mines.tailings.models.tailings import MineTailingsStorageFacility
from app.api.parties.party.models.party import Party
from app.api.parties.party.models.address import Address
from app.api.parties.party_appt.models.mine_party_appt import MinePartyAppointment
from app.api.permits.permit.models.permit import Permit
from app.api.permits.permit_amendment.models.permit_amendment import PermitAmendment
from app.api.permits.permit_amendment.models.permit_amendment_document import PermitAmendmentDocument
from app.api.users.core.models.core_user import CoreUser, IdirUserDetail
from app.api.users.minespace.models.minespace_user import MinespaceUser
from app.api.variances.models.variance import Variance
from app.api.parties.party_appt.models.party_business_role_appt import PartyBusinessRoleAppointment
GUID = factory.LazyFunction(uuid.uuid4)
TODAY = factory.LazyFunction(datetime.now)
FACTORY_LIST = []
class FactoryRegistry:
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
FACTORY_LIST.append(cls)
class BaseFactory(factory.alchemy.SQLAlchemyModelFactory, FactoryRegistry):
class Meta:
abstract = True
sqlalchemy_session = db.session
sqlalchemy_session_persistence = 'flush'
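# Note (added): FactoryRegistry.__init_subclass__ appends each subclass to FACTORY_LIST
# at class-creation time, so test fixtures can iterate over all factories defined below
# (e.g. for session handling) without maintaining a manual list.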
class ApplicationFactory(BaseFactory):
class Meta:
model = Application
class Params:
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
application_guid = GUID
mine_guid = factory.SelfAttribute('mine.mine_guid')
application_no = factory.Sequence(lambda n: f'TX-{n}-TEST')
application_status_code = factory.LazyFunction(RandomApplicationStatusCode)
description = factory.Faker('sentence', nb_words=8, variable_nb_words=True)
received_date = TODAY
class DocumentManagerFactory(BaseFactory):
class Meta:
model = DocumentManager
class Params:
path_root = ''
document_guid = GUID
full_storage_path = factory.LazyAttribute(
lambda o: path.join(o.path_root, 'mine_no/category', o.file_display_name))
upload_started_date = TODAY
upload_completed_date = TODAY
file_display_name = factory.Faker('file_name')
path_display_name = factory.LazyAttribute(
lambda o: path.join(o.path_root, 'mine_name/category', o.file_display_name))
class MineDocumentFactory(BaseFactory):
class Meta:
model = MineDocument
class Params:
document_manager_obj = factory.SubFactory(
DocumentManagerFactory, file_display_name=factory.SelfAttribute('..document_name'))
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
mine_document_guid = GUID
mine_guid = factory.SelfAttribute('mine.mine_guid')
document_manager_guid = factory.SelfAttribute('document_manager_obj.document_guid')
document_name = factory.Faker('file_name')
mine_expected_document = []
class MineExpectedDocumentFactory(BaseFactory):
class Meta:
model = MineExpectedDocument
exp_document_guid = GUID
required_document = factory.LazyFunction(RandomRequiredDocument)
exp_document_status_code = factory.LazyFunction(RandomExpectedDocumentStatusCode)
exp_document_name = factory.SelfAttribute('required_document.req_document_name')
exp_document_description = factory.SelfAttribute('required_document.description')
due_date = TODAY
received_date = TODAY
hsrc_code = factory.SelfAttribute('required_document.hsrc_code')
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
related_documents = []
@factory.post_generation
def related_documents(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
MineDocumentFactory.create_batch(
size=extracted, mine_expected_document=[obj], mine=obj.mine, **kwargs)
class MineLocationFactory(BaseFactory):
class Meta:
model = MineLocation
mine_location_guid = GUID
latitude = factory.Faker('latitude') # or factory.fuzzy.FuzzyFloat(49, 60) for ~ inside BC
longitude = factory.Faker('longitude') # or factory.fuzzy.FuzzyFloat(-132, -114.7) for ~ BC
geom = factory.LazyAttribute(lambda o: 'SRID=3005;POINT(%f %f)' % (o.longitude, o.latitude))
mine_location_description = factory.Faker('sentence', nb_words=8, variable_nb_words=True)
effective_date = TODAY
expiry_date = TODAY
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
class MineStatusFactory(BaseFactory):
class Meta:
model = MineStatus
mine_status_guid = GUID
effective_date = TODAY
mine_status_xref = factory.LazyFunction(RandomMineStatusXref)
class MineTypeDetailFactory(BaseFactory):
class Meta:
model = MineTypeDetail
class Params:
tenure = 'MIN'
commodity = factory.Trait(
mine_commodity_code=factory.LazyAttribute(
lambda o: SampleMineCommodityCodes(o.tenure, 1)[0]))
disturbance = factory.Trait(
mine_disturbance_code=factory.LazyAttribute(
lambda o: SampleMineDisturbanceCodes(o.tenure, 1)[0]))
mine_type_detail_xref_guid = GUID
mine_commodity_code = None
mine_disturbance_code = None
class MineTypeFactory(BaseFactory):
class Meta:
model = MineType
mine_type_guid = GUID
mine_tenure_type_code = factory.LazyFunction(RandomTenureTypeCode)
mine_type_detail = []
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
@factory.post_generation
def mine_type_detail(obj, create, extracted, **kwargs):
if not create:
return
if extracted is None:
extracted = {}
commodities = extracted.get('commodities', 1)
commodities = SampleMineCommodityCodes(obj.mine_tenure_type_code, commodities)
disturbances = extracted.get('disturbances', 1)
disturbances = SampleMineDisturbanceCodes(obj.mine_tenure_type_code, disturbances)
for commodity in commodities:
MineTypeDetailFactory(
mine_type_guid=obj.mine_type_guid,
tenure=obj.mine_tenure_type_code,
mine_commodity_code=commodity,
**kwargs)
for disturbance in disturbances:
MineTypeDetailFactory(
mine_type_guid=obj.mine_type_guid,
tenure=obj.mine_tenure_type_code,
mine_disturbance_code=disturbance,
**kwargs)
class MineTailingsStorageFacilityFactory(BaseFactory):
class Meta:
model = MineTailingsStorageFacility
mine_tailings_storage_facility_guid = GUID
mine_tailings_storage_facility_name = factory.Faker('last_name')
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
class VarianceFactory(BaseFactory):
class Meta:
model = Variance
class Params:
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
inspector = factory.SubFactory('tests.factories.PartyBusinessRoleFactory')
approved = factory.Trait(
variance_application_status_code='APP',
issue_date=TODAY,
expiry_date=TODAY,
inspector_party_guid=factory.SelfAttribute('inspector.party_guid'))
denied = factory.Trait(
variance_application_status_code='DEN',
inspector_party_guid=factory.SelfAttribute('inspector.party_guid'))
not_applicable = factory.Trait(variance_application_status_code='NAP')
variance_guid = GUID
compliance_article_id = factory.LazyFunction(RandomComplianceArticleId)
mine_guid = factory.SelfAttribute('mine.mine_guid')
note = factory.Faker('sentence', nb_words=6, variable_nb_words=True)
parties_notified_ind = factory.Faker('boolean', chance_of_getting_true=50)
received_date = TODAY
documents = []
@factory.post_generation
def documents(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
VarianceDocumentFactory.create_batch(
size=extracted, variance=obj, mine_document__mine=None, **kwargs)
class VarianceDocumentFactory(BaseFactory):
class Meta:
model = VarianceDocumentXref
class Params:
mine_document = factory.SubFactory(
'tests.factories.MineDocumentFactory',
mine_guid=factory.SelfAttribute('..variance.mine_guid'))
variance = factory.SubFactory('tests.factories.VarianceFactory')
variance_document_xref_guid = GUID
mine_document_guid = factory.SelfAttribute('mine_document.mine_document_guid')
variance_id = factory.SelfAttribute('variance.variance_id')
variance_document_category_code = factory.LazyFunction(RandomVarianceDocumentCategoryCode)
def RandomPermitNumber():
return random.choice(['C-', 'CX-', 'M-', 'M-', 'P-', 'PX-', 'G-', 'Q-']) + str(
random.randint(1, 9999999))
class PermitFactory(BaseFactory):
class Meta:
model = Permit
permit_guid = GUID
permit_no = factory.LazyFunction(RandomPermitNumber)
permit_status_code = factory.LazyFunction(RandomPermitStatusCode)
permit_amendments = []
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
@factory.post_generation
def permit_amendments(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
for n in range(extracted):
PermitAmendmentFactory(permit=obj, initial_permit=(n == 0), **kwargs)
class PermitAmendmentFactory(BaseFactory):
class Meta:
model = PermitAmendment
class Params:
initial_permit = factory.Trait(
description='Initial permit issued.',
permit_amendment_type_code='OGP',
)
permit = factory.SubFactory(PermitFactory, permit_amendments=0)
permit_amendment_guid = GUID
permit_id = factory.SelfAttribute('permit.permit_id')
received_date = TODAY
issue_date = TODAY
authorization_end_date = factory.Faker('future_datetime', end_date='+30d')
permit_amendment_status_code = 'ACT'
permit_amendment_type_code = 'AMD'
description = factory.Faker('sentence', nb_words=6, variable_nb_words=True)
documents = []
class PermitAmendmentDocumentFactory(BaseFactory):
class Meta:
model = PermitAmendmentDocument
class Params:
document_manager_obj = factory.SubFactory(
DocumentManagerFactory, file_display_name=factory.SelfAttribute('..document_name'))
permit_amendment_document_guid = GUID
permit_amendment_id = factory.SelfAttribute('permit_amendment.permit_amendment_id')
document_name = factory.Faker('file_name')
mine_guid = factory.SelfAttribute('permit_amendment.permit.mine.mine_guid')
document_manager_guid = factory.SelfAttribute('document_manager_obj.document_guid')
permit_amendment = factory.SubFactory(PermitAmendmentFactory)
class MineVerifiedStatusFactory(BaseFactory):
class Meta:
model = MineVerifiedStatus
healthy_ind = factory.Faker('boolean', chance_of_getting_true=50)
verifying_user = factory.Faker('name')
verifying_timestamp = TODAY
update_user = factory.Faker('name')
update_timestamp = TODAY
class MineIncidentFactory(BaseFactory):
class Meta:
model = MineIncident
class Params:
do_subparagraph_count = 2
mine_incident_id_year = 2019
mine_incident_guid = GUID
incident_timestamp = factory.Faker('past_datetime')
incident_description = factory.Faker('sentence', nb_words=20, variable_nb_words=True)
reported_timestamp = factory.Faker('past_datetime')
reported_by = factory.Faker('name')
reported_by_role = factory.Faker('job')
determination_type_code = factory.LazyFunction(RandomIncidentDeterminationTypeCode)
followup_type_code = 'NOA'
followup_inspection_no = factory.Faker('numerify', text='######') #nullable???
closing_report_summary = factory.Faker('sentence', nb_words=20, variable_nb_words=True)
dangerous_occurrence_subparagraphs = factory.LazyAttribute(
lambda o: SampleDangerousOccurrenceSubparagraphs(o.do_subparagraph_count)
if o.determination_type_code == 'DO' else [])
class AddressFactory(BaseFactory):
class Meta:
model = Address
address_line_1 = factory.Faker('street_address')
suite_no = factory.Iterator([None, None, '123', '123'])
address_line_2 = factory.Iterator([None, 'Apt. 123', None, 'Apt. 123'])
city = factory.Faker('city')
sub_division_code = factory.LazyFunction(RandomSubDivisionCode)
post_code = factory.Faker('bothify', text='?#?#?#', letters='ABCDEFGHIJKLMNOPQRSTUVWXYZ')
class PartyFactory(BaseFactory):
class Meta:
model = Party
class Params:
person = factory.Trait(
first_name=factory.Faker('first_name'),
party_name=factory.Faker('last_name'),
email=factory.LazyAttribute(lambda o: f'{o.first_name}.{o.party_name}@example.com'),
party_type_code='PER',
)
company = factory.Trait(
party_name=factory.Faker('company'),
email=factory.Faker('company_email'),
party_type_code='ORG',
)
party_guid = factory.LazyFunction(uuid.uuid4)
first_name = None
party_name = None
phone_no = factory.Faker('numerify', text='###-###-####')
phone_ext = factory.Iterator([None, '123'])
email = None
effective_date = TODAY
expiry_date = None
party_type_code = None
mine_party_appt = []
address = factory.List([factory.SubFactory(AddressFactory) for _ in range(1)])
class PartyBusinessRoleFactory(BaseFactory):
class Meta:
model = PartyBusinessRoleAppointment
party_business_role_code = factory.LazyFunction(RandomPartyBusinessRoleCode)
party = factory.SubFactory(PartyFactory, person=True)
start_date = TODAY
end_date = None
class MinePartyAppointmentFactory(BaseFactory):
class Meta:
model = MinePartyAppointment
mine_party_appt_guid = GUID
mine = factory.SubFactory('tests.factories.MineFactory')
party = factory.SubFactory(PartyFactory, person=True)
mine_party_appt_type_code = factory.LazyFunction(RandomMinePartyAppointmentTypeCode)
start_date = TODAY
end_date = None
processed_by = factory.Faker('first_name')
processed_on = TODAY
mine_tailings_storage_facility_guid = factory.LazyAttribute(
lambda o: o.mine.mine_tailings_storage_facilities[0].mine_tailings_storage_facility_guid if o.mine_party_appt_type_code == 'EOR' else None
)
permit_guid = factory.LazyAttribute(
lambda o: o.mine.mine_permit[0].permit_guid if o.mine_party_appt_type_code == 'PMT' else None
)
class CoreUserFactory(BaseFactory):
class Meta:
model = CoreUser
core_user_guid = GUID
email = factory.Faker('email')
phone_no = factory.Faker('numerify', text='###-###-####')
last_logon = TODAY
idir_user_detail = factory.RelatedFactory('tests.factories.IdirUserDetailFactory', 'core_user')
class IdirUserDetailFactory(BaseFactory):
class Meta:
model = IdirUserDetail
class Params:
core_user = factory.SubFactory(CoreUserFactory)
core_user_id = factory.SelfAttribute('core_user.core_user_id')
bcgov_guid = GUID
username = factory.Faker('first_name')
class MinespaceUserFactory(BaseFactory):
class Meta:
model = MinespaceUser
keycloak_guid = GUID
email = factory.Faker('email')
class SubscriptionFactory(BaseFactory):
class Meta:
model = Subscription
class Params:
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
mine_guid = factory.SelfAttribute('mine.mine_guid')
user_name = factory.Faker('last_name')
class MineFactory(BaseFactory):
class Meta:
model = Mine
class Params:
minimal = factory.Trait(
mine_no=None,
mine_note=None,
mine_region='NE',
mine_location=None,
mine_type=None,
verified_status=None,
mine_status=None,
mine_tailings_storage_facilities=0,
mine_permit=0,
mine_expected_documents=0,
mine_incidents=0,
mine_variance=0,
)
mine_guid = GUID
mine_no = factory.Faker('ean', length=8)
mine_name = factory.Faker('company')
mine_note = factory.Faker('sentence', nb_words=6, variable_nb_words=True)
major_mine_ind = factory.Faker('boolean', chance_of_getting_true=50)
mine_region = factory.LazyFunction(RandomMineRegionCode)
ohsc_ind = factory.Faker('boolean', chance_of_getting_true=50)
union_ind = factory.Faker('boolean', chance_of_getting_true=50)
mine_location = factory.RelatedFactory(MineLocationFactory, 'mine')
mine_type = factory.RelatedFactory(MineTypeFactory, 'mine')
verified_status = factory.RelatedFactory(MineVerifiedStatusFactory, 'mine')
mine_status = factory.RelatedFactory(MineStatusFactory, 'mine')
mine_tailings_storage_facilities = []
mine_permit = []
mine_expected_documents = []
mine_incidents = []
mine_variance = []
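    # The post-generation hooks below run after the mine is created; passing an
    # integer for any of these attributes (as the `minimal` trait does with 0)
    # controls how many related records are built, defaulting to one each.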
@factory.post_generation
def mine_tailings_storage_facilities(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
MineTailingsStorageFacilityFactory.create_batch(size=extracted, mine=obj, **kwargs)
@factory.post_generation
def mine_permit(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
PermitFactory.create_batch(size=extracted, mine=obj, **kwargs)
@factory.post_generation
def mine_expected_documents(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
MineExpectedDocumentFactory.create_batch(size=extracted, mine=obj, **kwargs)
@factory.post_generation
def mine_incidents(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
MineIncidentFactory.create_batch(size=extracted, mine_guid=obj.mine_guid, **kwargs)
@factory.post_generation
def mine_variance(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
VarianceFactory.create_batch(size=extracted, mine=obj, **kwargs)
# ---------------------------------------------------------------------------
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, BatchNormalization, ReLU
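# Decoder fuses a coarse feature map with progressively finer skip features:
# at each stage the running tensor is resized (bilinear by default) to the next
# skip's spatial size, concatenated with it, and passed through conv -> BN ->
# ReLU; the final stage is a plain biased conv with no normalisation or activation.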
class Decoder(Model):
def __init__(self, channels):
super().__init__()
self.conv1 = Conv2D(channels[0], 3, padding='SAME', use_bias=False)
self.bn1 = BatchNormalization(momentum=0.1, epsilon=1e-5)
self.conv2 = Conv2D(channels[1], 3, padding='SAME', use_bias=False)
self.bn2 = BatchNormalization(momentum=0.1, epsilon=1e-5)
self.conv3 = Conv2D(channels[2], 3, padding='SAME', use_bias=False)
self.bn3 = BatchNormalization(momentum=0.1, epsilon=1e-5)
self.conv4 = Conv2D(channels[3], 3, padding='SAME', use_bias=True)
self.relu = ReLU()
def call(self, x, training=None):
x4, x3, x2, x1, x0 = x
x = tf.image.resize(x4, tf.shape(x3)[1:3])
x = tf.concat([x, x3], axis=-1)
x = self.conv1(x, training=training)
x = self.bn1(x, training=training)
x = self.relu(x, training=training)
x = tf.image.resize(x, tf.shape(x2)[1:3])
x = tf.concat([x, x2], axis=-1)
x = self.conv2(x, training=training)
x = self.bn2(x, training=training)
x = self.relu(x, training=training)
x = tf.image.resize(x, tf.shape(x1)[1:3])
x = tf.concat([x, x1], axis=-1)
x = self.conv3(x, training=training)
x = self.bn3(x, training=training)
x = self.relu(x, training=training)
x = tf.image.resize(x, tf.shape(x0)[1:3])
x = tf.concat([x, x0], axis=-1)
x = self.conv4(x, training=training)
return x
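# A minimal usage sketch (channel sizes and feature-map names are assumptions,
# not taken from the original):
#   decoder = Decoder(channels=[256, 128, 64, 1])
#   y = decoder((x4, x3, x2, x1, x0), training=False)  # coarsest feature first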
# ---------------------------------------------------------------------------
import os
from typing import List, Tuple
from urllib.request import urlopen
from discord.ext import commands
from blurpo.func import database, send_embed, wrap
def basename(path: str) -> Tuple[str, str]:
# Get file's basename from url
# eg. https://website.com/index.html -> (index.html, index)
return (base := path.split('/')[-1]), base.split('.')[0]
def exts_list(chn_id: int) -> None:
with database() as db:
exts = list(db['Github'])
chunks = wrap('\n'.join(exts), code='bash')
send_embed(chn_id, chunks, title='Github Extensions', color=333333)
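# Download the raw file for `path` (owner/repo/branch/filepath) from GitHub,
# load it as a bot extension (reloading it if already loaded), and remove the
# temporary local copy afterwards.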
def ext_load(bot: commands.Bot, path: str) -> None:
base, name = basename(path)
url = 'https://raw.githubusercontent.com/' + path
with open(base, 'w') as f:
f.write(urlopen(url).read().decode('utf-8'))
try: bot.load_extension(name)
except commands.ExtensionAlreadyLoaded: bot.reload_extension(name)
finally: os.remove(base)
def exts_load(bot) -> List[str]:
with database() as db:
exts = db['Github']
loaded = []
for ext in exts.keys():
try:
ext_load(bot, exts[ext])
loaded.append(ext)
except Exception as e: print(e)
return loaded
class Github(commands.Cog):
def __init__(self, bot) -> None:
self.bot = bot
with database() as db:
if 'Github' in db:
exts = exts_load(self.bot)
print(f'{exts} loaded')
else: db['Github'] = {}
@commands.command(
'gload',
brief='Load exts. Path: [owner/repo/branch/filepath]')
async def exts_load(self, ctx, *paths: str) -> None:
with database() as db:
for path in paths:
ext_load(self.bot, path)
_, ext = basename(path)
exts = db['Github']
exts[ext] = path
db['Github'] = exts
exts_list(ctx.channel.id)
@commands.command('gunld', brief='Unload exts')
async def exts_unload(self, ctx, *exts: str) -> None:
with database() as db:
for ext in exts:
es = db['Github']
if ext in es:
del es[ext]
db['Github'] = es
self.bot.unload_extension(ext)
exts_list(ctx.channel.id)
@commands.command('gexts', brief='List exts')
async def exts_list(self, ctx) -> None:
exts_list(ctx.channel.id)
@commands.command('greld', brief='Reload all exts')
async def ghExtsReload(self, ctx) -> None:
exts = exts_load(self.bot)
chunks = wrap('\n'.join(exts), code='bash')
send_embed(ctx.channel.id,
chunks,
title='Extensions Reloaded',
color=333333)
def setup(bot):
bot.add_cog(Github(bot))
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# Copyright (C) 2017 Udacity Inc.
#
# This file is part of Robotic Arm: Pick and Place project for Udacity
# Robotics nano-degree program
#
# All Rights Reserved.
# Author: Harsh Pandya
# import modules
import rospy
import tf
from kuka_arm.srv import *
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from mpmath import *
from sympy import *
import numpy as np
from numpy import array
from sympy import symbols, cos, sin, pi, sqrt, atan2
#### Transformation matrix function###
def Transform(q,d,a,alpha,s):
T = Matrix([[cos(q) , -sin(q) , 0 , a ],
[sin(q) * cos(alpha), cos(q) * cos(alpha), -sin(alpha), -sin(alpha) * d],
[sin(q) * sin(alpha), cos(q) * sin(alpha), cos(alpha), cos(alpha) * d ],
[0 , 0 , 0 , 1 ]])
return T.subs(s)
######################################
def handle_calculate_IK(req):
rospy.loginfo("Received %s eef-poses from the plan" % len(req.poses))
if len(req.poses) < 1:
print "No valid poses received"
return -1
else:
# Create symbols
q1,q2,q3,q4,q5,q6,q7 = symbols('q1:8')
d1,d2,d3,d4,d5,d6,d7 = symbols('d1:8')
        a0,a1,a2,a3,a4,a5,a6 = symbols('a0:7')
alpha0,alpha1,alpha2,alpha3,alpha4,alpha5,alpha6 = symbols('alpha0:7')
################
# Create Modified DH parameters
        s = {alpha0:     0, a0:      0, d1:  0.75,
             alpha1: -pi/2, a1:   0.35, d2:     0, q2: q2 - pi/2,
             alpha2:     0, a2:   1.25, d3:     0,
             alpha3: -pi/2, a3: -0.054, d4:   1.5,
             alpha4:  pi/2, a4:      0, d5:     0,
             alpha5: -pi/2, a5:      0, d6:     0,
             alpha6:     0, a6:      0, d7: 0.303, q7: 0}
################################
# Create individual transformation matrices
T0_1=Transform(q1,d1,a0,alpha0,s)
T1_2=Transform(q2,d2,a1,alpha1,s)
T2_3=Transform(q3,d3,a2,alpha2,s)
T3_4=Transform(q4,d4,a3,alpha3,s)
T4_5=Transform(q5,d5,a4,alpha4,s)
T5_6=Transform(q6,d6,a5,alpha5,s)
T6_G=Transform(q7,d7,a6,alpha6,s)
T0_G= T0_1*T1_2*T2_3*T3_4*T4_5*T5_6*T6_G
###########################################
# Creating function for Rotation matrices
R,P,Y = symbols('R P Y')
def Rot(symb,Roll=R,Pitch=P,Yaw=Y):
if symb == 'R':
Rot = Matrix([
[ 1, 0, 0],
[ 0, cos(Roll), -sin(Roll)],
[ 0, sin(Roll), cos(Roll)]])
elif symb == 'P':
Rot = Matrix([
[ cos(Pitch), 0, sin(Pitch)],
[ 0, 1, 0],
[-sin(Pitch), 0, cos(Pitch)]])
elif symb == 'Y':
Rot = Matrix([
[cos(Yaw), -sin(Yaw), 0],
[sin(Yaw), cos(Yaw), 0],
[ 0, 0, 1]])
return Rot
#######################################
# Accounting for Orientation Difference
Rot_x = Rot('R')
Rot_y = Rot('P')
Rot_z = Rot('Y')
        Rot_F = Rot_z.subs(Y, radians(180)) * Rot_y.subs(P, radians(-90))
Rot_E = Rot_z*Rot_y*Rot_x
Rot_EE = Rot_E * Rot_F
#######################################
# Initialize service response
joint_trajectory_list = []
for x in xrange(0, len(req.poses)):
# IK code starts here
joint_trajectory_point = JointTrajectoryPoint()
# Extract end-effector position and orientation from request
# px,py,pz = end-effector position
# roll, pitch, yaw = end-effector orientation
px = req.poses[x].position.x
py = req.poses[x].position.y
pz = req.poses[x].position.z
(roll, pitch, yaw) = tf.transformations.euler_from_quaternion(
[req.poses[x].orientation.x, req.poses[x].orientation.y,
req.poses[x].orientation.z, req.poses[x].orientation.w])
# Finding the position of WC according to End Effector
            Rot_EE_num = Rot_EE.subs({R: roll, P: pitch, Y: yaw})
            Pos_EE = Matrix([px, py, pz])
            Pos_WC = Pos_EE - 0.303 * Rot_EE_num[:, 2]
WC_x = Pos_WC[0]
WC_y = Pos_WC[1]
WC_z = Pos_WC[2]
# Calculate joint angles using Geometric IK method
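            # La (joint 3 -> wrist centre), Lc (joint 2 -> joint 3) and the
            # computed Lb (joint 2 -> wrist centre) form a triangle; the cosine
            # rule below gives the interior angles used for theta2 and theta3.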
La = 1.502
Lc = 1.25
a1 = 0.35
d1 = 0.75
Lxy= sqrt(pow(WC_x, 2.) + pow(WC_y, 2.) ) - a1
Lz = WC_z - d1
Lb = sqrt(pow(Lxy, 2.) + pow(Lz, 2.))
a_ang = acos( ( pow(Lb, 2.) + pow(Lc, 2.) - pow(La, 2.)) / (2. * Lb * Lc) )
b_ang = acos( ( pow(La, 2.) + pow(Lc, 2.) - pow(Lb, 2.)) / (2. * La * Lc) )
c_ang = acos( ( pow(La, 2.) + pow(Lb, 2.) - pow(Lc, 2.)) / (2. * La * Lb) )
### Finding Theta 1,2,3
theta1 = atan2(WC_y , WC_x)
            theta2 = pi / 2 - a_ang - atan2(Lz, Lxy)
            theta3 = pi / 2 - b_ang - atan2(0.054, 1.5)
#######################
# Evaluating Transformation from 0 to 3
            R0_3 = (T0_1 * T1_2 * T2_3).evalf(subs={q1: theta1, q2: theta2, q3: theta3})[0:3, 0:3]
#######################################
# Evaluating Transformation from 3 to 6
            R3_6 = R0_3.T * Rot_EE_num
theta4 = atan2(R3_6[2,2], -R3_6[0,2])
theta5 = atan2(sqrt(pow(R3_6[0,2], 2) + pow(R3_6[2,2], 2)), R3_6[1,2])
theta6 = atan2(-R3_6[1,1], R3_6[1,0])
#######################################
# Populate response for the IK request
# In the next line replace theta1,theta2...,theta6 by your joint angle variables
joint_trajectory_point.positions = [theta1, theta2, theta3, theta4, theta5, theta6]
joint_trajectory_list.append(joint_trajectory_point)
rospy.loginfo("length of Joint Trajectory List: %s" % len(joint_trajectory_list))
return CalculateIKResponse(joint_trajectory_list)
def IK_server():
# initialize node and declare calculate_ik service
rospy.init_node('IK_server')
s = rospy.Service('calculate_ik', CalculateIK, handle_calculate_IK)
print "Ready to receive an IK request"
rospy.spin()
if __name__ == "__main__":
IK_server()
# ---------------------------------------------------------------------------
import codecs
import os
from hacktools import common
import constants
import game
def run(data, copybin=False, analyze=False):
infile = data + "extract/arm9.bin"
outfile = data + "repack/arm9.bin"
fontdata = data + "font_data.bin"
dictionarydata = data + "dictionary.asm"
binfile = data + "bin_input.txt"
datfile = data + "dat_input.txt"
binsize = os.path.getsize(infile)
table, invtable = game.getTable(data)
glyphs, dictionary = game.getGlyphs(data)
if not os.path.isfile(binfile):
common.logError("Input file", binfile, "not found")
return
if not os.path.isfile(datfile):
common.logError("Input file", datfile, "not found")
return
common.logMessage("Repacking BIN from", binfile, "...")
# Read all strings
translations = {}
strings = {}
with codecs.open(binfile, "r", "utf-8") as bin:
section = common.getSection(bin, "")
chartot, transtot = common.getSectionPercentage(section)
for jpstr in section:
if section[jpstr][0] != "":
translations[jpstr] = section[jpstr][0]
if section[jpstr][0] not in strings:
strings[section[jpstr][0]] = -1
elif jpstr not in strings:
strings[jpstr] = 0
if copybin:
common.copyFile(infile, outfile)
if os.path.isfile(data + "bmpcache.txt"):
os.remove(data + "bmpcache.txt")
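    # lastfreepos tracks the first unused byte of the main string block so that
    # DAT strings flagged as "redirect" can spill into it when they do not fit
    # in their original slot.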
lastfreepos = 0
with common.Stream(infile, "rb") as fin:
ptrgroups, allptrs = game.getBINPointerGroups(fin)
with common.Stream(outfile, "rb+") as f:
# Write all strings
outofspace = False
outchars = 0
lastgood = 0
f.seek(constants.mainptr["offset"])
for string in common.showProgress(strings):
writestr = string
if strings[string] == -1 and not writestr.startswith(">>") and "<ch1>" not in writestr and "<00>" not in writestr:
writestr = writestr.replace("<0A>", "|")
writestr = common.wordwrap(writestr, glyphs, constants.wordwrap, game.detectTextCode, default=0xa)
if outofspace:
common.logDebug("Skipping string", writestr)
outchars += len(writestr) - writestr.count("<") * 3
strings[string] = lastgood
else:
usedictionary = True
if writestr.startswith(">>"):
usedictionary = False
writestr = game.alignCenter(writestr[2:], glyphs) + "<00>"
common.logDebug("Writing string", writestr, "at", common.toHex(f.tell()))
strings[string] = lastgood = f.tell()
game.writeString(f, writestr, table, usedictionary and dictionary or {}, compress=usedictionary)
if "<ch1>" in writestr:
f.writeByte(0)
if f.tell() >= constants.mainptr["end"]:
outofspace = True
common.logMessage("Ran out of space while writing string", writestr)
common.logDebug("Finished at", common.toHex(f.tell()))
if outofspace:
common.logMessage("Characters left out:", outchars)
else:
lastfreepos = f.tell()
common.logMessage("Room for", common.toHex(constants.mainptr["end"] - lastfreepos), "more bytes")
# Change pointers
for ptrgroup in ptrgroups:
atstr = "@" + common.toHex(ptrgroup)
for ptr in ptrgroups[ptrgroup]:
f.seek(ptr["pos"])
fin.seek(ptr["ptr"])
if ptr["data"]:
jpstr = game.readData(fin, allptrs)
else:
jpstr = game.readString(fin, invtable, allptrs)
if jpstr + atstr in translations:
jpstr = translations[jpstr + atstr]
elif jpstr in translations:
jpstr = translations[jpstr]
if jpstr not in strings:
common.logError("String", jpstr, "not found")
else:
common.logDebug("Writing pointer", common.toHex(strings[jpstr]), "for string", jpstr, "at", common.toHex(f.tell()))
f.writeUInt(0x02000000 + strings[jpstr])
common.logMessage("Done! Translation is at {0:.2f}%".format((100 * transtot) / chartot))
common.logMessage("Text statistics:")
common.logMessage(" Groups printed: {0}".format(game.text_stats_groups))
common.logMessage(" Characters printed: {0}".format(game.text_stats_other))
common.logMessage(" Dictionary saved: {0}-{1} overhead ({2}%)".format(game.text_stats_dict_saved, game.text_stats_dict_overhead, (game.text_stats_dict_overhead * 100) // game.text_stats_dict_saved))
common.logMessage(" Compression saved: {0}".format(game.text_stats_compression_saving))
common.logMessage("Repacking DAT from", datfile, "...")
chartot = transtot = 0
with codecs.open(datfile, "r", "utf-8") as dat:
with common.Stream(infile, "rb") as fin:
with common.Stream(outfile, "rb+") as f:
for datname in constants.datptrs:
if type(constants.datptrs[datname]) is not list and "main" in constants.datptrs[datname]:
continue
section = common.getSection(dat, datname)
if len(section) == 0:
continue
chartot, transtot = common.getSectionPercentage(section, chartot, transtot)
datptrs = []
if type(constants.datptrs[datname]) is list:
for datoffset in constants.datptrs[datname]:
datptrs.append(datoffset)
else:
datptrs.append(constants.datptrs[datname])
# Read all strings first
allstrings = []
for datptr in datptrs:
writegroups = "writegroups" in datptr and datptr["writegroups"]
usedictionary = "dictionary" in datptr and datptr["dictionary"]
redirect = "redirect" in datptr and datptr["redirect"]
wordwrap = "wordwrap" in datptr and datptr["wordwrap"] or 0
aligncenter = "aligncenter" in datptr and datptr["aligncenter"] or 0
fin.seek(datptr["offset"])
if "end" in datptr:
while fin.tell() < datptr["end"]:
strstart = fin.tell()
jpstr = game.readString(fin, invtable)
fin.readZeros(binsize)
allstrings.append({"start": strstart, "end": fin.tell() - 1, "str": jpstr,
"writegroups": writegroups, "dictionary": usedictionary, "wordwrap": wordwrap, "aligncenter": aligncenter, "redirect": redirect})
else:
ptrs = []
for i in range(datptr["count"]):
ptrpos = fin.tell()
ptrs.append({"address": fin.readUInt() - 0x02000000, "pos": ptrpos})
if "skip" in datptr:
fin.seek(datptr["skip"], 1)
for i in range(datptr["count"]):
fin.seek(ptrs[i]["address"])
strstart = fin.tell()
jpstr = game.readString(fin, invtable)
fin.readZeros(binsize)
allstrings.append({"start": strstart, "end": fin.tell() - 1, "str": jpstr,
"ptrpos": ptrs[i]["pos"], "writegroups": writegroups, "dictionary": usedictionary, "wordwrap": wordwrap, "aligncenter": aligncenter, "redirect": redirect})
# Check how much space is used by these strings and update them with the translations
minpos = 0xffffffff
maxpos = 0
for jpstr in allstrings:
if jpstr["start"] < minpos:
minpos = jpstr["start"]
if jpstr["end"] > maxpos:
maxpos = jpstr["end"]
check = jpstr["str"]
if check in section and section[check][0] != "":
jpstr["str"] = section[check].pop()
if len(section[check]) == 0:
del section[check]
if jpstr["wordwrap"] > 0:
jpstr["str"] = common.wordwrap(jpstr["str"], glyphs, jpstr["wordwrap"], game.detectTextCode, default=0xa)
if jpstr["str"].startswith("<<"):
jpstr["str"] = game.alignLeft(jpstr["str"][2:], glyphs)
if jpstr["str"].startswith(">>"):
jpstr["str"] = game.alignCenter(jpstr["str"][2:], glyphs) + "<00>"
if jpstr["aligncenter"] > 0:
jpstr["str"] = game.alignCenterSpace(jpstr["str"], glyphs, jpstr["aligncenter"]) + "<00>"
if analyze:
allspace = []
for i in range(minpos, maxpos + 1):
allspace.append(i)
for jpstr in allstrings:
for i in range(jpstr["start"], jpstr["end"] + 1):
allspace.remove(i)
common.logMessage(datname)
common.logMessage(allspace)
# Start writing them
f.seek(minpos)
writingmain = False
for jpstr in allstrings:
if "ptrpos" in jpstr and datname != "ItemShop":
common.logDebug("Writing pointer string", jpstr["str"], "at", common.toHex(f.tell()))
# Write the string and update the pointer
strpos = f.tell()
stringfits = game.writeString(f, jpstr["str"], table, dictionary if jpstr["dictionary"] else {}, maxlen=maxpos - f.tell(), writegroups=jpstr["writegroups"], checkfit=jpstr["redirect"])
if jpstr["redirect"] and not stringfits and lastfreepos > 0 and not writingmain:
common.logDebug("String", jpstr["str"], "didn't fit, enabling writing to main...")
f.seek(lastfreepos)
maxpos = constants.mainptr["end"]
game.writeString(f, jpstr["str"], table, dictionary if jpstr["dictionary"] else {}, maxlen=maxpos - f.tell(), writegroups=jpstr["writegroups"], checkfit=jpstr["redirect"])
writingmain = True
f.writeUIntAt(jpstr["ptrpos"], strpos + 0x02000000)
else:
# Try to fit the string in the given space
f.seek(jpstr["start"])
common.logDebug("Writing fixed string", jpstr["str"], "at", common.toHex(f.tell()))
game.writeString(f, jpstr["str"], table, dictionary if jpstr["dictionary"] else {}, maxlen=jpstr["end"] - f.tell(), writegroups=jpstr["writegroups"])
while f.tell() < jpstr["end"]:
f.writeByte(0)
if writingmain:
lastfreepos = f.tell()
common.logMessage("Room for", common.toHex(constants.mainptr["end"] - lastfreepos), "more bytes")
common.logMessage("Done! Translation is at {0:.2f}%".format((100 * transtot) / chartot))
# Export font data, dictionary data and apply armips patch
with common.Stream(fontdata, "wb") as f:
for charcode in range(0x9010, 0x908f + 1):
c = invtable[charcode]
f.writeByte(glyphs[c].width)
with codecs.open(dictionarydata, "w", "utf-8") as f:
alldictionary = []
for dictentry in dictionary:
dictname = "DICTIONARY_" + common.toHex(dictionary[dictentry]).lower()
dictvalue = dictname + ":\n" + game.writeDictionaryString(dictentry, table)
f.write(".dw " + dictname + "\n")
alldictionary.append(dictvalue)
f.write("\n")
f.write("\n".join(alldictionary))
f.write("\n")
common.armipsPatch(common.bundledFile("bin_patch.asm"))
# ---------------------------------------------------------------------------
import time
import random
from threadly import Threadly

currentBot = 1
def worker(**kwargs):
botID = kwargs["botID"]
resultsQ = kwargs["resultsQ"]
time.sleep(random.randint(1,15))
resultsQ.put({"botID":botID, "time":time.time()})
def workerKwargs():
global currentBot
tosend = {"botID":"bot {}".format(currentBot)}
currentBot += 1
return tosend
def finish(**kwargs):
greeting = kwargs["greeting"]
resultsQ = kwargs["resultsQ"]
overallTime = kwargs["totalTime"]
print("{}, It took {} seconds".format(greeting, overallTime))
print("bot results")
for i in range(resultsQ.qsize()):
aresult = resultsQ.get()
print("bot {botID} finished at {time}".format(**aresult))
print("Starting..")
mytest = Threadly()
testerkwargs = {"workerFunc":worker,
"workerKwargGenFunc":workerKwargs,
"numberOfWorkers":10,
"numberOfThreads":2,
"finishFunc":finish,
"finishFuncKwargs":{"greeting":"Howdy"},
"delayBetweenThreads":0.1}
testerkwargs2 = {"workerFunc":worker,
"workerKwargGenFunc":workerKwargs,
"lengthOfTest":20,
"numberOfThreads":20,
"finishFunc":finish,
"finishFuncKwargs":{"greeting":"Howdy"},
"delayBetweenThreads":0.1}
random.seed()
mytest.runTest(**testerkwargs2)
print("Done")
# ---------------------------------------------------------------------------
"""
Searching for optimal parameters.
"""
from section1_video5_data import get_data
from sklearn import model_selection
from xgboost import XGBClassifier
seed=123
# Load prepared data
X, Y = get_data('../data/video1_diabetes.csv')
# Build our single model
c = XGBClassifier(random_state=seed)
#n_trees = range(500, 1000, 50)
#max_depth = range(1, 3) # 72.44% - {'max_depth': 1, 'n_estimators': 500}
#max_depth = range(3, 5) # 68.70% - {'max_depth': 3, 'n_estimators': 500}
n_trees = range(10, 500, 50)
max_depth = range(3, 5) # - 74.10% {'max_depth': 1, 'n_estimators': 260}
#max_depth = range(1, 3) # - 72.24% {'max_depth': 3, 'n_estimators': 60}
params_to_search = dict(n_estimators=n_trees, max_depth=max_depth)
grid_search = model_selection.GridSearchCV(c, params_to_search, scoring="neg_log_loss", n_jobs=-1, cv=10, iid=False)
grid_result = grid_search.fit(X, Y)
print("Found best params: %s" % (grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
params = grid_result.cv_results_['params']
for m, p in zip(means, params):
print("%f: %r" % (m, p))
# Check accuracy of a classifier once again
c = XGBClassifier(random_state=seed, **grid_result.best_params_)
results = c.fit(X, Y)
# using 10-fold Cross Validation.
results_kfold_model = model_selection.cross_val_score(c, X, Y, cv=10)
print("XGBoost accuracy:\t{:2.2f}%".format(results_kfold_model.mean()*100))
# ---------------------------------------------------------------------------
import numpy as np
from pytest import mark
from numpy.testing import assert_allclose
@mark.plots
def test_transition_map(init_plots):
axes_data = init_plots.plot_transition_map(cagr=False, full_frontier=False).lines[0].get_data()
values = np.genfromtxt('data/test_transition_map.csv', delimiter=',')
assert_allclose(axes_data, values, rtol=1e-1, atol=1e-1)
@mark.plots
def test_plot_assets(init_plots):
axes_data = init_plots.plot_assets(tickers='names').collections[0].get_offsets().data
values = np.genfromtxt('data/test_plot_assets.csv', delimiter=',')
assert_allclose(axes_data, values, rtol=1e-1, atol=1e-1)
@mark.plots
def test_plot_pair_ef(init_plots):
axes_data = init_plots.plot_pair_ef(tickers='names').lines[0].get_data()
values = np.genfromtxt('data/test_plot_pair_ef.csv', delimiter=',')
assert_allclose(axes_data, values, rtol=1e-1, atol=1e-1)
# ---------------------------------------------------------------------------
import time
from membase.api.rest_client import RestConnection, Bucket
from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import MemcachedClientHelper
from basetestcase import BaseTestCase
from mc_bin_client import MemcachedError
from couchbase_helper.documentgenerator import BlobGenerator
from threading import Thread
class StatsCrashRepro(BaseTestCase):
def setUp(self):
        super(StatsCrashRepro, self).setUp()
self.timeout = 120
self.bucket_name = self.input.param("bucket", "default")
self.bucket_size = self.input.param("bucket_size", 100)
self.data_size = self.input.param("data_size", 2048)
self.threads_to_run = self.input.param("threads_to_run", 5)
# self.nodes_in = int(self.input.param("nodes_in", 1))
# self.servs_in = [self.servers[i + 1] for i in range(self.nodes_in)]
# rebalance = self.cluster.async_rebalance(self.servers[:1], self.servs_in, [])
# rebalance.result()
bucket_params=self._create_bucket_params(server=self.servers[0], size=self.bucket_size, replicas=self.num_replicas)
self.cluster.create_default_bucket(bucket_params)
self.buckets.append(Bucket(name="default",
num_replicas=self.num_replicas, bucket_size=self.bucket_size))
rest = RestConnection(self.servers[0])
self.nodes_server = rest.get_nodes()
def tearDown(self):
        super(StatsCrashRepro, self).tearDown()
def _load_doc_data_all_buckets(self, op_type='create', start=0, expiry=0):
loaded = False
count = 0
gen_load = BlobGenerator('warmup', 'warmup-', self.data_size, start=start, end=self.num_items)
while not loaded and count < 60:
try :
self._load_all_buckets(self.servers[0], gen_load, op_type, expiry)
loaded = True
except MemcachedError as error:
if error.status == 134:
loaded = False
self.log.error("Memcached error 134, wait for 5 seconds and then try again")
count += 1
time.sleep(5)
def _get_stats(self, stat_str='all'):
# for server in self.nodes_server:
server = self.servers[0]
mc_conn = MemcachedClientHelper.direct_client(server, self.bucket_name, self.timeout)
stat_result = mc_conn.stats(stat_str)
# self.log.info("Getting stats {0} : {1}".format(stat_str, stat_result))
self.log.info("Getting stats {0}".format(stat_str))
mc_conn.close()
def _run_get(self):
server = self.servers[0]
mc_conn = MemcachedClientHelper.direct_client(server, self.bucket_name, self.timeout)
for i in range(self.num_items):
key = "warmup{0}".format(i)
mc_conn.get(key)
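    # run_test loads documents until memory use passes the configured threshold
    # and the active resident ratio drops below the target (DGM), waits for
    # persistence, then repeatedly fires concurrent "stats all" / "stats reset"
    # requests while a reader thread gets every key back.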
def run_test(self):
ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
active_resident_threshold = int(self.input.param("active_resident_threshold", 10))
mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
stats = mc.stats()
threshold = int(self.input.param('threshold', stats[ep_threshold]))
threshold_reached = False
self.num_items = self.input.param("items", 10000)
self._load_doc_data_all_buckets('create')
# load items till reached threshold or mem-ratio is less than resident ratio threshold
while not threshold_reached :
mem_used = int(mc.stats()["mem_used"])
if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
self.log.info("mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s " % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"]))
items = self.num_items
self.num_items += self.input.param("items", 10000)
self._load_doc_data_all_buckets('create', items)
else:
threshold_reached = True
self.log.info("DGM state achieved!!!!")
# wait for draining of data before restart and warm up
for bucket in self.buckets:
RebalanceHelper.wait_for_persistence(self.nodes_server[0], bucket, bucket_type=self.bucket_type)
while True:
# read_data_task = self.cluster.async_verify_data(self.master, self.buckets[0], self.buckets[0].kvs[1])
read_data_task = Thread(target=self._run_get)
read_data_task.start()
#5 threads to run stats all and reset asynchronously
start = time.time()
while (time.time() - start) < 300:
stats_all_thread = []
stats_reset_thread = []
for i in range(self.threads_to_run):
stat_str = ''
stats_all_thread.append(Thread(target=self._get_stats, args=[stat_str]))
stats_all_thread[i].start()
stat_str = 'reset'
stats_reset_thread.append(Thread(target=self._get_stats, args=[stat_str]))
stats_reset_thread[i].start()
for i in range(self.threads_to_run):
stats_all_thread[i].join()
stats_reset_thread[i].join()
del stats_all_thread
del stats_reset_thread
# read_data_task.result()
read_data_task.join()
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
{
'name': "Odoo Cogito Move Mutual",
'summary': "",
'author': "CogitoWEB",
'description': "Odoo Cogito move mutual",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Test',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'account'],
# always loaded
'data': [
'view/account_mutual_view.xml',
# 'security/ir.model.access.csv',
# 'security/security.xml'
],
# only loaded in demonstration mode
'demo': [
# 'demo.xml',
],
'installable': True
}