max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
tmdb_client.py | SztMar/movies_catalogue | 0 | 11200 | <gh_stars>0
import requests
import json
import os
API_TOKEN = os.environ.get("TMDB_API_TOKEN", "")
def call_tmdb_api(_endpoint):
endpoint = f"https://api.themoviedb.org/3/{_endpoint}"
full_url = f'{endpoint}?api_key={API_TOKEN}'
response = requests.get(full_url)
response.raise_for_status()
return response.json()
def get_popular_movies():
return call_tmdb_api(f"movie/popular")
def get_movies_list(list_type):
return call_tmdb_api(f"movie/{list_type}")
def get_poster_url(poster_api_path, size="w324"):
base_url = "https://image.tmdb.org/t/p/"
return f"{base_url}{size}/{poster_api_path}"
def get_single_movie(movie_id):
return call_tmdb_api(f"movie/{movie_id}")
def get_single_movie_cast(movie_id):
return call_tmdb_api(f"movie/{movie_id}/credits")
def get_movies(how_many, list_type='popular'):
data = get_movies_list(list_type)
return data["results"][:how_many]
def get_movie_images(movie_id):
return call_tmdb_api(f"movie/{movie_id}/images")
| 2.640625 | 3 |
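A minimal driver sketch for the client above (not part of the original file); it assumes a valid `TMDB_API_TOKEN` is set before import and relies on the documented TMDB response fields `title` and `poster_path`:

```python
# Hypothetical usage of tmdb_client.py (illustration only).
import os

os.environ.setdefault("TMDB_API_TOKEN", "your-token-here")  # assumed placeholder

import tmdb_client

# Fetch the top 8 popular movies and print each title with its poster URL.
for movie in tmdb_client.get_movies(8, list_type="popular"):
    poster = tmdb_client.get_poster_url(movie["poster_path"])
    print(movie["title"], poster)
```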
hanibal/ans_escuela/tipo_colaborador.py | Christian-Castro/castro_odoo8 | 0 | 11201 | <reponame>Christian-Castro/castro_odoo8
# -*- coding: utf-8 -*-
from openerp import models, fields, api, _
class Tipo_Colaborador(models.Model):
_name = 'tipo.colaborador'
_rec_name = 'name'
name=fields.Char(string='Nombre')
    active=fields.Boolean(string='Activo',default=True)
| 1.75 | 2 |
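As a hedged illustration (not from castro_odoo8), another model would typically reference this catalog through a many2one field using the same new-API imports; model and field names below are assumptions:

```python
# Hypothetical sibling model for illustration only.
from openerp import models, fields

class ColaboradorEjemplo(models.Model):
    _name = 'colaborador.ejemplo'

    name = fields.Char(string='Nombre')
    # Link each collaborator to a row of the tipo.colaborador catalog.
    tipo_id = fields.Many2one('tipo.colaborador', string='Tipo de colaborador')
```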
spyder/utils/tests/test_environ.py | Nicztin/spyder | 1 | 11202 | <reponame>Nicztin/spyder
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for environ.py
"""
# Standard library imports
import os
# Test library imports
import pytest
# Third party imports
from qtpy.QtCore import QTimer
# Local imports
from spyder.utils.test import close_message_box
@pytest.fixture
def environ_dialog(qtbot):
"Setup the Environment variables Dialog taking into account the os."
QTimer.singleShot(1000, lambda: close_message_box(qtbot))
if os.name == 'nt':
from spyder.utils.environ import WinUserEnvDialog
dialog = WinUserEnvDialog()
else:
from spyder.utils.environ import EnvDialog
dialog = EnvDialog()
qtbot.addWidget(dialog)
return dialog
def test_environ(environ_dialog, qtbot):
"""Test the environment variables dialog."""
environ_dialog.show()
assert environ_dialog
if __name__ == "__main__":
pytest.main()
| 2.125 | 2 |
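The `QTimer.singleShot(1000, ...)` call in the fixture is what keeps the test from blocking: any modal message box raised while the dialog opens is dismissed a second later. A generic standalone restatement of that pattern (hypothetical, not Spyder code):

```python
# Hypothetical demo of the auto-close pattern used in the fixture above.
from qtpy.QtCore import QTimer
from qtpy.QtWidgets import QApplication, QMessageBox

app = QApplication([])
box = QMessageBox()
box.setText("This dialog closes itself")
QTimer.singleShot(1000, box.accept)  # fire once, one second from now
box.exec_()  # returns as soon as accept() runs
```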
comsole.py | MumuNiMochii/Dumb_Dump | 1 | 11203 | import math
def main():
print("""
\tComsole by MumuNiMochii version beta 1.6.23
\t\"Originally made with C\"
\tMAIN MENU
\tWhat do you want to execute and evaluate?
\t1.) Add two addends
\t2.) Subtract a minuend from its subtrahend
\t3.) Multiply a multiplicand by its multiplier
\t4.) Divide a dividend by its divisor
\t5.) Raise to power a base number
\t6.) Get the square root of a number
\t7.) Compare two numbers
\t8.) Compare three numbers
\t9.) Auto-summation up to inputted value
\t10.) Auto-factorial up to inputted value
\t0.) Exit
""")
opt = int(input("\t\tEnter the number of your choice: "))
if opt == 1:
def add():
print("\n\tADD VALUES")
x = float(input("\t1.) Enter a first value: "))
y = float(input("\t2.) Enter an second value: "))
print("\t3.) The number " + str(x) + " is added by " + str(y) + ", and is equals to " + str(float(x + y)))
add()
elif opt == 2:
def sub():
print("\n\tSUBTRACT VALUES")
x = float(input("\t1.) Enter a first value: "))
y = float(input("\t2.) Enter an second value: "))
print("\t3.) The number " + str(x) + " is subtracted by " + str(y) + ", and is equals to " + str(float(x-y)))
sub()
elif opt == 3:
def mul():
print("\n\tMULTIPLY VALUES")
x = float(input("\t1.) Enter a first value: "))
y = float(input("\t2.) Enter an second value: "))
print("\t3.) The number "+str(x)+" is multiplied by "+str(y)+", and is equals to "+str(float(x*y)))
mul()
elif opt == 4:
def div():
print("\n\tDIVIDE VALUES")
x = float(input("\t1.) Enter a first value: "))
y = float(input("\t2.) Enter an second value: "))
print("\t3.) The number "+str(x)+" is divided by "+str(y)+", and is equals to "+str(float(x/y)))
div()
elif opt == 5:
def pow():
print("\n\tPOWERED VALUE")
x = float(input("\t1.) Enter a base value: "))
y = int(input("\t2.) Enter an exponent value: "))
print("\t3.) The number "+str(x)+" is raised to "+str(y)+", and is equals to "+str(math.pow(x, y))+".")
pow()
elif opt == 6:
def sqrt():
print("\n\tRADICAL VALUE")
x = float(input("\t1.) Enter a value: "))
y = math.sqrt(x)
print("\t2.) The number is "+str(int(x))+" and its square root is: "+str(y)+".")
sqrt()
elif opt == 7:
def comp2():
print("\n\tCOMPARE TWO VALUES")
x = int(input("\t1.) Enter a first value: "))
y = int(input("\t2.) Enter a second value: "))
msg = "\t3.) Your numbers are "+str(x)+", and "+str(y)+", where "
if x > y:
print(msg + str(x) + " is greater than " + str(y)+".")
else:
print(msg + str(y) + " is greater than " + str(x)+".")
comp2()
elif opt == 8:
def comp3():
print("\n\tCOMPARE THREE VALUES")
x = int(input("\t1.) Enter a first value: "))
y = int(input("\t2.) Enter a second value: "))
z = int(input("\t3.) Enter a third value: "))
msg = "\t4.) Your numbers are "+str(x)+", "+str(y)+", and "+str(z)+", where "
if x > y and x > z:
print(msg+str(x)+" is greater than the values "+str(y)+" and "+str(z)+".")
elif y > x and y > z:
print(msg+str(y)+" is greater than the values "+str(x)+" and "+str(z)+".")
else:
print(msg+str(z)+" is greater than the values "+str(x)+" and "+str(y)+".")
comp3()
elif opt == 9:
def summ():
print("\n\tSUMMATION UP TO INPUT VALUE")
x = int(input("\t1.) Count up to inputted number: "))
            a = list(range(0, x + 1))
            print("\t2.) Summation of numbers: " + str(a))
            total = 0
            for i in a:
                total += i
            print("\t3.) Sum: " + str(total))
summ()
elif opt == 10:
def fact():
print("\n\tFACTORIAL INPUT VALUE")
x = int(input("\t1.) Factorial the inputted number: "))
            a = list(range(1, x + 1))
            print("\t2.) Factors to multiply: " + str(a))
            total = 1
            for i in a:
                total *= i
            print("\t3.) Product: " + str(total))
fact()
else:
print("Invalid input.")
main()
| 4.0625 | 4 |
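The long `if/elif` chain in `main()` could be collapsed with a dispatch table; a minimal sketch of the idea (a hypothetical refactor, not the author's code, showing only two of the ten menu entries):

```python
# Hypothetical dispatch-table refactor of comsole's menu (illustration only).
import math

def add():
    x = float(input("First value: "))
    y = float(input("Second value: "))
    print(f"{x} plus {y} equals {x + y}")

def sqrt():
    x = float(input("Value: "))
    print(f"The square root of {x} is {math.sqrt(x)}")

# The remaining menu entries would register their handlers here.
HANDLERS = {1: add, 6: sqrt}

def main():
    opt = int(input("Enter the number of your choice: "))
    HANDLERS.get(opt, lambda: print("Invalid input."))()

main()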
test/test_discussions.py | fibasile/ticket-gateway | 0 | 11204 | <reponame>fibasile/ticket-gateway<gh_stars>0
import unittest
import json
from server import server
from models.abc import db
from repositories import ChannelRepository, GitlabProvider
from unittest.mock import MagicMock, Mock
# from flask import make_response
# from flask.json import jsonify
from util import test_client
class TestDiscussions(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = test_client(server)
cls._getTicket = GitlabProvider.getTicket
cls._addTicketDiscussion = GitlabProvider.addTicketDiscussion
cls._createTicketDiscussion = GitlabProvider.createTicketDiscussion
def setUp(self):
db.create_all()
ChannelRepository.create(
slug='a-channel',
title='test channel',
path='/dummy/path'
)
def tearDown(self):
db.session.remove()
db.drop_all()
cls = TestDiscussions
GitlabProvider.getTicket = cls._getTicket
GitlabProvider.addTicketDiscussion = cls._addTicketDiscussion
GitlabProvider.createTicketDiscussion = cls._createTicketDiscussion
def test_get(self):
"""The GET on `/api/channel/a-channel/tickets/ticket_id/discussions`"""
GitlabProvider.getTicket = MagicMock()
GitlabProvider.getTicket.return_value = Mock(
discussions=Mock(list=Mock(return_value=[
{"id": "3243", "title": "test"}
])))
response = self.client.get(
'/api/channel/a-channel/tickets/some_ticket/discussions')
self.assertEqual(response.status_code, 200)
GitlabProvider.getTicket.assert_called_with(
'/dummy/path', 'some_ticket')
# GitlabProvider.getMembers.assert_called_with('/dummy/path')
response_json = json.loads(response.data.decode('utf-8'))
self.assertEqual(
response_json,
{'data': [{"id": "3243", "title": "test"}]}
)
def test_post_new(self):
"""POST on `/api/channel/a-channel/tickets/ticket_id/discussions`"""
"""should create a comment in a bew discussion """
GitlabProvider.addTicketDiscussion = MagicMock(
name="addTicketDiscussion")
GitlabProvider.addTicketDiscussion.return_value = {"status": "success"}
response = self.client.post(
'/api/channel/a-channel/tickets/some_ticket/discussions',
json={
"discussion_id": "3232",
"user_id": "3234",
"body": "Some comment"
})
self.assertEqual(response.status_code, 201)
GitlabProvider.addTicketDiscussion.assert_called_with(
'/dummy/path', 'some_ticket', '3232', '3234', 'Some comment')
def test_post_existing(self):
"""POST on `/api/channel/a-channel/tickets/ticket_id/discussions`"""
"""should create a comment in an existing discussion """
GitlabProvider.createTicketDiscussion = MagicMock(
name="createTicketDiscussion")
GitlabProvider.createTicketDiscussion.return_value = {
"status": "success"}
response = self.client.post(
'/api/channel/a-channel/tickets/some_ticket/discussions',
json={
"user_id": "3234",
"body": "Some comment"
})
self.assertEqual(response.status_code, 201)
GitlabProvider.createTicketDiscussion.assert_called_with(
'/dummy/path', 'some_ticket', '3234', 'Some comment')
| 2.3125 | 2 |
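The manual save/restore of `GitlabProvider` attributes in `setUpClass`/`tearDown` could also be expressed with `unittest.mock.patch.object`, which undoes itself automatically. A hedged sketch of `test_get` rewritten that way (hypothetical variant, not part of the original suite):

```python
# Hypothetical variant of test_get using patch.object (illustration only).
from unittest.mock import patch, Mock

def test_get_with_patch(self):
    fake_ticket = Mock(discussions=Mock(list=Mock(return_value=[
        {"id": "3243", "title": "test"}
    ])))
    with patch.object(GitlabProvider, "getTicket",
                      return_value=fake_ticket) as get_ticket:
        response = self.client.get(
            '/api/channel/a-channel/tickets/some_ticket/discussions')
        self.assertEqual(response.status_code, 200)
        get_ticket.assert_called_with('/dummy/path', 'some_ticket')
```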
cogs/music.py | ETJeanMachine/Pouty-Bot-Discord | 0 | 11205 | <gh_stars>0
"""
This is an example cog that shows how you would make use of Lavalink.py.
This example cog requires that you have python 3.6 or higher due to the
f-strings.
"""
import math
import re
import discord
import lavalink
from discord.ext import commands
from discord.ext import menus
from .utils import checks
from typing import List
import asyncio
import logging
url_rx = re.compile(r'https?://(?:www\.)?.+')
class LavalinkVoiceClient(discord.VoiceProtocol):
def __init__(self, client: discord.Client, channel: discord.abc.Connectable):
self.client = client
self.channel = channel
self.connect_event = asyncio.Event()
async def on_voice_server_update(self, data):
lavalink_data = {
't': 'VOICE_SERVER_UPDATE',
'd': data
}
await self.lavalink.voice_update_handler(lavalink_data)
async def on_voice_state_update(self, data):
lavalink_data = {
't': 'VOICE_STATE_UPDATE',
'd': data
}
await self.lavalink.voice_update_handler(lavalink_data)
async def connect(self, *, timeout: float, reconnect: bool) -> None:
await self.channel.guild.change_voice_state(channel=self.channel)
try:
self.lavalink : lavalink.Client = self.client.lavalink
except AttributeError:
self.client.lavalink = self.lavalink = lavalink.Client(self.client.user.id)
self.client.lavalink.add_node(
'localhost',
2333,
'youshallnotpass',
'us',
'default-node')
async def disconnect(self, *, force: bool) -> None:
await self.channel.guild.change_voice_state(channel=None)
player = self.lavalink.player_manager.get(self.channel.guild.id)
if player:
player.channel_id = False
await player.stop()
self.cleanup()
class MusicQueue(menus.ListPageSource):
def __init__(self, data: List[lavalink.AudioTrack] , ctx: commands.Context, player):
self.ctx = ctx
self.player = player
super().__init__(data, per_page=10)
async def format_page(self, menu, entries):
offset = menu.current_page * self.per_page
embed = discord.Embed(title="Queue",
description="\n".join(
f'`{i+1}.` [{v.title}]({v.uri}) requested by **{self.ctx.guild.get_member(v.requester).name}**' for i, v in enumerate(entries, start=offset)
)
)
status = (f"\N{TWISTED RIGHTWARDS ARROWS} Shuffle: {'enabled' if self.player.shuffle else 'disabled'} | "
f"\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS} Repeat: {'enabled' if self.player.repeat else 'disabled'} | "
f"\N{SPEAKER} Volume : {self.player.volume}")
embed.set_footer(text=status)
return embed
def can_stop():
def predicate(ctx):
if not ctx.guild:
raise commands.CheckFailure("Only usable within a server")
if not ctx.guild.me.voice:
return True
my_voice = ctx.guild.me.voice.channel
try:
if checks.is_owner_or_moderator_check(ctx.message):
return True
except commands.CheckFailure:
pass
if ctx.guild.me.voice:
if len(my_voice.members) == 2 and ctx.author in my_voice.members:
return True
if len(my_voice.members) == 1:
return True
raise commands.CheckFailure(
"Can only use this when nobody or "
"only one user in voice channel with me"
)
return commands.check(predicate)
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
if not hasattr(self.bot, 'lavalink'):
self.bot.lavalink = lavalink.Client(self.bot.user.id)
self.bot.lavalink.add_node(
'localhost',
2333,
'youshallnotpass',
'us',
'default-node')
self.bot.lavalink.add_event_hook(self.track_hook)
self.pages = {}
self.skip_votes = {}
def current_voice_channel(self, ctx):
if ctx.guild and ctx.guild.me.voice:
return ctx.guild.me.voice.channel
return None
def cog_unload(self):
self.bot.lavalink._event_hooks.clear()
async def cog_before_invoke(self, ctx):
guild_check = ctx.guild is not None
# This is essentially the same as `@commands.guild_only()`
# except it saves us repeating ourselves (and also a few lines).
if guild_check:
# Ensure that the bot and command author
# share a mutual voicechannel.
await self.ensure_voice(ctx)
return guild_check
async def cog_after_invoke(self,ctx):
for page in self.pages.get(ctx.message.guild.id, []):
await page.show_page(page.current_page)
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
"""
deafen yourself when joining a voice channel
"""
if member.id == member.guild.me.id and after.channel is None:
if member.guild.voice_client:
await member.guild.voice_client.disconnect(force=True)
await self.bot.change_presence(activity=None)
if member.id != member.guild.me.id or not after.channel:
return
my_perms = after.channel.permissions_for(member)
if not after.deaf and my_perms.deafen_members:
await member.edit(deafen=True)
    async def track_hook(self, event):
        if isinstance(event, lavalink.events.QueueEndEvent):
            guild_id = int(event.player.guild_id)
            guild : discord.Guild = self.bot.get_guild(guild_id)
            await guild.voice_client.disconnect(force=True)
            # Disconnect from the channel -- there's nothing else to play.
        if isinstance(event, lavalink.events.TrackEndEvent):
            # guild_id was previously only bound in the QueueEndEvent branch,
            # which raised NameError here; derive it from the event's player.
            guild_id = int(event.player.guild_id)
            if self.skip_votes and guild_id in self.skip_votes.keys():
                self.skip_votes[guild_id].clear()
if isinstance(event, lavalink.events.TrackStartEvent):
await self.bot.change_presence(
activity=discord.Activity(type=discord.ActivityType.listening, name=event.player.current.title)
)
if isinstance(event, lavalink.events.TrackExceptionEvent):
channel = event.player.fetch('channel')
await channel.send(f"Error while playing Track: "
f"**{event.track.title}**:"
f"\n`{event.exception}`")
@commands.group(aliases=['p'],invoke_without_command=True)
async def play(self, ctx, *, query: str):
""" Searches and plays a song from a given query. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not url_rx.match(query):
query = f'ytsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Nothing found!')
embed = discord.Embed(color=discord.Color.blurple())
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
player.add(requester=ctx.author.id, track=track)
embed.title = 'Playlist Enqueued!'
embed.description = (f'{results["playlistInfo"]["name"]}'
f'- {len(tracks)} tracks')
else:
track = results['tracks'][0]
embed.title = 'Track Enqueued'
embed.description = (f'[{track["info"]["title"]}]'
f'({track["info"]["uri"]})')
player.add(requester=ctx.author.id, track=track)
await ctx.send(embed=embed)
if not player.is_playing:
await player.play()
@play.command("soundcloud", aliases=['sc'])
async def sc_play(self, ctx, *, query: str):
"""
search and play songs from soundcloud
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not url_rx.match(query):
query = f'scsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Nothing found!')
embed = discord.Embed(color=discord.Color.blurple())
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
player.add(requester=ctx.author.id, track=track)
embed.title = 'Playlist Enqueued!'
embed.description = (f'{results["playlistInfo"]["name"]}'
f'- {len(tracks)} tracks')
else:
track = results['tracks'][0]
embed.title = 'Track Enqueued'
embed.description = (f'[{track["info"]["title"]}]'
f'({track["info"]["uri"]})')
player.add(requester=ctx.author.id, track=track)
await ctx.send(embed=embed)
if not player.is_playing:
await player.play()
@commands.command()
async def seek(self, ctx, *, seconds: int):
""" Seeks to a given position in a track. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if ctx.author.id != player.current.requester:
return await ctx.send("Only requester can seek.")
track_time = player.position + (seconds * 1000)
await player.seek(track_time)
await ctx.send(
f'Moved track to **{lavalink.utils.format_time(track_time)}**'
)
@commands.command(name="fskip", aliases=['forceskip'])
@checks.is_owner_or_moderator()
async def force_skip(self, ctx):
"""
can only be invoked by moderators,
immediately skips the current song
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing.')
await player.skip()
if self.skip_votes:
self.skip_votes[ctx.guild.id].clear()
await ctx.send("⏭ | Skipped by moderator")
@commands.command()
async def skip(self, ctx):
"""
if invoked by requester skips the current song
otherwise starts a skip vote, use again to remove skip vote
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing.')
current_voice = self.current_voice_channel(ctx)
if (ctx.author.id == player.current.requester
or len(current_voice.members) <= 2):
await player.skip()
if ctx.guild.id in self.skip_votes.keys():
self.skip_votes[ctx.guild.id].clear()
await ctx.send('⏭ | Skipped by requester.')
else:
if ctx.guild.id not in self.skip_votes.keys():
self.skip_votes[ctx.guild.id] = {ctx.author.id}
else:
                if ctx.author.id in self.skip_votes[ctx.guild.id]:
self.skip_votes[ctx.guild.id].remove(ctx.author.id)
else:
self.skip_votes[ctx.guild.id].add(ctx.author.id)
skip_vote_number = len(self.skip_votes[ctx.guild.id])
number_of_users_in_voice = len(current_voice.members)-1
if skip_vote_number >= number_of_users_in_voice / 2:
await player.skip()
self.skip_votes[ctx.guild.id].clear()
await ctx.send('⏭ | Skip vote passed.')
else:
votes_needed = \
math.ceil(number_of_users_in_voice/2) - skip_vote_number
await ctx.send(f"current skip vote: "
f"{votes_needed}"
f"more vote(s) needed "
f"for skip")
@commands.command(aliases=['np', 'n', 'playing'])
async def now(self, ctx):
""" Shows some stats about the currently playing song. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.current:
return await ctx.send('Nothing playing.')
position = lavalink.utils.format_time(player.position)
requester = ctx.guild.get_member(player.current.requester)
if player.current.stream:
duration = '🔴 LIVE'
else:
duration = lavalink.utils.format_time(player.current.duration)
song = (f'**[{player.current.title}]({player.current.uri})**\n'
f'({position}/{duration}) '
f'requested by '
f'**{requester.display_name if requester else "?"}**')
embed = discord.Embed(color=discord.Color.blurple(),
title='Now Playing', description=song)
status = (f"\N{TWISTED RIGHTWARDS ARROWS} Shuffle: {'enabled' if player.shuffle else 'disabled'} | "
f"\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS} Repeat: {'enabled' if player.repeat else 'disabled'} | "
f"\N{SPEAKER} Volume : {player.volume}")
embed.set_footer(text=status)
await ctx.send(embed=embed)
@commands.Cog.listener(name="on_reaction_clear")
async def remove_page_on_menu_close(self, message, reactions):
current_pages = self.pages.get(message.guild.id, None)
if not current_pages:
return
found_page = next(filter(lambda p: p.message == message, current_pages), None)
if found_page:
self.pages[message.guild.id].remove(found_page)
@commands.command(aliases=['q', 'playlist'])
async def queue(self, ctx):
""" Shows the player's queue. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Nothing queued.')
pages= menus.MenuPages(source=MusicQueue(player.queue, ctx, player), clear_reactions_after=True)
await pages.start(ctx)
if ctx.guild.id in self.pages:
self.pages[ctx.guild.id].append(pages)
else:
self.pages[ctx.guild.id] = [pages]
@commands.command(aliases=['resume'])
@checks.is_owner_or_moderator()
async def pause(self, ctx):
""" Pauses/Resumes the current track. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing.')
if player.paused:
await player.set_pause(False)
await ctx.send('⏯ | Resumed')
else:
await player.set_pause(True)
await ctx.send('⏯ | Paused')
@commands.command(aliases=['vol'])
@checks.is_owner_or_moderator()
async def volume(self, ctx, volume: int = None):
""" Changes the player's volume (0-1000). """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not volume:
return await ctx.send(f'🔈 | {player.volume}%')
# Lavalink will automatically cap values between, or equal to 0-1000.
await player.set_volume(volume)
await ctx.send(f'🔈 | Set to {player.volume}%')
@commands.command()
async def shuffle(self, ctx):
""" Shuffles the player's queue. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Nothing playing.')
player.shuffle = not player.shuffle
await ctx.send(f'🔀 | Shuffle '
f'{"enabled" if player.shuffle else "disabled"}')
@commands.command(aliases=['loop'])
async def repeat(self, ctx):
"""
Repeats the current song until the command is invoked again
or until a new song is queued.
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Nothing playing.')
player.repeat = not player.repeat
await ctx.send('🔁 | Repeat ' + ('enabled' if player.repeat else 'disabled'))
@commands.command()
async def remove(self, ctx, index: int):
""" Removes an item from the player's queue with the given index. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
can_remove = False
try:
can_remove = checks.is_owner_or_moderator_check(ctx.message)
except commands.CheckFailure:
pass
if can_remove or ctx.author.id == player.queue[index-1].requester:
if not player.queue:
return await ctx.send('Nothing queued.')
if index > len(player.queue) or index < 1:
return await ctx.send(f'Index has to be **between** 1 and {len(player.queue)}')
removed = player.queue.pop(index - 1) # Account for 0-index.
await ctx.send(f'Removed **{removed.title}** from the queue.')
else:
await ctx.send("Only requester and moderators can remove from the list")
@commands.group(aliases=["search"], invoke_without_command=True)
async def find(self, ctx, *, query):
""" Lists the first 10 search results from a given query.
also allows you to queue one of the results (use p and the index number)
for example p 1 to play the first song in the results.
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
original_query = query
if not query.startswith('ytsearch:') and not query.startswith('scsearch:'):
query = 'ytsearch:' + query
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Nothing found.')
tracks = results['tracks'][:10] # First 10 results
o = (f"The first 10 results found via query `{original_query}`\n"
f"use `queue` or `play` followed by the number of the result to queue that song\n")
for index, track in enumerate(tracks, start=1):
track_title = track['info']['title']
track_uri = track['info']['uri']
o += f'`{index}.` [{track_title}]({track_uri})\n'
embed = discord.Embed(color=discord.Color.blurple(), description=o)
await ctx.send(embed=embed)
def queue_check(message):
if not re.match(r"(q(uery)?|p(lay)?)", message.content):
return False
try:
get_message_numbers = ''.join(c for c in message.content if c.isdigit())
number = int(get_message_numbers)
except ValueError:
raise commands.CommandError("please choose a number between 1 and 10")
return (number >= 1 or number <= 10) and message.channel == ctx.channel and message.author == ctx.author
try:
msg = await ctx.bot.wait_for("message", check=queue_check, timeout=10.0)
except asyncio.TimeoutError:
return
get_message_numbers = ''.join(c for c in msg.content if c.isdigit())
result_number = int(get_message_numbers)
ctx.command = self.play
await self.cog_before_invoke(ctx)
await ctx.invoke(self.play, query=tracks[result_number-1]['info']['uri'])
@find.group(name="scsearch",aliases=["sc", "soundcloud"], invoke_without_command=True)
async def find_sc(self, ctx, *, query):
""" Lists the first 10 soundcloud search results from a given query.
also allows you to queue one of the results (use p and the index number)
for example p 1 to play the first song in the results.
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
original_query = query
query = 'scsearch:' + query
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Nothing found.')
tracks = results['tracks'][:10] # First 10 results
o = (f"The first 10 results found via query `{original_query}`\n"
f"use `queue` or `play` followed by the number of the result to queue that song\n")
for index, track in enumerate(tracks, start=1):
track_title = track['info']['title']
track_uri = track['info']['uri']
o += f'`{index}.` [{track_title}]({track_uri})\n'
embed = discord.Embed(color=discord.Color.blurple(), description=o)
await ctx.send(embed=embed)
def queue_check(message):
if not re.match(r"(q(uery)?|p(lay)?)", message.content):
return False
try:
get_message_numbers = ''.join(c for c in message.content if c.isdigit())
number = int(get_message_numbers)
except ValueError:
raise commands.CommandError("please choose a number between 1 and 10")
return (number >= 1 or number <= 10) and message.channel == ctx.channel and message.author == ctx.author
try:
msg = await ctx.bot.wait_for("message", check=queue_check, timeout=10.0)
except asyncio.TimeoutError:
return
get_message_numbers = ''.join(c for c in msg.content if c.isdigit())
result_number = int(get_message_numbers)
ctx.command = self.play
await self.cog_before_invoke(ctx)
await ctx.invoke(self.play, query=tracks[result_number-1]['info']['uri'])
@commands.command(aliases=['dc','stop','leave','quit'])
@can_stop()
async def disconnect(self, ctx: commands.Context):
""" Disconnects the player from the voice channel and clears its queue. """
await ctx.voice_client.disconnect(force=True)
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_connected:
return await ctx.send('Not connected.')
player.queue.clear()
await player.stop()
await ctx.send('*⃣ | Disconnected.')
async def ensure_voice(self, ctx):
""" This check ensures that the bot and command author are in the same voicechannel. """
should_connect = ctx.command.name in ('play', 'junbi_ok','soundcloud') # Add commands that require joining voice to work.
if should_connect and self.current_voice_channel(ctx) is None:
if self.bot.lavalink.node_manager.available_nodes:
voice_client = await ctx.author.voice.channel.connect(cls=LavalinkVoiceClient)
player : lavalink.DefaultPlayer = self.bot.lavalink.player_manager.create(ctx.guild.id)
player.store("channel", ctx.channel)
else:
raise commands.CommandError("No audio player nodes available. Please wait a few minutes for a reconnect")
elif self.current_voice_channel(ctx) is not None and not self.bot.lavalink.node_manager.available_nodes:
await ctx.guild.voice_client.disconnect(force=True)
raise commands.CommandError("No audio player nodes available. Please wait a few minutes for a reconnect")
if ctx.command.name in ('find', 'scsearch', 'disconnect', 'now', 'queue'):
return
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CheckFailure('Join a voicechannel first.')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect or not permissions.speak: # Check user limit too?
raise commands.CheckFailure('I need the `CONNECT` and `SPEAK` permissions.')
@commands.command(name="lc")
@checks.is_owner()
async def lavalink_reconnect(self, ctx):
self.bot.lavalink.add_node(
'localhost',
2333,
'youshallnotpass',
'us',
'default-node')
async def setup(bot):
await bot.add_cog(Music(bot))
| 2.578125 | 3 |
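The skip-vote arithmetic in the `skip` command (half of the non-bot listeners, rounded up) can be checked in isolation; a small sketch, with the threshold rule inferred from the command body above:

```python
# Standalone check of the skip-vote threshold logic (illustration only).
import math

def votes_needed(members_in_voice: int, votes_cast: int) -> int:
    """Non-bot listeners = channel members minus the bot itself."""
    humans = members_in_voice - 1
    if votes_cast >= humans / 2:
        return 0  # vote passes, track is skipped
    return math.ceil(humans / 2) - votes_cast

assert votes_needed(5, 2) == 0   # 4 humans, 2 votes -> passes
assert votes_needed(7, 2) == 1   # 6 humans, needs one more vote
```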
utils/data_processing.py | LisaAnne/LocalizingMoments | 157 | 11206 | import numpy as np
import sys
import os
sys.path.append('utils/')
from config import *
from utils import *
sys.path.append(pycaffe_dir)
import time
import pdb
import random
import pickle as pkl
import caffe
from multiprocessing import Pool
from threading import Thread
import random
import h5py
import itertools
import math
import re
glove_dim = 300
glove_path = 'data/glove.6B.%dd.txt' %glove_dim
#glove_path = 'data/glove_debug_path.txt' #for debugging
if glove_path == 'data/glove_debug_path.txt':
print "continue?"
pdb.set_trace()
possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)]
for i in itertools.combinations(range(6), 2):
possible_segments.append(i)
length_prep_word = 40
length_prep_character = 250
vocab_file = 'data/vocab_glove_complete.txt'
def word_tokenize(s):
sent = s.lower()
sent = re.sub('[^A-Za-z0-9\s]+',' ', sent)
return sent.split()
def sentences_to_words(sentences):
words = []
for s in sentences:
words.extend(word_tokenize(str(s.lower())))
return words
class glove_embedding(object):
''' Creates glove embedding object
'''
def __init__(self, glove_file=glove_path):
glove_txt = open(glove_file).readlines()
glove_txt = [g.strip() for g in glove_txt]
glove_vector = [g.split(' ') for g in glove_txt]
glove_words = [g[0] for g in glove_vector]
glove_vecs = [g[1:] for g in glove_vector]
glove_array = np.zeros((glove_dim, len(glove_words)))
glove_dict = {}
for i, w in enumerate(glove_words): glove_dict[w] = i
for i, vec in enumerate(glove_vecs):
glove_array[:,i] = np.array(vec)
self.glove_array = glove_array
self.glove_dict = glove_dict
self.glove_words = glove_words
class zero_language_vector(object):
def __init__(self, data):
self.dim = glove_dim
def get_vector_dim(self):
return self.dim
def get_vocab_size(self):
return 0
def preprocess(self, data):
embedding = np.zeros((self.get_vector_dim(),))
for d in data:
d['language_input'] = embedding
d['gt'] = (d['gt'][0], d['gt'][1])
return data
class recurrent_language(object):
def get_vocab_size(self):
return len(self.vocab_dict.keys())
def preprocess_sentence(self, words):
vector_dim = self.get_vector_dim()
sentence_mat = np.zeros((len(words), vector_dim))
count_words = 0
for i, w in enumerate(words):
try:
sentence_mat[count_words,:] = self.vocab_dict[w]
count_words += 1
            except KeyError:
if '<unk>' in self.vocab_dict.keys():
sentence_mat[count_words,:] = self.vocab_dict['<unk>']
count_words += 1
else:
pass
sentence_mat = sentence_mat[:count_words]
return sentence_mat
def preprocess(self, data):
for d in data:
words = sentences_to_words([d['description']])
            # self.preprocess(words) here recursed forever; the per-sentence
            # helper is what converts tokens into the sentence matrix.
            d['language_input'] = self.preprocess_sentence(words)
return data
class recurrent_word(recurrent_language):
def __init__(self, data):
self.data = data
vocab = open(vocab_file).readlines()
vocab = [v.strip() for v in vocab]
if '<unk>' not in vocab:
vocab.append('<unk>')
vocab_dict = {}
for i, word in enumerate(vocab):
vocab_dict[word] = i
self.vocab_dict = vocab_dict
def get_vector_dim(self):
return 1
class recurrent_embedding(recurrent_language):
def read_embedding(self):
print "Reading glove embedding"
embedding = glove_embedding(glove_path)
self.embedding = embedding
def get_vector_dim(self):
return glove_dim
def __init__(self, data):
self.read_embedding()
embedding = self.embedding
vector_dim = self.get_vector_dim()
        self.data = data
vocab = open(vocab_file).readlines()
vocab = [v.strip() for v in vocab]
if '<unk>' in vocab:
vocab.remove('<unk>') #don't have an <unk> vector. Alternatively, could map to random vector...
vocab_dict = {}
for i, word in enumerate(vocab):
try:
vocab_dict[word] = embedding.glove_array[:,embedding.glove_dict[word]]
            except KeyError:
print "%s not in glove embedding" %word
self.vocab_dict = vocab_dict
def preprocess(self, data):
vector_dim = self.get_vector_dim()
for d in data:
d['language_input'] = sentences_to_words([d['description']])
return data
def get_vocab_dict(self):
return self.vocab_dict
#Methods for extracting visual features
def feature_process_base(start, end, features):
return np.mean(features[start:end+1,:], axis = 0)
def feature_process_norm(start, end, features):
base_feature = np.mean(features[start:end+1,:], axis = 0)
return base_feature/(np.linalg.norm(base_feature) + 0.00001)
def feature_process_context(start, end, features):
feature_dim = features.shape[1]
full_feature = np.zeros((feature_dim*2,))
if np.sum(features[5,:]) > 0:
full_feature[:feature_dim] = feature_process_norm(0,6, features)
else:
full_feature[:feature_dim] = feature_process_norm(0,5, features)
full_feature[feature_dim:feature_dim*2] = feature_process_norm(start, end, features)
return full_feature
feature_process_dict = {'feature_process_base': feature_process_base,
'feature_process_norm': feature_process_norm,
'feature_process_context': feature_process_context,
}
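# Added note (not in the original file): feature_process_context returns a
# vector of twice the feature dimension -- the first half is the normalized
# mean over the whole video, the second half the normalized mean over the
# queried [start, end] segment.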
class extractData(object):
""" General class to extract data.
"""
def increment(self):
#uses iteration, batch_size, data_list, and num_data to extract next batch identifiers
next_batch = [None]*self.batch_size
if self.iteration + self.batch_size >= self.num_data:
next_batch[:self.num_data-self.iteration] = self.data_list[self.iteration:]
next_batch[self.num_data-self.iteration:] = self.data_list[:self.batch_size -(self.num_data-self.iteration)]
random.shuffle(self.data_list)
self.iteration = self.num_data - self.iteration
else:
next_batch = self.data_list[self.iteration:self.iteration+self.batch_size]
self.iteration += self.batch_size
assert self.iteration > -1
assert len(next_batch) == self.batch_size
return next_batch
class extractLanguageFeatures(extractData):
def __init__(self, dataset, params, result=None):
self.data_list = range(len(dataset))
self.num_data = len(self.data_list)
self.dataset = dataset
self.iteration = 0
self.vocab_dict = params['vocab_dict']
self.batch_size = params['batch_size']
self.num_glove_centroids = self.vocab_dict.values()[0].shape[0]
self.T = params['sentence_length']
if isinstance(result, dict):
self.result = result
self.query_key = params['query_key']
self.cont_key = params['cont_key']
self.top_keys = [self.query_key, self.cont_key]
self.top_shapes = [(self.T, self.batch_size, self.num_glove_centroids),
(self.T, self.batch_size)]
else:
print "Will only be able to run in test mode"
def get_features(self, query):
feature = np.zeros((self.T, self.num_glove_centroids))
cont = np.zeros((self.T,))
len_query = min(len(query), self.T)
if len_query < len(query):
query = query[:len_query]
for count_word, word in enumerate(query):
try:
feature[-(len_query)+count_word,:] = self.vocab_dict[word]
            except KeyError:
feature[-(len_query)+count_word,:] = np.zeros((glove_dim,))
cont[-(len_query-1):] = 1
assert np.sum(feature[:-len_query,:]) == 0
return feature, cont
def get_data_test(self, data):
query = data['language_input']
return self.get_features(query)
def get_data(self, next_batch):
data = self.dataset
query_mat = np.zeros((self.T, self.batch_size, self.num_glove_centroids))
cont = np.zeros((self.T, self.batch_size))
for i, nb in enumerate(next_batch):
query = data[nb]['language_input']
query_mat[:,i,:], cont[:,i] = self.get_features(query)
self.result[self.query_key] = query_mat
self.result[self.cont_key] = cont
class extractVisualFeatures(extractData):
def __init__(self, dataset, params, result):
self.data_list = range(len(dataset))
self.feature_process_algo = params['feature_process']
self.loc_feature = params['loc_feature']
self.num_data = len(self.data_list)
self.dataset = dataset
self.iteration = 0
self.loc = params['loc_feature']
loss_type = params['loss_type']
assert loss_type in ['triplet', 'inter', 'intra']
self.inter = False
self.intra = False
if loss_type in ['triplet', 'inter']:
self.inter = True
if loss_type in ['triplet', 'intra']:
self.intra = True
self.batch_size = params['batch_size']
self.num_glove_centroids = params['num_glove_centroids']
        features_h5py = h5py.File(params['features'], 'r')
features = {}
for key in features_h5py.keys():
features[key] = np.array(features_h5py[key])
features_h5py.close()
self.features = features
assert self.feature_process_algo in feature_process_dict.keys()
self.feature_process = feature_process_dict[self.feature_process_algo]
self.feature_dim = self.feature_process(0,0,self.features[self.dataset[0]['video']]).shape[-1]
self.result = result
self.feature_key_p = params['feature_key_p']
self.feature_time_stamp_p = params['feature_time_stamp_p']
self.feature_time_stamp_n = params['feature_time_stamp_n']
self.top_keys = [self.feature_key_p, self.feature_time_stamp_p, self.feature_time_stamp_n]
self.top_shapes = [(self.batch_size, self.feature_dim),
(self.batch_size, 2),
(self.batch_size,2)]
if self.inter:
self.feature_key_inter = 'features_inter'
self.top_keys.append(self.feature_key_inter)
self.top_shapes.append((self.batch_size, self.feature_dim))
if self.intra:
self.feature_key_intra = 'features_intra'
self.top_keys.append(self.feature_key_intra)
self.top_shapes.append((self.batch_size, self.feature_dim))
self.possible_annotations = possible_segments
def get_data_test(self, d):
video_feats = self.features[d['video']]
features = np.zeros((len(self.possible_annotations), self.feature_dim))
loc_feats = np.zeros((len(self.possible_annotations), 2))
for i, p in enumerate(self.possible_annotations):
features[i,:] = self.feature_process(p[0], p[1], video_feats)
loc_feats[i,:] = [p[0]/6., p[1]/6.]
return features, loc_feats
def get_data(self, next_batch):
feature_process = self.feature_process
data = self.dataset
features_p = np.zeros((self.batch_size, self.feature_dim))
if self.inter: features_inter = np.zeros((self.batch_size, self.feature_dim))
if self.intra: features_intra = np.zeros((self.batch_size, self.feature_dim))
features_time_stamp_p = np.zeros((self.batch_size, 2))
features_time_stamp_n = np.zeros((self.batch_size, 2))
for i, nb in enumerate(next_batch):
rint = random.randint(0,len(data[nb]['times'])-1)
gt_s = data[nb]['times'][rint][0]
gt_e = data[nb]['times'][rint][1]
possible_n = list(set(self.possible_annotations) - set(((gt_s,gt_e),)))
random.shuffle(possible_n)
n = possible_n[0]
assert n != (gt_s, gt_e)
video = data[nb]['video']
feats = self.features[video]
if self.inter:
other_video = data[nb]['video']
while (other_video == video):
other_video_index = int(random.random()*len(data))
other_video = data[other_video_index]['video']
feats_inter = self.features[other_video]
features_p[i,:] = feature_process(gt_s, gt_e, feats)
if self.intra:
features_intra[i,:] = feature_process(n[0], n[1], feats)
if self.inter:
try:
features_inter[i,:] = feature_process(gt_s, gt_e, feats_inter)
except:
pdb.set_trace()
if self.loc:
features_time_stamp_p[i,0] = gt_s/6.
features_time_stamp_p[i,1] = gt_e/6.
features_time_stamp_n[i,0] = n[0]/6.
features_time_stamp_n[i,1] = n[1]/6.
else:
features_time_stamp_p[i,0] = 0
features_time_stamp_p[i,1] = 0
features_time_stamp_n[i,0] = 0
features_time_stamp_n[i,1] = 0
assert not math.isnan(np.mean(self.features[data[nb]['video']][n[0]:n[1]+1,:]))
assert not math.isnan(np.mean(self.features[data[nb]['video']][gt_s:gt_e+1,:]))
self.result[self.feature_key_p] = features_p
self.result[self.feature_time_stamp_p] = features_time_stamp_p
self.result[self.feature_time_stamp_n] = features_time_stamp_n
if self.inter:
self.result[self.feature_key_inter] = features_inter
if self.intra:
self.result[self.feature_key_intra] = features_intra
class batchAdvancer(object):
def __init__(self, extractors):
self.extractors = extractors
self.increment_extractor = extractors[0]
def __call__(self):
#The batch advancer just calls each extractor
next_batch = self.increment_extractor.increment()
for e in self.extractors:
e.get_data(next_batch)
class python_data_layer(caffe.Layer):
""" General class to extract data.
"""
def setup(self, bottom, top):
random.seed(10)
self.params = eval(self.param_str)
params = self.params
assert 'top_names' in params.keys()
#set up prefetching
self.thread_result = {}
self.thread = None
self.setup_extractors()
self.batch_advancer = batchAdvancer(self.data_extractors)
shape_dict = {}
self.top_names = []
for de in self.data_extractors:
for top_name, top_shape in zip(de.top_keys, de.top_shapes):
shape_dict[top_name] = top_shape
self.top_names.append((params['top_names'].index(top_name), top_name))
self.dispatch_worker()
self.top_shapes = [shape_dict[tn[1]] for tn in self.top_names]
print 'Outputs:', self.top_names
if len(top) != len(self.top_names):
raise Exception('Incorrect number of outputs (expected %d, got %d)' %
(len(self.top_names), len(top)))
self.join_worker()
#for top_index, name in enumerate(self.top_names.keys()):
top_count = 0
for top_index, name in self.top_names:
shape = self.top_shapes[top_count]
print 'Top name %s has shape %s.' %(name, shape)
top[top_index].reshape(*shape)
top_count += 1
def reshape(self, bottom, top):
pass
def forward(self, bottom, top):
if self.thread is not None:
self.join_worker()
for top_index, name in self.top_names:
top[top_index].data[...] = self.thread_result[name]
self.dispatch_worker()
def dispatch_worker(self):
assert self.thread is None
self.thread = Thread(target=self.batch_advancer)
self.thread.start()
def join_worker(self):
assert self.thread is not None
self.thread.join()
self.thread = None
def backward(self, top, propoagate_down, bottom):
pass
language_feature_process_dict = {'zero_language': zero_language_vector,
'recurrent_embedding': recurrent_embedding}
class dataLayer_ExtractPairedLanguageVision(python_data_layer):
def setup_extractors(self):
assert 'top_names' in self.params.keys()
assert 'descriptions' in self.params.keys()
assert 'features' in self.params.keys()
if 'batch_size' not in self.params.keys(): self.params['batch_size'] = 120
self.params['query_key'] = 'query'
self.params['feature_key_n'] = 'features_n'
self.params['feature_key_p'] = 'features_p'
self.params['feature_key_t'] = 'features_t'
self.params['feature_time_stamp_p'] = 'features_time_stamp_p'
self.params['feature_time_stamp_n'] = 'features_time_stamp_n'
self.params['cont_key'] = 'cont'
language_extractor_fcn = extractLanguageFeatures
visual_extractor_fcn = extractVisualFeatures
language_process = recurrent_embedding
data_orig = read_json(self.params['descriptions'])
random.shuffle(data_orig)
language_processor = language_process(data_orig)
data = language_processor.preprocess(data_orig)
self.params['vocab_dict'] = language_processor.vocab_dict
num_glove_centroids = language_processor.get_vector_dim()
self.params['num_glove_centroids'] = num_glove_centroids
visual_feature_extractor = visual_extractor_fcn(data, self.params, self.thread_result)
textual_feature_extractor = language_extractor_fcn(data, self.params, self.thread_result)
self.data_extractors = [visual_feature_extractor, textual_feature_extractor]
| 2.265625 | 2 |
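A quick look at what the tokenization helpers at the top of this file produce; the logic below mirrors `word_tokenize` exactly, restated in Python 3 for illustration (the original file targets Python 2):

```python
# Behavior of data_processing.word_tokenize, restated for illustration.
import re

def word_tokenize(s):
    sent = s.lower()
    sent = re.sub('[^A-Za-z0-9\\s]+', ' ', sent)  # strip punctuation
    return sent.split()

print(word_tokenize("The dog jumps, then runs!"))
# -> ['the', 'dog', 'jumps', 'then', 'runs']
```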
old_metrics/bleu.py | Danial-Alh/fast-bleu | 21 | 11207 | <reponame>Danial-Alh/fast-bleu<filename>old_metrics/bleu.py<gh_stars>10-100
import math
import os
from collections import Counter
from fractions import Fraction
import numpy as np
from nltk import ngrams
from nltk.translate.bleu_score import SmoothingFunction
from .utils import get_ngrams, Threader
def corpus_bleu(references,
hypothesis,
reference_max_counts,
ref_lens,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweight=False,
):
"""
    Calculate a corpus-style BLEU score (aka. system-level BLEU) for a
    hypothesis against a reference set whose n-gram statistics are precomputed.
    Instead of averaging the sentence level BLEU scores (i.e. macro-average
    precision), the original BLEU metric (Papineni et al. 2002) accounts for
    the micro-average precision (i.e. summing the numerators and denominators
    for each hypothesis-reference(s) pair before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
The example below show that corpus_bleu() is different from averaging
sentence_bleu() for hypotheses
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
    :param references: the reference translations for this hypothesis
    :type references: list(list(str))
    :param hypothesis: a hypothesis translation
    :type hypothesis: list(str)
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweight: Option to re-normalize the weights uniformly.
:type auto_reweight: bool
:return: The corpus-level BLEU score.
:rtype: float
"""
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
# Iterate through each hypothesis and their corresponding references.
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i = modified_precision(reference_max_counts, hypothesis, i)
p_numerators[i] += p_i.numerator
p_denominators[i] += p_i.denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(ref_lens, hyp_len)
# Calculate corpus-level brevity penalty.
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Uniformly re-weighting based on maximum hypothesis lengths if largest
# order of n-grams < 4 and weights is set at default.
if auto_reweight:
if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
weights = (1 / hyp_lengths,) * hyp_lengths
# Collects the various precision values for the different ngram orders.
p_n = [
Fraction(p_numerators[i], p_denominators[i], _normalize=False)
for i, _ in enumerate(weights, start=1)
]
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0
    # If there's no smoothing, use method0 from the SmoothingFunction class.
if not smoothing_function:
smoothing_function = SmoothingFunction().method0
# Smoothen the modified precision.
# Note: smoothing_function() may convert values into floats;
# it tries to retain the Fraction object as much as the
# smoothing method allows.
p_n = smoothing_function(
p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
)
s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))
s = bp * math.exp(math.fsum(s))
return s
def modified_precision(reference_max_counts, hypothesis, n):
"""
Calculate modified ngram precision.
The normal precision method may lead to some wrong translations with
high-precision, e.g., the translation, in which a word of reference
repeats several times, has very high precision.
This function only returns the Fraction object that contains the numerator
and denominator necessary to calculate the corpus-level precision.
To calculate the modified precision for a single pair of hypothesis and
references, cast the Fraction object into a float.
The famous "the the the ... " example shows that you can get BLEU precision
by duplicating high frequency words.
>>> reference1 = 'the cat is on the mat'.split()
>>> reference2 = 'there is a cat on the mat'.split()
>>> hypothesis1 = 'the the the the the the the'.split()
>>> references = [reference1, reference2]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.2857...
In the modified n-gram precision, a reference word will be considered
exhausted after a matching hypothesis word is identified, e.g.
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hypothesis = 'of the'.split()
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis, n=1))
1.0
>>> float(modified_precision(references, hypothesis, n=2))
1.0
An example of a normal machine translation hypothesis:
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.9444...
>>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
0.5714...
>>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
0.5882352941176471
>>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
0.07692...
:param references: A list of reference translations.
:type references: list(list(str))
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: BLEU's modified precision for the nth order ngram.
:rtype: Fraction
"""
# Extracts all ngrams in hypothesis
# Set an empty Counter if hypothesis is empty.
counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
# Extract a union of references' counts.
# max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
max_counts = reference_max_counts[n - 1]
# Assigns the intersection between hypothesis and references' counts.
clipped_counts = {
ngram: min(count, max_counts.get(ngram, 0)) for ngram, count in counts.items()
}
numerator = sum(clipped_counts.values())
# Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
# Usually this happens when the ngram order is > len(reference).
denominator = max(1, sum(counts.values()))
return Fraction(numerator, denominator, _normalize=False)
def closest_ref_length(ref_lens, hyp_len):
"""
This function finds the reference that is the closest length to the
hypothesis. The closest reference length is referred to as *r* variable
from the brevity penalty formula in Papineni et. al. (2002)
:param references: A list of reference translations.
:type references: list(list(str))
:param hyp_len: The length of the hypothesis.
:type hyp_len: int
:return: The length of the reference that's closest to the hypothesis.
:rtype: int
"""
closest_ref_len = min(
ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
)
return closest_ref_len
def brevity_penalty(closest_ref_len, hyp_len):
"""
Calculate brevity penalty.
As the modified n-gram precision still has the problem from the short
length sentence, brevity penalty is used to modify the overall BLEU
score according to length.
An example from the paper. There are three references with length 12, 15
and 17. And a concise hypothesis of the length 12. The brevity penalty is 1.
>>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
>>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
>>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> references = [reference1, reference2, reference3]
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
In case a hypothesis translation is shorter than the references, penalty is
applied.
>>> references = [['a'] * 28, ['a'] * 28]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
0.2635971381157267
The length of the closest reference is used to compute the penalty. If the
length of a hypothesis is 12, and the reference lengths are 13 and 2, the
penalty is applied because the hypothesis length (12) is less then the
closest reference length (13).
>>> references = [['a'] * 13, ['a'] * 2]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.9200...
The brevity penalty doesn't depend on reference order. More importantly,
when two reference sentences are at the same distance, the shortest
reference sentence length is used.
>>> references = [['a'] * 13, ['a'] * 11]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
>>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
>>> bp1 == bp2 == 1
True
A test example from mteval-v13a.pl (starting from the line 705):
>>> references = [['a'] * 11, ['a'] * 8]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.8668...
>>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
:param hyp_len: The length of the hypothesis for a single sentence OR the
sum of all the hypotheses' lengths for a corpus
:type hyp_len: int
:param closest_ref_len: The length of the closest reference for a single
hypothesis OR the sum of all the closest references for every hypotheses.
:type closest_ref_len: int
:return: BLEU's brevity penalty.
:rtype: float
"""
if hyp_len > closest_ref_len:
return 1
# If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
elif hyp_len == 0:
return 0
else:
return math.exp(1 - closest_ref_len / hyp_len)
class Bleu():  # speeds up computation when the same references are reused for many samples
    # Based on https://www.nltk.org/_modules/nltk/translate/bleu_score.html
def __init__(self, references, weights=np.ones(3) / 3., smoothing_function=SmoothingFunction().method1,
auto_reweight=False, process_num=None, other_instance=None):
self.references = references
self.weights = weights
self.smoothing_function = smoothing_function
self.auto_reweight = auto_reweight
self.max_n = len(weights)
if process_num is None:
self.process_num = os.cpu_count()
else:
self.process_num = process_num
print('bleu{} init!'.format(self.max_n))
if other_instance is None:
self.ref_lens = list(len(reference) for reference in references)
self.references_ngrams = [get_ngrams(references, n + 1) for n in range(self.max_n)]
self.references_counts = [[Counter(l) for l in self.references_ngrams[n]] for n in range(self.max_n)]
self.reference_max_counts = [self.get_reference_max_counts(n) for n in range(self.max_n)]
else:
assert other_instance.max_n >= self.max_n, 'invalid cache!'
assert isinstance(other_instance, Bleu), 'invalid cache!'
ref_lens, \
references_ngrams, \
references_counts, \
reference_max_counts = other_instance.get_cached_fields()
self.ref_lens = ref_lens
self.references_ngrams = references_ngrams[:self.max_n]
self.references_counts = references_counts[:self.max_n]
self.reference_max_counts = reference_max_counts[:self.max_n]
def get_cached_fields(self):
return self.ref_lens, \
self.references_ngrams, \
self.references_counts, \
self.reference_max_counts
def get_score(self, samples, compute_in_parallel=True):
print('evaluating bleu {}!'.format(self.max_n))
if compute_in_parallel:
return Threader(samples, self.tmp_get_score, self.process_num, show_tqdm=False).run()
return [self.tmp_get_score(sample) for sample in samples]
def tmp_get_score(self, item):
return corpus_bleu(self.references, item,
self.reference_max_counts, self.ref_lens, self.weights,
self.smoothing_function, self.auto_reweight)
def get_reference_max_counts(self, n):
print('calculating max counts n = %d!' % ((n + 1),))
ngram_keys = list(set([x for y in self.references_ngrams[n] for x in y]))
return dict(zip(ngram_keys, Threader(ngram_keys, self.tmp_get_reference_max_counts, show_tqdm=True).run()))
# return dict(zip(ngram_keys, multi_run(ngram_keys, self.tmp_get_reference_max_counts, show_tqdm=True)))
def tmp_get_reference_max_counts(self, ngram):
counts = [x.get(ngram, 0) for x in self.references_counts[len(ngram) - 1]]
return np.max(counts)
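
# A minimal usage sketch (an assumption, not part of the original module): it
# presumes the module-level helpers used above (get_ngrams, corpus_bleu,
# Threader, SmoothingFunction) are importable here, and that references and
# samples are lists of token lists.
if __name__ == "__main__":
    references = [list("abcd"), list("abce")]
    samples = [list("abcd"), list("abzz")]
    scorer = Bleu(references, weights=np.ones(2) / 2., process_num=1)
    print(scorer.get_score(samples, compute_in_parallel=False))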
| 2.46875 | 2 |
setup.py | gspracklin/bwtools | 4 | 11208 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import re
from setuptools import setup, find_packages
# classifiers = """\
# Development Status :: 4 - Beta
# Programming Language :: Python
# Programming Language :: Python :: 3
# Programming Language :: Python :: 3.4
# Programming Language :: Python :: 3.5
# Programming Language :: Python :: 3.6
# Programming Language :: Python :: 3.7
# Programming Language :: Python :: 3.8
# """
def _read(*parts, **kwargs):
filepath = os.path.join(os.path.dirname(__file__), *parts)
encoding = kwargs.pop('encoding', 'utf-8')
with io.open(filepath, encoding=encoding) as fh:
text = fh.read()
return text
def get_version():
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
_read('bwtools', '__init__.py'),
re.MULTILINE).group(1)
return version
def get_long_description():
return _read('README.md')
def get_requirements(path):
content = _read(path)
return [
req
for req in content.split("\n")
if req != '' and not req.startswith('#')
]
install_requires = get_requirements('requirements.txt')
packages = find_packages()
setup(
name='bwtools',
author='<NAME>',
author_email='<EMAIL>',
version=get_version(),
license='MIT',
description='tools for bigwigs',
long_description=get_long_description(),
long_description_content_type='text/markdown',
keywords=['genomics', 'bioinformatics', 'Hi-C', 'analysis', 'cooler'],
url='https://github.com/gspracklin/bwtools',
zip_safe=False,
# classifiers=[s.strip() for s in classifiers.split('\n') if s],
packages=packages,
install_requires=install_requires,
entry_points={
'console_scripts': [
'bwtools = bwtools.cli:cli',
]
}
) | 1.945313 | 2 |
hoogberta/encoder.py | KateSatida/HoogBERTa_SuperAI2 | 0 | 11209 | from .trainer.models import MultiTaskTagger
from .trainer.utils import load_dictionaries,Config
from .trainer.tasks.multitask_tagging import MultiTaskTaggingModule
from fairseq.data.data_utils import collate_tokens
from attacut import tokenize
class HoogBERTaEncoder(object):
def __init__(self,layer=12,cuda=False,base_path="."):
args = Config(base_path=base_path)
self.base_path = base_path
self.pos_dict, self.ne_dict, self.sent_dict = load_dictionaries(self.base_path)
self.model = MultiTaskTagger(args,[len(self.pos_dict), len(self.ne_dict), len(self.sent_dict)])
        if cuda:
self.model = self.model.cuda()
def extract_features(self,sentence):
all_sent = []
sentences = sentence.split(" ")
for sent in sentences:
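            # escape literal underscores in the tokens, since "_" is used below as the sentence separator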
all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))
sentence = " _ ".join(all_sent)
tokens = self.model.bert.encode(sentence).unsqueeze(0)
all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True)
return tokens[0], all_layers[-1][0]
def extract_features_batch(self,sentenceL):
inputList = []
for sentX in sentenceL:
sentences = sentX.split(" ")
all_sent = []
for sent in sentences:
all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))
sentence = " _ ".join(all_sent)
inputList.append(sentence)
batch = collate_tokens([self.model.bert.encode(sent) for sent in inputList], pad_idx=1)
#tokens = self.model.bert.encode(inputList)
return self.extract_features_from_tensor(batch)
def extract_features_from_tensor(self,batch):
all_layers = self.model.bert.extract_features(batch, return_all_hiddens=True)
return batch, all_layers[-1]
def extract_features2(self,sentence):
# all_sent = []
# sentences = sentence.split(" ")
# for sent in sentences:
# all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))
# sentence = " _ ".join(all_sent)
tokens = self.model.bert.encode(sentence).unsqueeze(0)
all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True)
return tokens[0], all_layers[-1][0]
def extract_features_batch2(self,sentenceL):
# inputList = []
# for sentX in sentenceL:
# sentences = sentX.split(" ")
# all_sent = []
# for sent in sentences:
# all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))
# sentence = " _ ".join(all_sent)
# inputList.append(sentence)
batch = collate_tokens([self.model.bert.encode(sent) for sent in sentenceL], pad_idx=1)
#tokens = self.model.bert.encode(inputList)
return self.extract_features_from_tensor(batch)
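
# Hypothetical usage sketch (an assumption, not from the original file): it
# requires the pretrained HoogBERTa checkpoint to be present under `base_path`
# so that MultiTaskTagger/fairseq can load the weights.
if __name__ == "__main__":
    encoder = HoogBERTaEncoder(base_path=".")
    tokens, features = encoder.extract_features("วันนี้อากาศดีมาก")
    print(tokens.shape, features.shape)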
| 2.15625 | 2 |
computer_equipment_control/model/__init__.py | hugonp/Proyecto-Triples | 0 | 11210 | <gh_stars>0
from . import usuario
from . import equipo_cambio
from . import equipo_computo
from . import sucursales
from . import depto
from . import usuario
| 1.101563 | 1 |
gui/window.py | frlnx/melee | 0 | 11211 | from pyglet.window import Window as PygletWindow
from .controllers import ComponentContainerController
from .models.container import ComponentContainerModel
from .views import OrthoViewport
class Window(PygletWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._model = ComponentContainerModel([], 0, self.width, 0, self.height)
self._controller = ComponentContainerController(self._model)
self.push_handlers(self._controller)
self._view = OrthoViewport(self._model)
def add_component(self, model):
self._model.add_component(model)
def on_draw(self):
self.clear()
self._view.draw()
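
# Minimal usage sketch (an assumption, not part of the original file): opens
# an empty window; a real application would add component models first.
if __name__ == "__main__":
    import pyglet
    window = Window(width=800, height=600, caption="demo")
    pyglet.app.run()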
| 2.1875 | 2 |
conductor_calculator.py | aj83854/project-lightning-rod | 0 | 11212 | from pyconductor import load_test_values, calculate_conductance
def conductance_calc():
preloaded_dict = load_test_values()
while preloaded_dict:
print(
"[1] - Show currently available materials in Material Dictionary\n"
"[2] - Add a material (will not be saved upon restart)\n"
"[3] - Quit\n"
"To test the conductive properties of a material, simply type in its name.\n"
"Otherwise, type the corresponding number for an option above.\n"
)
main_prompt = input(">>> ").lower()
if main_prompt == "1":
print(f"\nCurrently contains the following materials:\n{preloaded_dict.keys()}\n")
elif main_prompt == "2":
preloaded_dict.addmat()
elif main_prompt == "3":
quit()
else:
try:
calculate_conductance(preloaded_dict[main_prompt])
while True:
again_prompt = input(
"Would you like to try another calculation? [Y]es or [N]o: ").lower()
if again_prompt in ("y", "yes"):
break
elif again_prompt in ("n", "no"):
print("\nGoodbye!\n")
quit()
except KeyError:
if main_prompt == "":
print("\nNo material specified.\nPlease enter a valid material name "
"listed in option [1], or use option [2] to add your own.\n")
else: # TODO: add logic handling whether user wants to add missing material
print(f"\n{main_prompt} is not a valid material or command!\n")
if __name__ == "__main__":
conductance_calc()
| 3.453125 | 3 |
compiler/python_compiler/engines/py3_8/Variable.py | unknowncoder05/app-architect | 3 | 11213 | <reponame>unknowncoder05/app-architect
from .Fragment import Fragment
from utils.flags import *
from utils.CustomLogging import CustomLogging
#from python_compiler.engines.utils.types import get_python_type_str, ANY
DEFAULT_ASSIGN_OPERATOR = "="
ASSIGN_OPERATORS = {
"=":"=",
"+=":"+=",
"-=":"-=",
"*=":"*=",
"/=":"/=",
"//=":"//=",
"%=":"%=",
"**=":"**=",
"&=":"&=",
"|=":"|=",
"^=":"^=",
">>=":">>=",
"<<=":"<<=",
}
def get_variable_name(fragment) -> str:
if not (variable_name := fragment.get(ATTRIBUTE_VARIABLE_NAME)):
CustomLogging.critical(f"Fragment type variable '{ATTRIBUTE_VARIABLE_NAME}' attribute does not exist")
return variable_name
def get_variable_type(fragment) -> str:
if not (variable_type := fragment.get(ATTRIBUTE_VARIABLE_TYPE)):
variable_type = ""
else:
variable_type = ":"+variable_type
return variable_type
def get_variable_assign_operator(fragment) -> str:
if not (variable_assign_operator := fragment.get(ATTRIBUTE_VARIABLE_ASSIGN_OPERATOR)):
variable_assign_operator = DEFAULT_ASSIGN_OPERATOR
    return ASSIGN_OPERATORS.get(variable_assign_operator, DEFAULT_ASSIGN_OPERATOR)  # fall back instead of returning None for unknown operators
def get_variable_expression(fragment) -> str:
if not (variable_expression := fragment.get(ATTRIBUTE_VARIABLE_EXPRESSION)):
CustomLogging.critical(f"Fragment type variable '{ATTRIBUTE_VARIABLE_EXPRESSION}' attribute does not exist")
return variable_expression
class Variable(Fragment):
name:str
variable_type:str
assign_operator:str
expression:str
def __init__(self, blueprint, *args, **kwargs) -> None:
super().__init__(blueprint, *args, **kwargs)
self.name = get_variable_name(blueprint)
self.variable_type = get_variable_type(blueprint)
self.assign_operator = get_variable_assign_operator(blueprint)
self.expression = get_variable_expression(blueprint)
def compile(self)->str:
fragment_build = ""
fragment_build = f"{self.name}{self.variable_type} {self.assign_operator} {self.expression}"
return fragment_build | 2.25 | 2 |
test/win/vs-macros/test_exists.py | chlorm-forks/gyp | 77 | 11214 | <reponame>chlorm-forks/gyp
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
if not os.path.exists(sys.argv[1]):
raise Exception()
open(sys.argv[2], 'w').close()
| 1.507813 | 2 |
Lib/site-packages/qwt/scale_draw.py | fochoao/cpython | 0 | 11215 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the Qwt License
# Copyright (c) 2002 <NAME>, for the original C++ code
# Copyright (c) 2015 <NAME>, for the Python translation/optimization
# (see LICENSE file for more details)
"""
QwtAbstractScaleDraw
--------------------
.. autoclass:: QwtAbstractScaleDraw
:members:
QwtScaleDraw
------------
.. autoclass:: QwtScaleDraw
:members:
"""
from qwt.scale_div import QwtScaleDiv
from qwt.scale_map import QwtScaleMap
from qwt.text import QwtText
from qwt._math import qwtRadians
from qtpy.QtGui import QPalette, QFontMetrics, QTransform
from qtpy.QtCore import Qt, qFuzzyCompare, QLocale, QRectF, QPointF, QRect, QPoint
from math import ceil
import numpy as np
class QwtAbstractScaleDraw_PrivateData(object):
def __init__(self):
self.spacing = 4
self.penWidth = 0
self.minExtent = 0.0
self.components = (
QwtAbstractScaleDraw.Backbone
| QwtAbstractScaleDraw.Ticks
| QwtAbstractScaleDraw.Labels
)
self.tick_length = {
QwtScaleDiv.MinorTick: 4.0,
QwtScaleDiv.MediumTick: 6.0,
QwtScaleDiv.MajorTick: 8.0,
}
self.tick_lighter_factor = {
QwtScaleDiv.MinorTick: 100,
QwtScaleDiv.MediumTick: 100,
QwtScaleDiv.MajorTick: 100,
}
self.map = QwtScaleMap()
self.scaleDiv = QwtScaleDiv()
self.labelCache = {}
class QwtAbstractScaleDraw(object):
"""
A abstract base class for drawing scales
`QwtAbstractScaleDraw` can be used to draw linear or logarithmic scales.
After a scale division has been specified as a `QwtScaleDiv` object
using `setScaleDiv()`, the scale can be drawn with the `draw()` member.
Scale components:
* `QwtAbstractScaleDraw.Backbone`: Backbone = the line where the ticks are located
* `QwtAbstractScaleDraw.Ticks`: Ticks
* `QwtAbstractScaleDraw.Labels`: Labels
.. py:class:: QwtAbstractScaleDraw()
The range of the scale is initialized to [0, 100],
The spacing (distance between ticks and labels) is
set to 4, the tick lengths are set to 4,6 and 8 pixels
"""
# enum ScaleComponent
Backbone = 0x01
Ticks = 0x02
Labels = 0x04
def __init__(self):
self.__data = QwtAbstractScaleDraw_PrivateData()
def extent(self, font):
"""
Calculate the extent
The extent is the distance from the baseline to the outermost
pixel of the scale draw in opposite to its orientation.
It is at least minimumExtent() pixels.
:param QFont font: Font used for drawing the tick labels
:return: Number of pixels
.. seealso::
:py:meth:`setMinimumExtent()`, :py:meth:`minimumExtent()`
"""
return 0.0
def drawTick(self, painter, value, len_):
"""
Draw a tick
:param QPainter painter: Painter
:param float value: Value of the tick
:param float len: Length of the tick
.. seealso::
:py:meth:`drawBackbone()`, :py:meth:`drawLabel()`
"""
pass
def drawBackbone(self, painter):
"""
Draws the baseline of the scale
:param QPainter painter: Painter
.. seealso::
:py:meth:`drawTick()`, :py:meth:`drawLabel()`
"""
pass
def drawLabel(self, painter, value):
"""
Draws the label for a major scale tick
:param QPainter painter: Painter
:param float value: Value
.. seealso::
:py:meth:`drawTick()`, :py:meth:`drawBackbone()`
"""
pass
def enableComponent(self, component, enable):
"""
En/Disable a component of the scale
:param int component: Scale component
:param bool enable: On/Off
.. seealso::
:py:meth:`hasComponent()`
"""
if enable:
self.__data.components |= component
else:
self.__data.components &= ~component
def hasComponent(self, component):
"""
Check if a component is enabled
:param int component: Component type
:return: True, when component is enabled
.. seealso::
:py:meth:`enableComponent()`
"""
return self.__data.components & component
def setScaleDiv(self, scaleDiv):
"""
Change the scale division
:param qwt.scale_div.QwtScaleDiv scaleDiv: New scale division
"""
self.__data.scaleDiv = scaleDiv
self.__data.map.setScaleInterval(scaleDiv.lowerBound(), scaleDiv.upperBound())
self.__data.labelCache.clear()
def setTransformation(self, transformation):
"""
Change the transformation of the scale
:param qwt.transform.QwtTransform transformation: New scale transformation
"""
self.__data.map.setTransformation(transformation)
def scaleMap(self):
"""
:return: Map how to translate between scale and pixel values
"""
return self.__data.map
def scaleDiv(self):
"""
:return: scale division
"""
return self.__data.scaleDiv
def setPenWidth(self, width):
"""
Specify the width of the scale pen
:param int width: Pen width
.. seealso::
:py:meth:`penWidth()`
"""
if width < 0:
width = 0
if width != self.__data.penWidth:
self.__data.penWidth = width
def penWidth(self):
"""
:return: Scale pen width
.. seealso::
:py:meth:`setPenWidth()`
"""
return self.__data.penWidth
def draw(self, painter, palette):
"""
Draw the scale
:param QPainter painter: The painter
:param QPalette palette: Palette, text color is used for the labels, foreground color for ticks and backbone
"""
painter.save()
pen = painter.pen()
pen.setWidth(self.__data.penWidth)
pen.setCosmetic(False)
painter.setPen(pen)
if self.hasComponent(QwtAbstractScaleDraw.Labels):
painter.save()
painter.setPen(palette.color(QPalette.Text))
majorTicks = self.__data.scaleDiv.ticks(QwtScaleDiv.MajorTick)
for v in majorTicks:
if self.__data.scaleDiv.contains(v):
self.drawLabel(painter, v)
painter.restore()
if self.hasComponent(QwtAbstractScaleDraw.Ticks):
painter.save()
pen = painter.pen()
pen.setCapStyle(Qt.FlatCap)
default_color = palette.color(QPalette.WindowText)
for tickType in range(QwtScaleDiv.NTickTypes):
tickLen = self.__data.tick_length[tickType]
if tickLen <= 0.0:
continue
factor = self.__data.tick_lighter_factor[tickType]
pen.setColor(default_color.lighter(factor))
painter.setPen(pen)
ticks = self.__data.scaleDiv.ticks(tickType)
for v in ticks:
if self.__data.scaleDiv.contains(v):
self.drawTick(painter, v, tickLen)
painter.restore()
if self.hasComponent(QwtAbstractScaleDraw.Backbone):
painter.save()
pen = painter.pen()
pen.setColor(palette.color(QPalette.WindowText))
pen.setCapStyle(Qt.FlatCap)
painter.setPen(pen)
self.drawBackbone(painter)
painter.restore()
painter.restore()
def setSpacing(self, spacing):
"""
Set the spacing between tick and labels
The spacing is the distance between ticks and labels.
The default spacing is 4 pixels.
:param float spacing: Spacing
.. seealso::
:py:meth:`spacing()`
"""
if spacing < 0:
spacing = 0
self.__data.spacing = spacing
def spacing(self):
"""
Get the spacing
The spacing is the distance between ticks and labels.
The default spacing is 4 pixels.
:return: Spacing
.. seealso::
:py:meth:`setSpacing()`
"""
return self.__data.spacing
def setMinimumExtent(self, minExtent):
"""
Set a minimum for the extent
The extent is calculated from the components of the
scale draw. In situations, where the labels are
changing and the layout depends on the extent (f.e scrolling
a scale), setting an upper limit as minimum extent will
avoid jumps of the layout.
:param float minExtent: Minimum extent
.. seealso::
:py:meth:`extent()`, :py:meth:`minimumExtent()`
"""
if minExtent < 0.0:
minExtent = 0.0
self.__data.minExtent = minExtent
def minimumExtent(self):
"""
Get the minimum extent
:return: Minimum extent
.. seealso::
:py:meth:`extent()`, :py:meth:`setMinimumExtent()`
"""
return self.__data.minExtent
def setTickLength(self, tick_type, length):
"""
Set the length of the ticks
:param int tick_type: Tick type
:param float length: New length
.. warning::
the length is limited to [0..1000]
"""
if tick_type not in self.__data.tick_length:
raise ValueError("Invalid tick type: %r" % tick_type)
self.__data.tick_length[tick_type] = min([1000.0, max([0.0, length])])
def tickLength(self, tick_type):
"""
:param int tick_type: Tick type
:return: Length of the ticks
.. seealso::
:py:meth:`setTickLength()`, :py:meth:`maxTickLength()`
"""
if tick_type not in self.__data.tick_length:
raise ValueError("Invalid tick type: %r" % tick_type)
return self.__data.tick_length[tick_type]
def maxTickLength(self):
"""
:return: Length of the longest tick
Useful for layout calculations
.. seealso::
:py:meth:`tickLength()`, :py:meth:`setTickLength()`
"""
return max([0.0] + list(self.__data.tick_length.values()))
def setTickLighterFactor(self, tick_type, factor):
"""
Set the color lighter factor of the ticks
:param int tick_type: Tick type
:param int factor: New factor
"""
if tick_type not in self.__data.tick_length:
raise ValueError("Invalid tick type: %r" % tick_type)
        self.__data.tick_lighter_factor[tick_type] = max([0, factor])  # clamp to a non-negative lighter factor
def tickLighterFactor(self, tick_type):
"""
:param int tick_type: Tick type
:return: Color lighter factor of the ticks
.. seealso::
:py:meth:`setTickLighterFactor()`
"""
if tick_type not in self.__data.tick_length:
raise ValueError("Invalid tick type: %r" % tick_type)
return self.__data.tick_lighter_factor[tick_type]
def label(self, value):
"""
Convert a value into its representing label
The value is converted to a plain text using
`QLocale().toString(value)`.
This method is often overloaded by applications to have individual
labels.
:param float value: Value
:return: Label string
"""
return QLocale().toString(value)
def tickLabel(self, font, value):
"""
Convert a value into its representing label and cache it.
The conversion between value and label is called very often
in the layout and painting code. Unfortunately the
calculation of the label sizes might be slow (really slow
for rich text in Qt4), so it's necessary to cache the labels.
:param QFont font: Font
:param float value: Value
:return: Tick label
"""
lbl = self.__data.labelCache.get(value)
if lbl is None:
lbl = QwtText(self.label(value))
lbl.setRenderFlags(0)
lbl.setLayoutAttribute(QwtText.MinimumLayout)
lbl.textSize(font)
self.__data.labelCache[value] = lbl
return lbl
def invalidateCache(self):
"""
Invalidate the cache used by `tickLabel()`
        The cache is invalidated when a new `QwtScaleDiv` is set. If
        the labels need to be changed while the same `QwtScaleDiv` is set,
`invalidateCache()` needs to be called manually.
"""
self.__data.labelCache.clear()
class QwtScaleDraw_PrivateData(object):
def __init__(self):
self.len = 0
self.alignment = QwtScaleDraw.BottomScale
self.labelAlignment = 0
self.labelRotation = 0.0
self.labelAutoSize = True
self.pos = QPointF()
class QwtScaleDraw(QwtAbstractScaleDraw):
"""
A class for drawing scales
QwtScaleDraw can be used to draw linear or logarithmic scales.
A scale has a position, an alignment and a length, which can be specified .
The labels can be rotated and aligned
to the ticks using `setLabelRotation()` and `setLabelAlignment()`.
After a scale division has been specified as a QwtScaleDiv object
using `QwtAbstractScaleDraw.setScaleDiv(scaleDiv)`,
the scale can be drawn with the `QwtAbstractScaleDraw.draw()` member.
Alignment of the scale draw:
* `QwtScaleDraw.BottomScale`: The scale is below
* `QwtScaleDraw.TopScale`: The scale is above
* `QwtScaleDraw.LeftScale`: The scale is left
* `QwtScaleDraw.RightScale`: The scale is right
.. py:class:: QwtScaleDraw()
The range of the scale is initialized to [0, 100],
The position is at (0, 0) with a length of 100.
The orientation is `QwtAbstractScaleDraw.Bottom`.
"""
# enum Alignment
BottomScale, TopScale, LeftScale, RightScale = list(range(4))
Flags = (
Qt.AlignHCenter | Qt.AlignBottom, # BottomScale
Qt.AlignHCenter | Qt.AlignTop, # TopScale
Qt.AlignLeft | Qt.AlignVCenter, # LeftScale
Qt.AlignRight | Qt.AlignVCenter, # RightScale
)
def __init__(self):
QwtAbstractScaleDraw.__init__(self)
self.__data = QwtScaleDraw_PrivateData()
self.setLength(100)
self._max_label_sizes = {}
def alignment(self):
"""
:return: Alignment of the scale
.. seealso::
:py:meth:`setAlignment()`
"""
return self.__data.alignment
def setAlignment(self, align):
"""
Set the alignment of the scale
:param int align: Alignment of the scale
Alignment of the scale draw:
* `QwtScaleDraw.BottomScale`: The scale is below
* `QwtScaleDraw.TopScale`: The scale is above
* `QwtScaleDraw.LeftScale`: The scale is left
* `QwtScaleDraw.RightScale`: The scale is right
The default alignment is `QwtScaleDraw.BottomScale`
.. seealso::
:py:meth:`alignment()`
"""
self.__data.alignment = align
def orientation(self):
"""
Return the orientation
TopScale, BottomScale are horizontal (`Qt.Horizontal`) scales,
LeftScale, RightScale are vertical (`Qt.Vertical`) scales.
:return: Orientation of the scale
.. seealso::
:py:meth:`alignment()`
"""
if self.__data.alignment in (self.TopScale, self.BottomScale):
return Qt.Horizontal
elif self.__data.alignment in (self.LeftScale, self.RightScale):
return Qt.Vertical
def getBorderDistHint(self, font):
"""
Determine the minimum border distance
This member function returns the minimum space
needed to draw the mark labels at the scale's endpoints.
:param QFont font: Font
:return: tuple `(start, end)`
Returned tuple:
* start: Start border distance
* end: End border distance
"""
start, end = 0, 1.0
if not self.hasComponent(QwtAbstractScaleDraw.Labels):
return start, end
ticks = self.scaleDiv().ticks(QwtScaleDiv.MajorTick)
if len(ticks) == 0:
return start, end
minTick = ticks[0]
minPos = self.scaleMap().transform(minTick)
maxTick = minTick
maxPos = minPos
for tick in ticks:
tickPos = self.scaleMap().transform(tick)
if tickPos < minPos:
minTick = tick
minPos = tickPos
if tickPos > self.scaleMap().transform(maxTick):
maxTick = tick
maxPos = tickPos
s = 0.0
e = 0.0
if self.orientation() == Qt.Vertical:
s = -self.labelRect(font, minTick).top()
s -= abs(minPos - round(self.scaleMap().p2()))
e = self.labelRect(font, maxTick).bottom()
e -= abs(maxPos - self.scaleMap().p1())
else:
s = -self.labelRect(font, minTick).left()
s -= abs(minPos - self.scaleMap().p1())
e = self.labelRect(font, maxTick).right()
e -= abs(maxPos - self.scaleMap().p2())
return max(ceil(s), 0), max(ceil(e), 0)
def minLabelDist(self, font):
"""
        Determine the minimum distance between two labels that is necessary
        so that the texts don't overlap.
:param QFont font: Font
:return: The maximum width of a label
.. seealso::
:py:meth:`getBorderDistHint()`
"""
if not self.hasComponent(QwtAbstractScaleDraw.Labels):
return 0
ticks = self.scaleDiv().ticks(QwtScaleDiv.MajorTick)
if not ticks:
return 0
fm = QFontMetrics(font)
vertical = self.orientation() == Qt.Vertical
bRect1 = QRectF()
bRect2 = self.labelRect(font, ticks[0])
if vertical:
bRect2.setRect(-bRect2.bottom(), 0.0, bRect2.height(), bRect2.width())
maxDist = 0.0
for tick in ticks:
bRect1 = bRect2
bRect2 = self.labelRect(font, tick)
if vertical:
bRect2.setRect(-bRect2.bottom(), 0.0, bRect2.height(), bRect2.width())
dist = fm.leading()
if bRect1.right() > 0:
dist += bRect1.right()
if bRect2.left() < 0:
dist += -bRect2.left()
if dist > maxDist:
maxDist = dist
angle = qwtRadians(self.labelRotation())
if vertical:
angle += np.pi / 2
sinA = np.sin(angle)
if qFuzzyCompare(sinA + 1.0, 1.0):
return np.ceil(maxDist)
fmHeight = fm.ascent() - 2
labelDist = fmHeight / np.sin(angle) * np.cos(angle)
if labelDist < 0:
labelDist = -labelDist
if labelDist > maxDist:
labelDist = maxDist
if labelDist < fmHeight:
labelDist = fmHeight
return np.ceil(labelDist)
def extent(self, font):
"""
Calculate the width/height that is needed for a
vertical/horizontal scale.
The extent is calculated from the pen width of the backbone,
the major tick length, the spacing and the maximum width/height
of the labels.
:param QFont font: Font used for painting the labels
:return: Extent
.. seealso::
:py:meth:`minLength()`
"""
d = 0.0
if self.hasComponent(QwtAbstractScaleDraw.Labels):
if self.orientation() == Qt.Vertical:
d = self.maxLabelWidth(font)
else:
d = self.maxLabelHeight(font)
if d > 0:
d += self.spacing()
if self.hasComponent(QwtAbstractScaleDraw.Ticks):
d += self.maxTickLength()
if self.hasComponent(QwtAbstractScaleDraw.Backbone):
pw = max([1, self.penWidth()])
d += pw
return max([d, self.minimumExtent()])
def minLength(self, font):
"""
Calculate the minimum length that is needed to draw the scale
:param QFont font: Font used for painting the labels
:return: Minimum length that is needed to draw the scale
.. seealso::
:py:meth:`extent()`
"""
startDist, endDist = self.getBorderDistHint(font)
sd = self.scaleDiv()
minorCount = len(sd.ticks(QwtScaleDiv.MinorTick)) + len(
sd.ticks(QwtScaleDiv.MediumTick)
)
majorCount = len(sd.ticks(QwtScaleDiv.MajorTick))
lengthForLabels = 0
if self.hasComponent(QwtAbstractScaleDraw.Labels):
lengthForLabels = self.minLabelDist(font) * majorCount
lengthForTicks = 0
if self.hasComponent(QwtAbstractScaleDraw.Ticks):
pw = max([1, self.penWidth()])
lengthForTicks = np.ceil((majorCount + minorCount) * (pw + 1.0))
return startDist + endDist + max([lengthForLabels, lengthForTicks])
def labelPosition(self, value):
"""
Find the position, where to paint a label
The position has a distance that depends on the length of the ticks
in direction of the `alignment()`.
:param float value: Value
:return: Position, where to paint a label
"""
tval = self.scaleMap().transform(value)
dist = self.spacing()
if self.hasComponent(QwtAbstractScaleDraw.Backbone):
dist += max([1, self.penWidth()])
if self.hasComponent(QwtAbstractScaleDraw.Ticks):
dist += self.tickLength(QwtScaleDiv.MajorTick)
px = 0
py = 0
if self.alignment() == self.RightScale:
px = self.__data.pos.x() + dist
py = tval
elif self.alignment() == self.LeftScale:
px = self.__data.pos.x() - dist
py = tval
elif self.alignment() == self.BottomScale:
px = tval
py = self.__data.pos.y() + dist
elif self.alignment() == self.TopScale:
px = tval
py = self.__data.pos.y() - dist
return QPointF(px, py)
def drawTick(self, painter, value, len_):
"""
Draw a tick
:param QPainter painter: Painter
:param float value: Value of the tick
:param float len: Length of the tick
.. seealso::
:py:meth:`drawBackbone()`, :py:meth:`drawLabel()`
"""
if len_ <= 0:
return
pos = self.__data.pos
tval = self.scaleMap().transform(value)
pw = self.penWidth()
a = 0
if self.alignment() == self.LeftScale:
x1 = pos.x() + a
x2 = pos.x() + a - pw - len_
painter.drawLine(x1, tval, x2, tval)
elif self.alignment() == self.RightScale:
x1 = pos.x()
x2 = pos.x() + pw + len_
painter.drawLine(x1, tval, x2, tval)
elif self.alignment() == self.BottomScale:
y1 = pos.y()
y2 = pos.y() + pw + len_
painter.drawLine(tval, y1, tval, y2)
elif self.alignment() == self.TopScale:
y1 = pos.y() + a
y2 = pos.y() - pw - len_ + a
painter.drawLine(tval, y1, tval, y2)
def drawBackbone(self, painter):
"""
Draws the baseline of the scale
:param QPainter painter: Painter
.. seealso::
:py:meth:`drawTick()`, :py:meth:`drawLabel()`
"""
pos = self.__data.pos
len_ = self.__data.len
off = 0.5 * self.penWidth()
if self.alignment() == self.LeftScale:
x = pos.x() - off
painter.drawLine(x, pos.y(), x, pos.y() + len_)
elif self.alignment() == self.RightScale:
x = pos.x() + off
painter.drawLine(x, pos.y(), x, pos.y() + len_)
elif self.alignment() == self.TopScale:
y = pos.y() - off
painter.drawLine(pos.x(), y, pos.x() + len_, y)
elif self.alignment() == self.BottomScale:
y = pos.y() + off
painter.drawLine(pos.x(), y, pos.x() + len_, y)
def move(self, *args):
"""
Move the position of the scale
The meaning of the parameter pos depends on the alignment:
* `QwtScaleDraw.LeftScale`:
The origin is the topmost point of the backbone. The backbone is a
vertical line. Scale marks and labels are drawn at the left of the
backbone.
* `QwtScaleDraw.RightScale`:
The origin is the topmost point of the backbone. The backbone is a
vertical line. Scale marks and labels are drawn at the right of
the backbone.
* `QwtScaleDraw.TopScale`:
The origin is the leftmost point of the backbone. The backbone is
a horizontal line. Scale marks and labels are drawn above the
backbone.
* `QwtScaleDraw.BottomScale`:
The origin is the leftmost point of the backbone. The backbone is
            a horizontal line. Scale marks and labels are drawn below the
backbone.
.. py:method:: move(x, y)
:noindex:
:param float x: X coordinate
:param float y: Y coordinate
.. py:method:: move(pos)
:noindex:
:param QPointF pos: position
.. seealso::
:py:meth:`pos()`, :py:meth:`setLength()`
"""
if len(args) == 2:
x, y = args
self.move(QPointF(x, y))
elif len(args) == 1:
(pos,) = args
self.__data.pos = pos
self.updateMap()
else:
raise TypeError(
"%s().move() takes 1 or 2 argument(s) (%s given)"
% (self.__class__.__name__, len(args))
)
def pos(self):
"""
:return: Origin of the scale
.. seealso::
:py:meth:`pos()`, :py:meth:`setLength()`
"""
return self.__data.pos
def setLength(self, length):
"""
Set the length of the backbone.
The length doesn't include the space needed for overlapping labels.
:param float length: Length of the backbone
.. seealso::
:py:meth:`move()`, :py:meth:`minLabelDist()`
"""
if length >= 0 and length < 10:
length = 10
if length < 0 and length > -10:
length = -10
self.__data.len = length
self.updateMap()
def length(self):
"""
:return: the length of the backbone
.. seealso::
:py:meth:`setLength()`, :py:meth:`pos()`
"""
return self.__data.len
def drawLabel(self, painter, value):
"""
Draws the label for a major scale tick
:param QPainter painter: Painter
:param float value: Value
.. seealso::
:py:meth:`drawTick()`, :py:meth:`drawBackbone()`,
:py:meth:`boundingLabelRect()`
"""
lbl = self.tickLabel(painter.font(), value)
if lbl is None or lbl.isEmpty():
return
pos = self.labelPosition(value)
labelSize = lbl.textSize(painter.font())
transform = self.labelTransformation(pos, labelSize)
painter.save()
painter.setWorldTransform(transform, True)
lbl.draw(painter, QRect(QPoint(0, 0), labelSize.toSize()))
painter.restore()
def boundingLabelRect(self, font, value):
"""
Find the bounding rectangle for the label.
The coordinates of the rectangle are absolute (calculated from
`pos()`) in direction of the tick.
:param QFont font: Font used for painting
:param float value: Value
:return: Bounding rectangle
.. seealso::
:py:meth:`labelRect()`
"""
lbl = self.tickLabel(font, value)
if lbl.isEmpty():
return QRect()
pos = self.labelPosition(value)
labelSize = lbl.textSize(font)
transform = self.labelTransformation(pos, labelSize)
return transform.mapRect(QRect(QPoint(0, 0), labelSize.toSize()))
def labelTransformation(self, pos, size):
"""
Calculate the transformation that is needed to paint a label
depending on its alignment and rotation.
:param QPointF pos: Position where to paint the label
:param QSizeF size: Size of the label
:return: Transformation matrix
.. seealso::
:py:meth:`setLabelAlignment()`, :py:meth:`setLabelRotation()`
"""
transform = QTransform()
transform.translate(pos.x(), pos.y())
transform.rotate(self.labelRotation())
flags = self.labelAlignment()
if flags == 0:
flags = self.Flags[self.alignment()]
if flags & Qt.AlignLeft:
x = -size.width()
elif flags & Qt.AlignRight:
x = 0.0
else:
x = -(0.5 * size.width())
if flags & Qt.AlignTop:
y = -size.height()
elif flags & Qt.AlignBottom:
y = 0
else:
y = -(0.5 * size.height())
transform.translate(x, y)
return transform
def labelRect(self, font, value):
"""
Find the bounding rectangle for the label. The coordinates of
the rectangle are relative to spacing + tick length from the backbone
in direction of the tick.
:param QFont font: Font used for painting
:param float value: Value
:return: Bounding rectangle that is needed to draw a label
"""
lbl = self.tickLabel(font, value)
if not lbl or lbl.isEmpty():
return QRectF(0.0, 0.0, 0.0, 0.0)
pos = self.labelPosition(value)
labelSize = lbl.textSize(font)
transform = self.labelTransformation(pos, labelSize)
br = transform.mapRect(QRectF(QPointF(0, 0), labelSize))
br.translate(-pos.x(), -pos.y())
return br
def labelSize(self, font, value):
"""
Calculate the size that is needed to draw a label
:param QFont font: Label font
:param float value: Value
:return: Size that is needed to draw a label
"""
return self.labelRect(font, value).size()
def setLabelRotation(self, rotation):
"""
Rotate all labels.
When changing the rotation, it might be necessary to
adjust the label flags too. Finding a useful combination is
        often the result of trial and error.
        :param float rotation: Angle in degrees. When changing the label rotation, the label flags often need to be adjusted too.
.. seealso::
:py:meth:`setLabelAlignment()`, :py:meth:`labelRotation()`,
:py:meth:`labelAlignment()`
"""
self.__data.labelRotation = rotation
def labelRotation(self):
"""
:return: the label rotation
.. seealso::
:py:meth:`setLabelRotation()`, :py:meth:`labelAlignment()`
"""
return self.__data.labelRotation
def setLabelAlignment(self, alignment):
"""
Change the label flags
Labels are aligned to the point tick length + spacing away from the
backbone.
The alignment is relative to the orientation of the label text.
        If the flags are 0, the label will be aligned
depending on the orientation of the scale:
* `QwtScaleDraw.TopScale`: `Qt.AlignHCenter | Qt.AlignTop`
* `QwtScaleDraw.BottomScale`: `Qt.AlignHCenter | Qt.AlignBottom`
* `QwtScaleDraw.LeftScale`: `Qt.AlignLeft | Qt.AlignVCenter`
* `QwtScaleDraw.RightScale`: `Qt.AlignRight | Qt.AlignVCenter`
Changing the alignment is often necessary for rotated labels.
        :param Qt.Alignment alignment: Or'd `Qt.AlignmentFlags`
.. seealso::
:py:meth:`setLabelRotation()`, :py:meth:`labelRotation()`,
:py:meth:`labelAlignment()`
.. warning::
The various alignments might be confusing. The alignment of the
label is not the alignment of the scale and is not the alignment
of the flags (`QwtText.flags()`) returned from
`QwtAbstractScaleDraw.label()`.
"""
self.__data.labelAlignment = alignment
def labelAlignment(self):
"""
:return: the label flags
.. seealso::
:py:meth:`setLabelAlignment()`, :py:meth:`labelRotation()`
"""
return self.__data.labelAlignment
def setLabelAutoSize(self, state):
"""
Set label automatic size option state
When drawing text labels, if automatic size mode is enabled (default
behavior), the axes are drawn in order to optimize layout space and
depends on text label individual sizes. Otherwise, width and height
won't change when axis range is changing.
This option is not implemented in Qwt C++ library: this may be used
either as an optimization (updating plot layout is faster when this
option is enabled) or as an appearance preference (with Qwt default
behavior, the size of axes may change when zooming and/or panning
plot canvas which in some cases may not be desired).
:param bool state: On/off
.. seealso::
:py:meth:`labelAutoSize()`
"""
self.__data.labelAutoSize = state
def labelAutoSize(self):
"""
:return: True if automatic size option is enabled for labels
.. seealso::
:py:meth:`setLabelAutoSize()`
"""
return self.__data.labelAutoSize
def _get_max_label_size(self, font):
key = (font.toString(), self.labelRotation())
size = self._max_label_sizes.get(key)
if size is None:
size = self.labelSize(font, -999999) # -999999 is the biggest label
size.setWidth(np.ceil(size.width()))
size.setHeight(np.ceil(size.height()))
return self._max_label_sizes.setdefault(key, size)
else:
return size
def maxLabelWidth(self, font):
"""
:param QFont font: Font
:return: the maximum width of a label
"""
ticks = self.scaleDiv().ticks(QwtScaleDiv.MajorTick)
if not ticks:
return 0
if self.labelAutoSize():
vmax = sorted(
[v for v in ticks if self.scaleDiv().contains(v)],
key=lambda obj: len(QLocale().toString(obj)),
)[-1]
return np.ceil(self.labelSize(font, vmax).width())
## Original implementation (closer to Qwt's C++ code, but slower):
# return np.ceil(max([self.labelSize(font, v).width()
# for v in ticks if self.scaleDiv().contains(v)]))
else:
return self._get_max_label_size(font).width()
def maxLabelHeight(self, font):
"""
:param QFont font: Font
:return: the maximum height of a label
"""
ticks = self.scaleDiv().ticks(QwtScaleDiv.MajorTick)
if not ticks:
return 0
if self.labelAutoSize():
vmax = sorted(
[v for v in ticks if self.scaleDiv().contains(v)],
key=lambda obj: len(QLocale().toString(obj)),
)[-1]
return np.ceil(self.labelSize(font, vmax).height())
## Original implementation (closer to Qwt's C++ code, but slower):
# return np.ceil(max([self.labelSize(font, v).height()
# for v in ticks if self.scaleDiv().contains(v)]))
else:
return self._get_max_label_size(font).height()
def updateMap(self):
pos = self.__data.pos
len_ = self.__data.len
sm = self.scaleMap()
if self.orientation() == Qt.Vertical:
sm.setPaintInterval(pos.y() + len_, pos.y())
else:
sm.setPaintInterval(pos.x(), pos.x() + len_)
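
# Minimal usage sketch (an assumption, not part of the PythonQwt sources):
# exercises only methods defined in this module, without any painting.
if __name__ == "__main__":
    draw = QwtScaleDraw()
    draw.setAlignment(QwtScaleDraw.LeftScale)
    draw.setLength(200)
    draw.setTickLength(QwtScaleDiv.MajorTick, 10.0)
    print(draw.orientation() == Qt.Vertical, draw.maxTickLength())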
| 1.78125 | 2 |
fuzzers/LIFCL/090-sysconfig/fuzzer.py | mfkiwl/prjoxide | 80 | 11216 | from fuzzconfig import FuzzConfig
import nonrouting
import fuzzloops
import re
cfgs = [
FuzzConfig(job="SYSCONFIG40", device="LIFCL-40", sv="../shared/empty_40.v",
tiles=["CIB_R0C75:EFB_0", "CIB_R0C72:BANKREF0", "CIB_R0C77:EFB_1_OSC", "CIB_R0C79:EFB_2",
"CIB_R0C81:I2C_EFB_3", "CIB_R0C85:PMU", "CIB_R0C87:MIB_CNR_32_FAFD", "CIB_R1C87:IREF_P33", "CIB_R2C87:POR"]),
FuzzConfig(job="SYSCONFIG17", device="LIFCL-17", sv="../shared/empty_17.v",
tiles=["CIB_R1C75:IREF_15K", "CIB_R0C75:PPT_QOUT_15K", "CIB_R0C74:PVTCAL33_15K", "CIB_R0C73:POR_15K",
"CIB_R0C72:I2C_15K", "CIB_R0C71:OSC_15K", "CIB_R0C70:PMU_15K", "CIB_R0C66:EFB_15K"])
]
def main():
for cfg in cfgs:
cfg.setup()
empty = cfg.build_design(cfg.sv, {})
cfg.sv = "../shared/empty_presyn_40.v"
cfg.struct_mode = False
def get_substs(k, v):
return dict(sysconfig="{}={}".format(k, v))
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.MASTER_SPI_PORT", ["DISABLE", "SERIAL", "DUAL", "QUAD"],
lambda x: get_substs("MASTER_SPI_PORT", x), False,
assume_zero_base=True,
desc="status of master SPI port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_SPI_PORT", ["DISABLE", "SERIAL", "DUAL", "QUAD"],
lambda x: get_substs("SLAVE_SPI_PORT", x), False,
assume_zero_base=True,
desc="status of slave SPI port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_I2C_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("SLAVE_I2C_PORT", x), False,
assume_zero_base=True,
desc="status of slave I2C port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_I3C_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("SLAVE_I3C_PORT", x), False,
assume_zero_base=True,
desc="status of slave I3C port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.JTAG_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("JTAG_PORT", x), False,
assume_zero_base=True,
desc="status of JTAG port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.DONE_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("DONE_PORT", x), False,
assume_zero_base=True,
desc="use DONE output after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.INITN_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("INITN_PORT", x), False,
assume_zero_base=True,
desc="use INITN input after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.PROGRAMN_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("PROGRAMN_PORT", x), False,
assume_zero_base=True,
desc="use PROGRAMN input after configuration")
if __name__ == "__main__":
main()
| 1.789063 | 2 |
test.py | jdonovanCS/CS-352-Evolutionary-Computation | 0 | 11217 | import argparse
import collections
import os
import random
import json
from copy import deepcopy
import ConfigSpace
import numpy as np
# from tabular_benchmarks import FCNetProteinStructureBenchmark, FCNetSliceLocalizationBenchmark,\
# FCNetNavalPropulsionBenchmark, FCNetParkinsonsTelemonitoringBenchmark
from tabular_benchmarks import NASCifar10A, NASCifar10B | 1.03125 | 1 |
tests/test_base64_uuid.py | cds-snc/notifier-utils | 3 | 11218 | from uuid import UUID
import os
import pytest
from notifications_utils.base64_uuid import base64_to_uuid, uuid_to_base64, base64_to_bytes, bytes_to_base64
def test_bytes_to_base64_to_bytes():
b = os.urandom(32)
b64 = bytes_to_base64(b)
assert base64_to_bytes(b64) == b
@pytest.mark.parametrize(
"url_val",
[
"AAAAAAAAAAAAAAAAAAAAAQ",
"AAAAAAAAAAAAAAAAAAAAAQ=", # even though this has invalid padding we put extra =s on the end so this is okay
"AAAAAAAAAAAAAAAAAAAAAQ==",
],
)
def test_base64_converter_to_python(url_val):
assert base64_to_uuid(url_val) == UUID(int=1)
@pytest.mark.parametrize("python_val", [UUID(int=1), "00000000-0000-0000-0000-000000000001"])
def test_base64_converter_to_url(python_val):
assert uuid_to_base64(python_val) == "AAAAAAAAAAAAAAAAAAAAAQ"
@pytest.mark.parametrize(
"url_val",
[
"this_is_valid_base64_but_is_too_long_to_be_a_uuid",
"this_one_has_emoji_➕➕➕",
],
)
def test_base64_converter_to_python_raises_validation_error(url_val):
with pytest.raises(Exception):
base64_to_uuid(url_val)
def test_base64_converter_to_url_raises_validation_error():
with pytest.raises(Exception):
uuid_to_base64(object())
| 2.609375 | 3 |
homeassistant/components/devolo_home_control/__init__.py | dummys/home-assistant | 11 | 11219 | """The devolo_home_control integration."""
from __future__ import annotations
import asyncio
from functools import partial
from types import MappingProxyType
from typing import Any
from devolo_home_control_api.exceptions.gateway import GatewayOfflineError
from devolo_home_control_api.homecontrol import HomeControl
from devolo_home_control_api.mydevolo import Mydevolo
from homeassistant.components import zeroconf
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from .const import (
CONF_MYDEVOLO,
DEFAULT_MYDEVOLO,
DOMAIN,
GATEWAY_SERIAL_PATTERN,
PLATFORMS,
)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the devolo account from a config entry."""
hass.data.setdefault(DOMAIN, {})
mydevolo = configure_mydevolo(entry.data)
credentials_valid = await hass.async_add_executor_job(mydevolo.credentials_valid)
if not credentials_valid:
raise ConfigEntryAuthFailed
if await hass.async_add_executor_job(mydevolo.maintenance):
raise ConfigEntryNotReady
gateway_ids = await hass.async_add_executor_job(mydevolo.get_gateway_ids)
if entry.unique_id and GATEWAY_SERIAL_PATTERN.match(entry.unique_id):
uuid = await hass.async_add_executor_job(mydevolo.uuid)
hass.config_entries.async_update_entry(entry, unique_id=uuid)
try:
zeroconf_instance = await zeroconf.async_get_instance(hass)
hass.data[DOMAIN][entry.entry_id] = {"gateways": [], "listener": None}
for gateway_id in gateway_ids:
hass.data[DOMAIN][entry.entry_id]["gateways"].append(
await hass.async_add_executor_job(
partial(
HomeControl,
gateway_id=gateway_id,
mydevolo_instance=mydevolo,
zeroconf_instance=zeroconf_instance,
)
)
)
except GatewayOfflineError as err:
raise ConfigEntryNotReady from err
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
def shutdown(event: Event) -> None:
for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"]:
gateway.websocket_disconnect(
f"websocket disconnect requested by {EVENT_HOMEASSISTANT_STOP}"
)
# Listen when EVENT_HOMEASSISTANT_STOP is fired
hass.data[DOMAIN][entry.entry_id]["listener"] = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, shutdown
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
await asyncio.gather(
*[
hass.async_add_executor_job(gateway.websocket_disconnect)
for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"]
]
)
hass.data[DOMAIN][entry.entry_id]["listener"]()
hass.data[DOMAIN].pop(entry.entry_id)
return unload
def configure_mydevolo(conf: dict[str, Any] | MappingProxyType[str, Any]) -> Mydevolo:
"""Configure mydevolo."""
mydevolo = Mydevolo()
mydevolo.user = conf[CONF_USERNAME]
mydevolo.password = conf[CONF_PASSWORD]
mydevolo.url = conf.get(CONF_MYDEVOLO, DEFAULT_MYDEVOLO)
return mydevolo
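
# Hypothetical usage sketch (an assumption, not part of the integration):
# shows the mapping configure_mydevolo expects; in Home Assistant the values
# come from the config entry rather than a hand-built dict.
if __name__ == "__main__":
    mydevolo = configure_mydevolo(
        {CONF_USERNAME: "user@example.com", CONF_PASSWORD: "secret"}
    )
    print(mydevolo.url)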
| 1.789063 | 2 |
trab1/hillClimbing.py | RafaelPedruzzi/IA-2019-2 | 0 | 11220 | ## -------------------------------------------------------- ##
# Trab 1 IA 2019-2
#
# <NAME>
#
# hillClimbing.py: implements the hill climbing metaheuristic for the bag problem
#
# Python version: 3.7.4
## -------------------------------------------------------- ##
import bagProblem as bp
from time import time
# Returns True and the valid state with the biggest value, or False if no state is valid:
def select_Best(si, T, OBJs):
sn = -1 # best state position
sv = 0 # state value
for i in range(len(si)):
v = bp.state_Value(si[i], OBJs) # current value
if bp.state_Verify(si[i], T, OBJs) and v > sv:
sv = v
sn = i
if sn == -1:
return False, []
return True, si[sn]
# Hill Climbing:
def hill_Climbing(T, OBJs, execTime, *args):
    sn = [0]*len(OBJs) # initial state
    cs = sn # current state (also returned if the time limit is hit before the first expansion)
    c = True # continue flag
start = time()
while c:
if time() - start > execTime:
break
cs = sn # storing current state
c, sn = select_Best(bp.state_Expansion(cs), T, OBJs)
return cs
if __name__ == "__main__":
    T = 19 # bag size
    OBJs = [(1,3), (4,6), (5,7)] # object list (v,t)
    print(hill_Climbing(T, OBJs, 1.0)) # search for at most 1 second
| 2.890625 | 3 |
ssb_pseudonymization/__init__.py | statisticsnorway/ssb-pseudonymization-py | 1 | 11221 | <filename>ssb_pseudonymization/__init__.py
"""ssb-pseudonymization - Data pseudonymization functions used by SSB"""
__version__ = '0.0.2'
__author__ = '<NAME> (ssb.no)'
__all__ = []
| 1.070313 | 1 |
hwilib/devices/keepkey.py | cjackie/HWI | 285 | 11222 | <filename>hwilib/devices/keepkey.py
"""
Keepkey
*******
"""
from ..errors import (
DEVICE_NOT_INITIALIZED,
DeviceNotReadyError,
common_err_msgs,
handle_errors,
)
from .trezorlib import protobuf as p
from .trezorlib.transport import (
hid,
udp,
webusb,
)
from .trezor import TrezorClient, HID_IDS, WEBUSB_IDS
from .trezorlib.messages import (
DebugLinkState,
Features,
HDNodeType,
ResetDevice,
)
from typing import (
Any,
Dict,
List,
Optional,
)
py_enumerate = enumerate # Need to use the enumerate built-in but there's another function already named that
KEEPKEY_HID_IDS = {(0x2B24, 0x0001)}
KEEPKEY_WEBUSB_IDS = {(0x2B24, 0x0002)}
KEEPKEY_SIMULATOR_PATH = '127.0.0.1:11044'
HID_IDS.update(KEEPKEY_HID_IDS)
WEBUSB_IDS.update(KEEPKEY_WEBUSB_IDS)
class KeepkeyFeatures(Features): # type: ignore
def __init__(
self,
*,
firmware_variant: Optional[str] = None,
firmware_hash: Optional[bytes] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.firmware_variant = firmware_variant
self.firmware_hash = firmware_hash
@classmethod
def get_fields(cls) -> Dict[int, p.FieldInfo]:
return {
1: ('vendor', p.UnicodeType, None),
2: ('major_version', p.UVarintType, None),
3: ('minor_version', p.UVarintType, None),
4: ('patch_version', p.UVarintType, None),
5: ('bootloader_mode', p.BoolType, None),
6: ('device_id', p.UnicodeType, None),
7: ('pin_protection', p.BoolType, None),
8: ('passphrase_protection', p.BoolType, None),
9: ('language', p.UnicodeType, None),
10: ('label', p.UnicodeType, None),
12: ('initialized', p.BoolType, None),
13: ('revision', p.BytesType, None),
14: ('bootloader_hash', p.BytesType, None),
15: ('imported', p.BoolType, None),
16: ('unlocked', p.BoolType, None),
21: ('model', p.UnicodeType, None),
22: ('firmware_variant', p.UnicodeType, None),
23: ('firmware_hash', p.BytesType, None),
24: ('no_backup', p.BoolType, None),
25: ('wipe_code_protection', p.BoolType, None),
}
class KeepkeyResetDevice(ResetDevice): # type: ignore
def __init__(
self,
*,
auto_lock_delay_ms: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.auto_lock_delay_ms = auto_lock_delay_ms
@classmethod
def get_fields(cls) -> Dict[int, p.FieldInfo]:
return {
1: ('display_random', p.BoolType, None),
2: ('strength', p.UVarintType, 256), # default=256
3: ('passphrase_protection', p.BoolType, None),
4: ('pin_protection', p.BoolType, None),
5: ('language', p.UnicodeType, "en-US"), # default=en-US
6: ('label', p.UnicodeType, None),
7: ('no_backup', p.BoolType, None),
8: ('auto_lock_delay_ms', p.UVarintType, None),
9: ('u2f_counter', p.UVarintType, None),
}
class KeepkeyDebugLinkState(DebugLinkState): # type: ignore
def __init__(
self,
*,
recovery_cipher: Optional[str] = None,
recovery_auto_completed_word: Optional[str] = None,
firmware_hash: Optional[bytes] = None,
storage_hash: Optional[bytes] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.recovery_cipher = recovery_cipher
self.recovery_auto_completed_word = recovery_auto_completed_word
self.firmware_hash = firmware_hash
self.storage_hash = storage_hash
@classmethod
def get_fields(cls) -> Dict[int, p.FieldType]:
return {
1: ('layout', p.BytesType, None),
2: ('pin', p.UnicodeType, None),
3: ('matrix', p.UnicodeType, None),
4: ('mnemonic_secret', p.BytesType, None),
5: ('node', HDNodeType, None),
6: ('passphrase_protection', p.BoolType, None),
7: ('reset_word', p.UnicodeType, None),
8: ('reset_entropy', p.BytesType, None),
9: ('recovery_fake_word', p.UnicodeType, None),
10: ('recovery_word_pos', p.UVarintType, None),
11: ('recovery_cipher', p.UnicodeType, None),
12: ('recovery_auto_completed_word', p.UnicodeType, None),
13: ('firmware_hash', p.BytesType, None),
14: ('storage_hash', p.BytesType, None),
}
class KeepkeyClient(TrezorClient):
def __init__(self, path: str, password: str = "", expert: bool = False) -> None:
"""
The `KeepkeyClient` is a `HardwareWalletClient` for interacting with the Keepkey.
As Keepkeys are clones of the Trezor 1, please refer to `TrezorClient` for documentation.
"""
super(KeepkeyClient, self).__init__(path, password, expert, KEEPKEY_HID_IDS, KEEPKEY_WEBUSB_IDS, KEEPKEY_SIMULATOR_PATH)
self.type = 'Keepkey'
self.client.vendors = ("keepkey.com")
self.client.minimum_versions = {"K1-14AM": (0, 0, 0)}
self.client.map_type_to_class_override[KeepkeyFeatures.MESSAGE_WIRE_TYPE] = KeepkeyFeatures
self.client.map_type_to_class_override[KeepkeyResetDevice.MESSAGE_WIRE_TYPE] = KeepkeyResetDevice
if self.simulator:
self.client.debug.map_type_to_class_override[KeepkeyDebugLinkState.MESSAGE_WIRE_TYPE] = KeepkeyDebugLinkState
def enumerate(password: str = "") -> List[Dict[str, Any]]:
results = []
devs = hid.HidTransport.enumerate(usb_ids=KEEPKEY_HID_IDS)
devs.extend(webusb.WebUsbTransport.enumerate(usb_ids=KEEPKEY_WEBUSB_IDS))
devs.extend(udp.UdpTransport.enumerate(KEEPKEY_SIMULATOR_PATH))
for dev in devs:
d_data: Dict[str, Any] = {}
d_data['type'] = 'keepkey'
d_data['model'] = 'keepkey'
d_data['path'] = dev.get_path()
client = None
with handle_errors(common_err_msgs["enumerate"], d_data):
client = KeepkeyClient(d_data['path'], password)
try:
client.client.refresh_features()
except TypeError:
continue
if 'keepkey' not in client.client.features.vendor:
continue
d_data['label'] = client.client.features.label
if d_data['path'].startswith('udp:'):
d_data['model'] += '_simulator'
d_data['needs_pin_sent'] = client.client.features.pin_protection and not client.client.features.unlocked
d_data['needs_passphrase_sent'] = client.client.features.passphrase_protection # always need the passphrase sent for Keepkey if it has passphrase protection enabled
if d_data['needs_pin_sent']:
raise DeviceNotReadyError('Keepkey is locked. Unlock by using \'promptpin\' and then \'sendpin\'.')
if d_data['needs_passphrase_sent'] and not password:
raise DeviceNotReadyError("Passphrase needs to be specified before the fingerprint information can be retrieved")
if client.client.features.initialized:
d_data['fingerprint'] = client.get_master_fingerprint().hex()
d_data['needs_passphrase_sent'] = False # Passphrase is always needed for the above to have worked, so it's already sent
else:
d_data['error'] = 'Not initialized'
d_data['code'] = DEVICE_NOT_INITIALIZED
if client:
client.close()
results.append(d_data)
return results
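
# Minimal usage sketch (an assumption, not part of HWI): requires a connected
# Keepkey or the simulator listening on KEEPKEY_SIMULATOR_PATH; with no device
# attached it simply prints nothing.
if __name__ == "__main__":
    for dev in enumerate():
        print(dev["model"], dev.get("fingerprint", dev.get("error")))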
| 2.09375 | 2 |
sklearn_extra/cluster/_k_medoids.py | chkoar/scikit-learn-extra | 1 | 11223 | <reponame>chkoar/scikit-learn-extra<filename>sklearn_extra/cluster/_k_medoids.py
# -*- coding: utf-8 -*-
"""K-medoids clustering"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import warnings
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.metrics.pairwise import (
pairwise_distances,
pairwise_distances_argmin,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils.extmath import stable_cumsum
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import ConvergenceWarning
class KMedoids(BaseEstimator, ClusterMixin, TransformerMixin):
"""k-medoids clustering.
Read more in the :ref:`User Guide <k_medoids>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of medoids to
generate.
metric : string, or callable, optional, default: 'euclidean'
What distance metric to use. See :func:metrics.pairwise_distances
init : {'random', 'heuristic', 'k-medoids++'}, optional, default: 'heuristic'
Specify medoid initialization method. 'random' selects n_clusters
elements from the dataset. 'heuristic' picks the n_clusters points
with the smallest sum distance to every other point. 'k-medoids++'
follows an approach based on k-means++_, and in general, gives initial
medoids which are more separated than those generated by the other methods.
.. _k-means++: https://theory.stanford.edu/~sergei/papers/kMeansPP-soda.pdf
max_iter : int, optional, default : 300
Specify the maximum number of iterations when fitting.
random_state : int, RandomState instance or None, optional
Specify random state for the random number generator. Used to
initialise medoids when init='random'.
Attributes
----------
cluster_centers_ : array, shape = (n_clusters, n_features)
or None if metric == 'precomputed'
Cluster centers, i.e. medoids (elements from the original dataset)
medoid_indices_ : array, shape = (n_clusters,)
The indices of the medoid rows in X
labels_ : array, shape = (n_samples,)
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn_extra.cluster import KMedoids
>>> import numpy as np
>>> X = np.asarray([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmedoids = KMedoids(n_clusters=2, random_state=0).fit(X)
>>> kmedoids.labels_
array([0, 0, 0, 1, 1, 1])
>>> kmedoids.predict([[0,0], [4,4]])
array([0, 1])
>>> kmedoids.cluster_centers_
array([[1, 2],
[4, 2]])
>>> kmedoids.inertia_
8.0
See scikit-learn-extra/examples/plot_kmedoids_digits.py for examples
of KMedoids with various distance metrics.
References
----------
<NAME>. and <NAME>., Statistical Data Analysis Based on
the L1–Norm and Related Methods, edited by <NAME>, North-Holland,
405–416. 1987
See also
--------
KMeans
The KMeans algorithm minimizes the within-cluster sum-of-squares
criterion. It scales well to large number of samples.
Notes
-----
Since all pairwise distances are calculated and stored in memory for
the duration of fit, the space complexity is O(n_samples ** 2).
"""
def __init__(
self,
n_clusters=8,
metric="euclidean",
init="heuristic",
max_iter=300,
random_state=None,
):
self.n_clusters = n_clusters
self.metric = metric
self.init = init
self.max_iter = max_iter
self.random_state = random_state
def _check_nonnegative_int(self, value, desc):
"""Validates if value is a valid integer > 0"""
if (
value is None
or value <= 0
or not isinstance(value, (int, np.integer))
):
raise ValueError(
"%s should be a nonnegative integer. "
"%s was given" % (desc, value)
)
def _check_init_args(self):
"""Validates the input arguments. """
# Check n_clusters and max_iter
self._check_nonnegative_int(self.n_clusters, "n_clusters")
self._check_nonnegative_int(self.max_iter, "max_iter")
# Check init
init_methods = ["random", "heuristic", "k-medoids++"]
if self.init not in init_methods:
raise ValueError(
"init needs to be one of "
+ "the following: "
+ "%s" % init_methods
)
def fit(self, X, y=None):
"""Fit K-Medoids to the provided data.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features), \
or (n_samples, n_samples) if metric == 'precomputed'
Dataset to cluster.
y : Ignored
Returns
-------
self
"""
random_state_ = check_random_state(self.random_state)
self._check_init_args()
X = check_array(X, accept_sparse=["csr", "csc"])
if self.n_clusters > X.shape[0]:
raise ValueError(
"The number of medoids (%d) must be less "
"than the number of samples %d."
% (self.n_clusters, X.shape[0])
)
D = pairwise_distances(X, metric=self.metric)
medoid_idxs = self._initialize_medoids(
D, self.n_clusters, random_state_
)
labels = None
# Continue the algorithm as long as
# the medoids keep changing and the maximum number
# of iterations is not exceeded
for self.n_iter_ in range(0, self.max_iter):
old_medoid_idxs = np.copy(medoid_idxs)
labels = np.argmin(D[medoid_idxs, :], axis=0)
# Update medoids with the new cluster indices
self._update_medoid_idxs_in_place(D, labels, medoid_idxs)
if np.all(old_medoid_idxs == medoid_idxs):
break
elif self.n_iter_ == self.max_iter - 1:
warnings.warn(
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning,
)
# Set the resulting instance variables.
if self.metric == "precomputed":
self.cluster_centers_ = None
else:
self.cluster_centers_ = X[medoid_idxs]
# Expose labels_ which are the assignments of
# the training data to clusters
self.labels_ = labels
self.medoid_indices_ = medoid_idxs
self.inertia_ = self._compute_inertia(self.transform(X))
# Return self to enable method chaining
return self
def _update_medoid_idxs_in_place(self, D, labels, medoid_idxs):
"""In-place update of the medoid indices"""
# Update the medoids for each cluster
for k in range(self.n_clusters):
# Extract the distance matrix between the data points
# inside the cluster k
cluster_k_idxs = np.where(labels == k)[0]
if len(cluster_k_idxs) == 0:
warnings.warn(
"Cluster {k} is empty! "
"self.labels_[self.medoid_indices_[{k}]] "
"may not be labeled with "
"its corresponding cluster ({k}).".format(k=k)
)
continue
in_cluster_distances = D[
cluster_k_idxs, cluster_k_idxs[:, np.newaxis]
]
# Calculate all costs from each point to all others in the cluster
in_cluster_all_costs = np.sum(in_cluster_distances, axis=1)
min_cost_idx = np.argmin(in_cluster_all_costs)
min_cost = in_cluster_all_costs[min_cost_idx]
curr_cost = in_cluster_all_costs[
np.argmax(cluster_k_idxs == medoid_idxs[k])
]
            # Adopt the new medoid if its cost is smaller than the current one
if min_cost < curr_cost:
medoid_idxs[k] = cluster_k_idxs[min_cost_idx]
def transform(self, X):
"""Transforms X to cluster-distance space.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Data to transform.
Returns
-------
X_new : {array-like, sparse matrix}, shape=(n_query, n_clusters)
X transformed in the new space of distances to cluster centers.
"""
X = check_array(X, accept_sparse=["csr", "csc"])
if self.metric == "precomputed":
check_is_fitted(self, "medoid_indices_")
return X[:, self.medoid_indices_]
else:
check_is_fitted(self, "cluster_centers_")
Y = self.cluster_centers_
return pairwise_distances(X, Y=Y, metric=self.metric)
def predict(self, X):
"""Predict the closest cluster for each sample in X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
New data to predict.
Returns
-------
labels : array, shape = (n_query,)
Index of the cluster each sample belongs to.
"""
X = check_array(X, accept_sparse=["csr", "csc"])
if self.metric == "precomputed":
check_is_fitted(self, "medoid_indices_")
return np.argmin(X[:, self.medoid_indices_], axis=1)
else:
check_is_fitted(self, "cluster_centers_")
# Return data points to clusters based on which cluster assignment
# yields the smallest distance
return pairwise_distances_argmin(
X, Y=self.cluster_centers_, metric=self.metric
)
def _compute_inertia(self, distances):
"""Compute inertia of new samples. Inertia is defined as the sum of the
sample distances to closest cluster centers.
Parameters
----------
distances : {array-like, sparse matrix}, shape=(n_samples, n_clusters)
Distances to cluster centers.
Returns
-------
Sum of sample distances to closest cluster centers.
"""
# Define inertia as the sum of the sample-distances
# to closest cluster centers
inertia = np.sum(np.min(distances, axis=1))
return inertia
def _initialize_medoids(self, D, n_clusters, random_state_):
"""Select initial mediods when beginning clustering."""
if self.init == "random": # Random initialization
# Pick random k medoids as the initial ones.
medoids = random_state_.choice(len(D), n_clusters)
elif self.init == "k-medoids++":
medoids = self._kpp_init(D, n_clusters, random_state_)
elif self.init == "heuristic": # Initialization by heuristic
# Pick K first data points that have the smallest sum distance
# to every other point. These are the initial medoids.
medoids = np.argpartition(np.sum(D, axis=1), n_clusters - 1)[
:n_clusters
]
else:
raise ValueError(
"init value '{init}' not recognized".format(init=self.init)
)
return medoids
# Copied from sklearn.cluster.k_means_._k_init
def _kpp_init(self, D, n_clusters, random_state_, n_local_trials=None):
"""Init n_clusters seeds with a method similar to k-means++
Parameters
-----------
D : array, shape (n_samples, n_samples)
The distance matrix we will use to select medoid indices.
n_clusters : integer
The number of seeds to choose
random_state : RandomState
The generator used to initialize the centers.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-medoid clustering in a smart way
        to speed up convergence. See: <NAME>. and <NAME>.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, _ = D.shape
centers = np.empty(n_clusters, dtype=int)
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
center_id = random_state_.randint(n_samples)
centers[0] = center_id
# Initialize list of closest distances and calculate current potential
closest_dist_sq = D[centers[0], :] ** 2
current_pot = closest_dist_sq.sum()
# pick the remaining n_clusters-1 points
for cluster_index in range(1, n_clusters):
rand_vals = (
random_state_.random_sample(n_local_trials) * current_pot
)
candidate_ids = np.searchsorted(
stable_cumsum(closest_dist_sq), rand_vals
)
# Compute distances to center candidates
distance_to_candidates = D[candidate_ids, :] ** 2
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(
closest_dist_sq, distance_to_candidates[trial]
)
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
centers[cluster_index] = best_candidate
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
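# ---------------------------------------------------------------------------
# Illustrative smoke test (not part of the upstream module); the toy data
# below mirrors the example in the class docstring.
if __name__ == "__main__":
    X_demo = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])
    demo = KMedoids(n_clusters=2, random_state=0).fit(X_demo)
    print(demo.labels_)           # expected: [0 0 0 1 1 1]
    print(demo.medoid_indices_)   # indices of the medoid rows in X_demo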
| 2.125 | 2 |
agent.py | shukia/2048-python | 0 | 11224 | <reponame>shukia/2048-python
from logic import *
class Agent:
def __init__(self):
self.matrix = []
self.score = 0
def initialize_game(self):
self.score = 0
self.matrix = new_game(4)
self.matrix = add_two(self.matrix)
self.matrix = add_two(self.matrix)
def move(self, direction):
self.matrix, board_changed, score_change = move(self.matrix, direction)
if board_changed:
self.matrix = add_two(self.matrix)
self.score += score_change
return self.matrix, self.score, game_state(self.matrix)
def simulate_move(self, direction):
mat, board_changed, score_change = move(self.matrix, direction)
return mat, self.score + score_change, game_state(mat), board_changed
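# Illustrative usage sketch (assumes the companion `logic` module is on the
# path; the direction value "up" is an assumption about its encoding):
#
#   agent = Agent()
#   agent.initialize_game()
#   matrix, score, state = agent.move("up")
#   print(score, state)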
| 3.65625 | 4 |
config/synthia_rand_cityscapes.py | BarracudaPff/code-golf-data-pythpn | 0 | 11225 | <reponame>BarracudaPff/code-golf-data-pythpn
problem_type = "segmentation"
dataset_name = "synthia_rand_cityscapes"
dataset_name2 = None
perc_mb2 = None
model_name = "resnetFCN"
freeze_layers_from = None
show_model = False
load_imageNet = True
load_pretrained = False
weights_file = "weights.hdf5"
train_model = True
test_model = True
pred_model = False
debug = True
debug_images_train = 50
debug_images_valid = 50
debug_images_test = 50
debug_n_epochs = 2
batch_size_train = 2
batch_size_valid = 2
batch_size_test = 2
crop_size_train = (512, 512)
crop_size_valid = None
crop_size_test = None
resize_train = None
resize_valid = None
resize_test = None
shuffle_train = True
shuffle_valid = False
shuffle_test = False
seed_train = 1924
seed_valid = 1924
seed_test = 1924
optimizer = "rmsprop"
learning_rate = 0.0001
weight_decay = 0.0
n_epochs = 1000
save_results_enabled = True
save_results_nsamples = 5
save_results_batch_size = 5
save_results_n_legend_rows = 1
earlyStopping_enabled = True
earlyStopping_monitor = "val_jaccard"
earlyStopping_mode = "max"
earlyStopping_patience = 50
earlyStopping_verbose = 0
checkpoint_enabled = True
checkpoint_monitor = "val_jaccard"
checkpoint_mode = "max"
checkpoint_save_best_only = True
checkpoint_save_weights_only = True
checkpoint_verbose = 0
plotHist_enabled = True
plotHist_verbose = 0
LRScheduler_enabled = True
LRScheduler_batch_epoch = "batch"
LRScheduler_type = "poly"
LRScheduler_M = 75000
LRScheduler_decay = 0.1
LRScheduler_S = 10000
LRScheduler_power = 0.9
TensorBoard_enabled = True
TensorBoard_histogram_freq = 1
TensorBoard_write_graph = True
TensorBoard_write_images = False
TensorBoard_logs_folder = None
norm_imageNet_preprocess = True
norm_fit_dataset = False
norm_rescale = 1
norm_featurewise_center = False
norm_featurewise_std_normalization = False
norm_samplewise_center = False
norm_samplewise_std_normalization = False
norm_gcn = False
norm_zca_whitening = False
cb_weights_method = None
da_rotation_range = 0
da_width_shift_range = 0.0
da_height_shift_range = 0.0
da_shear_range = 0.0
da_zoom_range = 0.5
da_channel_shift_range = 0.0
da_fill_mode = "constant"
da_cval = 0.0
da_horizontal_flip = True
da_vertical_flip = False
da_spline_warp = False
da_warp_sigma = 10
da_warp_grid_size = 3
da_save_to_dir = False | 1.742188 | 2 |
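# Illustrative consumption sketch for the config module above (assumption:
# the training framework imports it and reads attributes by name):
#
#   import config.synthia_rand_cityscapes as cfg
#   print(cfg.model_name, cfg.n_epochs, cfg.crop_size_train)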
utils/HTMLParser.py | onyb/janitor | 0 | 11226 | from bs4 import BeautifulSoup
from optimizers.AdvancedJSOptimizer import AdvancedJSOptimizer
from optimizers.CSSOptimizer import CSSOptimizer
class HTMLParser(object):
def __init__(self, html):
self.soup = BeautifulSoup(html, 'lxml')
def js_parser(self):
        for script in self.soup.find_all('script'):
            if not script.string:  # skip external (src=...) and empty scripts
                continue
            opt = AdvancedJSOptimizer()
            script.string = opt.process(script.string)
def css_parser(self):
        for style in self.soup.find_all('style'):
            if not style.string:  # skip empty <style> tags
                continue
            opt = CSSOptimizer()
style.string = opt.process(style.string) | 2.671875 | 3 |
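# Illustrative usage sketch for the HTMLParser above (input is hypothetical):
#
#   parser = HTMLParser("<html><script>var a = 1 ;</script></html>")
#   parser.js_parser()
#   print(parser.soup)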
spades/main.py | kuwv/spades | 0 | 11227 | '''Provide interface for game.'''
from typing import Any, Dict, List, Optional, Union
import flask
from flask import Blueprint, url_for
from flask_login import current_user, login_required
from flask_wtf import FlaskForm
from flask_sse import sse
from werkzeug.wrappers import Response
from wtforms import IntegerField, SubmitField
from wtforms.validators import DataRequired, NumberRange
# from spades import exceptions
from spades.game import GameState
from spades.game.models.player import Player
main = Blueprint('main', __name__)
mock_names: List[str] = ['john']
__game: GameState = GameState()
class LobbyForm(FlaskForm):
start_game: SubmitField = SubmitField('start game')
join_game: SubmitField = SubmitField('join game')
class BidForm(FlaskForm):
bid: IntegerField = IntegerField(
'bid',
validators=[
DataRequired(),
NumberRange(min=1, max=13)
]
)
submit: SubmitField = SubmitField('bid')
def get_player() -> Optional[Player]:
player = __game.get_player_by_username(current_user.username)
if not player:
__game.add_player(Player(current_user.username))
player = __game.get_player_by_username(current_user.username)
return player
def get_turns(players: List[Player]) -> List[Dict[str, Any]]:
player_turns: List[Dict[str, Any]] = []
def is_active(turn: int) -> str:
if __game.state != 'playing': # type: ignore
print('gamestate', False)
return 'false'
elif __game.current_turn != turn:
print('turn:', __game.current_turn, turn)
return 'false'
else:
print('active:', True)
return 'true'
for n, player in enumerate(players):
inst = {
'username': player.username,
'active': is_active(n)
}
if player.username == current_user.username:
inst['hand'] = player.hand.to_json # type: ignore
else:
inst['card_count'] = len(player.hand) # type: ignore
player_turns.append(inst)
print('player turns', player_turns)
return player_turns
@main.route('/')
def index() -> str:
'''Provide start page.'''
return flask.render_template('index.html')
@main.route('/lobby', methods=['GET', 'POST'])
@login_required
def lobby() -> Union[Response, str]:
'''Provide lobby to coordinate new games.'''
form = LobbyForm()
if form.validate_on_submit():
if form.join_game.data:
print('join game')
if (
hasattr(__game, 'state') and
__game.state == 'waiting' # type: ignore
):
if not __game.get_player_by_username(
current_user.username
):
__game.add_player(Player(current_user.username))
if __game.check_player_count():
__game.start_game() # type: ignore
return flask.redirect(url_for('main.gameboard'))
# if games != []:
# return flask.render_template(
# 'lobby.html', form=form, games=mock_names
# )
return flask.render_template('lobby.html', form=form)
@main.route('/play', methods=['POST'])
@login_required
def play() -> None:
'''Publish card play for user.'''
username = flask.request.form['username']
rank = flask.request.form['rank']
suit = flask.request.form['suit']
card_played = {'username': username, 'rank': rank, 'suit': suit}
# TODO: submit card to game
print(
'turn',
__game.state, # type: ignore
__game.get_player_turn(username),
__game.current_turn
)
__game.make_play(__game.get_player_turn(username), rank, suit)
sse.publish(card_played, type='play-card')
@main.route('/bids', methods=['GET', 'POST'])
@login_required
def bids() -> Union[Response, str]:
form = BidForm()
if form.validate_on_submit():
player_bid = flask.request.form['bid']
__game.accept_bid(
__game.get_player_turn(current_user.username),
player_bid
)
__game.start_turn() # type: ignore
return flask.redirect(url_for('main.gameboard'))
player = get_player()
return flask.render_template(
'bid.html', form=form, data=player.hand.to_json # type: ignore
)
@main.route('/gameboard')
@login_required
def gameboard() -> Union[Response, str]:
'''Provide gameboard.'''
    # Set up mock players - the game fails with fewer than four
for player_name in mock_names:
if not __game.get_player_by_username(player_name):
__game.add_player(Player(player_name))
# mock end
players = []
player = get_player()
if __game.check_player_count():
if __game.state == 'waiting': # type: ignore
__game.start_game()
print('starting game', __game.state)
if __game.state == 'bidding': # type: ignore
print('cards', player.hand.to_json)
print('accepting bids')
# return flask.redirect(url_for('main.bids'))
if __game.state == 'playing': # type: ignore
print('playing game')
if __game.state == 'cleanup': # type: ignore
print('clean up match')
players = get_turns(__game.players)
if hasattr(player, 'hand'):
print('hand')
return flask.render_template(
'gameboard.html', state=__game.state, data=players # type: ignore
)
else:
print('no hand')
return flask.render_template('gameboard.html')
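# Illustrative wiring sketch (assumption: an app factory elsewhere in the
# package registers this blueprint):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(main)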
| 2.65625 | 3 |
cluster-1.59/heatmap_clustering/binding.gyp | ericaflin/libheatmap | 2 | 11228 | <gh_stars>1-10
{
"targets": [
{
"target_name": "cclust",
"sources": [ "./src/heatmap_clustering_js_module.cpp" ],
'dependencies': ['bonsaiclust']
},
{
'target_name': 'bonsaiclust',
'type': 'static_library',
'sources': [ 'src/cluster.c' ],
'cflags': ['-fPIC', '-I', '-pedantic', '-Wall']
}
]
}
| 0.828125 | 1 |
pcg_gazebo/parsers/urdf/__init__.py | TForce1/pcg_gazebo | 40 | 11229 | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .actuator import Actuator
from .axis import Axis
from .box import Box
from .child import Child
from .collision import Collision
from .color import Color
from .cylinder import Cylinder
from .dynamics import Dynamics
from .gazebo import Gazebo
from .geometry import Geometry
from .hardware_interface import HardwareInterface
from .inertia import Inertia
from .inertial import Inertial
from .joint import Joint
from .limit import Limit
from .link import Link
from .mass import Mass
from .material import Material
from .mechanical_reduction import MechanicalReduction
from .mesh import Mesh
from .mimic import Mimic
from .origin import Origin
from .parent import Parent
from .robot import Robot
from .safety_controller import SafetyController
from .sphere import Sphere
from .texture import Texture
from .transmission import Transmission
from .type import Type
from .visual import Visual
def get_all_urdf_element_classes():
"""Get list of all URDF element classes."""
import sys
import inspect
from ..types import XMLBase
output = list()
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase) and obj._TYPE == 'urdf':
output.append(obj)
return output
def create_urdf_element(tag, *args):
"""URDF element factory.
> *Input arguments*
* `tag` (*type:* `str`): Name of the URDF element.
* `args`: Extra arguments for URDF element constructor.
> *Returns*
URDF element if `tag` refers to a valid URDF element.
`None`, otherwise.
"""
import sys
import inspect
from ..types import XMLBase
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase):
if tag == obj._NAME and obj._TYPE == 'urdf':
return obj(*args)
return None
def create_urdf_type(tag):
"""Return handle of the URDF element type.
> *Input arguments*
* `tag` (*type:* `str`): Name of the URDF element.
> *Returns*
URDF element type if `tag` is valid, `None` otherwise`.
"""
import sys
import inspect
from ..types import XMLBase
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase):
if tag == obj._NAME and obj._TYPE == 'urdf':
return obj
return None
def is_urdf_element(obj):
"""Test if XML element is an URDF element."""
from ..types import XMLBase
return obj.__class__ in XMLBase.__subclasses__() and \
obj._TYPE == 'urdf'
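# Illustrative usage sketch (assumes Box._NAME == 'box', which follows the
# naming pattern of the element classes above):
#
#   box = create_urdf_element('box')
#   print(is_urdf_element(box))  # True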
__all__ = [
'get_all_urdf_element_classes',
'create_urdf_element',
'create_urdf_type',
'is_urdf_element',
'Actuator',
'Axis',
'Box',
'Child',
'Collision',
'Color',
'Cylinder',
'Dynamics',
'Gazebo',
'Geometry',
'HardwareInterface',
'Inertia',
'Inertial',
'Joint',
'Limit',
'Link',
'Mass',
'Material',
'MechanicalReduction',
'Mesh',
'Mimic',
'Origin',
'Parent',
'Robot',
'SafetyController',
'Sphere',
'Texture',
'Transmission',
'Type',
'Visual'
]
| 1.945313 | 2 |
homework/supporting.py | viaviare/MyFirstRepository | 0 | 11230 | <reponame>viaviare/MyFirstRepository
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
class Supporting:
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(driver, 10)
def is_element_present(self, driver, *args):
try:
            driver.find_element(*args)  # use the driver that was passed in
return True
except NoSuchElementException:
return False
def implicit_wait(self):
self.driver.implicitly_wait(10) | 2.640625 | 3 |
vplsSinVlan.py | javicond3/mininetVPLS | 0 | 11231 | """Custom topology example
Six switches in a partial mesh, with one host attached to each switch:
host --- switch --- ... --- switch --- host
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=vpls' from the command line.
"""
from mininet.topo import Topo
class MyTopo( Topo ):
"Simple topology example."
def __init__( self ):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add hosts and switches
h1 = self.addHost('h1', mac='00:00:00:00:00:01')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
h4 = self.addHost('h4')
h5 = self.addHost('h5')
h6 = self.addHost('h6')
s1 = self.addSwitch('s1')
s2 = self.addSwitch('s2')
s3 = self.addSwitch('s3')
s4 = self.addSwitch('s4')
s5 = self.addSwitch('s5')
s6 = self.addSwitch('s6')
# Add links
self.addLink(s1, h1, port1=1, port2=0)
self.addLink(s2, h2, port1=1, port2=0)
self.addLink(s3, h3, port1=1, port2=0)
self.addLink(s4, h4, port1=1, port2=0)
self.addLink(s5, h5, port1=1, port2=0)
self.addLink(s6, h6, port1=1, port2=0)
self.addLink(s1, s2)
self.addLink(s2, s3)
self.addLink(s3, s4)
self.addLink(s4, s1)
self.addLink(s4, s2)
self.addLink(s1, s5)
self.addLink(s4, s5)
self.addLink(s2, s6)
self.addLink(s3, s6)
topos = { 'vpls': ( lambda: MyTopo() ) }
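# Illustrative invocation sketch (assumes Mininet is installed):
#
#   sudo mn --custom vplsSinVlan.py --topo=vpls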
| 3.953125 | 4 |
modin/engines/base/io/column_stores/feather_dispatcher.py | webclinic017/modin | 1 | 11232 | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses `FeatherDispatcher` class, that is used for reading `.feather` files."""
from modin.engines.base.io.column_stores.column_store_dispatcher import (
ColumnStoreDispatcher,
)
class FeatherDispatcher(ColumnStoreDispatcher):
"""
Class handles utils for reading `.feather` files.
Inherits some common for columnar store files util functions from
`ColumnStoreDispatcher` class.
"""
@classmethod
def _read(cls, path, columns=None, **kwargs):
"""
Read data from the file path, returning a query compiler.
Parameters
----------
path : str or file-like object
The filepath of the feather file.
columns : array-like, optional
Columns to read from file. If not provided, all columns are read.
**kwargs : dict
`read_feather` function kwargs.
Returns
-------
BaseQueryCompiler
Query compiler with imported data for further processing.
Notes
-----
`PyArrow` engine and local files only are supported for now,
multi threading is set to False by default.
PyArrow feather is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/api.html#feather-format
"""
if columns is None:
from pyarrow.feather import read_feather
df = read_feather(path)
# pyarrow.feather.read_feather doesn't support columns as pandas.Index
columns = list(df.columns)
return cls.build_query_compiler(path, columns, use_threads=False)
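# Illustrative call sketch (assumes a concrete engine subclass supplies
# build_query_compiler; the file name is hypothetical):
#
#   qc = FeatherDispatcher._read("data.feather", columns=["a", "b"])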
| 2.125 | 2 |
rnnparser/RecursiveNN/tests_npRNN/test_tree_utils.py | uphere-co/nlp-prototype | 0 | 11233 | import pytest
from npRNN.tree_utils import Node, NodeTree
def test_merge_results():
#sentence='I know a name of the cat on a hat'
sentence='a name of the cat on a hat'
words=[Node(word) for word in sentence.split()]
tree=NodeTree(words, [0, 5, 3, 1, 2, 0, 0])
assert tree.phrase.name =='(((a name) (of the)) ((cat on) (a hat)))'
assert tree.phrase.depth==3
assert tree.history == [0, 5, 3, 1, 2, 0, 0]
tree=NodeTree(words, [0, 5, 0, 0, 1, 1, 0])
assert tree.phrase.name =='((((a name) of) the) ((cat on) (a hat)))'
assert tree.phrase.depth==4
assert tree.history == [0, 5, 0, 0, 1, 1, 0]
tree=NodeTree(words, [2,0,3,2,2,0,0])
assert tree.phrase.name =='(((a name) (of the)) ((cat (on a)) hat))'
assert tree.phrase.depth==4
assert tree.history == [2,0,3,2,2,0,0]
def test_merge_direction():
sentence='a name of the cat on a hat'
words=[Node(word) for word in sentence.split()]
merge_history=[3,1,1,0,2,1,0]
all_nodes, _ =NodeTree.directed_merge(words,merge_history)
    print(all_nodes)
composites=all_nodes[len(words):]
    print(composites)
left_merged=NodeTree.get_merge_direction(composites)
expected_left_merged = [[True, False, False, True],[True, True, False, True],\
[True, False, True],[True, True],[True, False, False],[True, False],[True]]
assert left_merged == expected_left_merged
depths = [x.depth for x in composites]
assert depths==[1, 1, 2, 3, 1, 2, 4]
| 2.90625 | 3 |
pyrefine/script.py | jezcope/pyrefine | 27 | 11234 | """A script is a series of operations."""
import json
import os
from .ops import create
class Script(object):
"""A script is a series of operations."""
def __init__(self, s=None):
"""Parse a script from a JSON string."""
if s is not None:
self.parsed_script = json.loads(s)
self.operations = [create(params)
for params in self.parsed_script]
def __len__(self):
"""Return the number of operations."""
return len(self.operations)
def execute(self, data):
"""Execute all operations on the provided dataset.
Args:
data (:class:`pandas.DataFrame`): The data to transform. Not
guaranteed immutable.
Returns:
:class:`pandas.DataFrame`: The transformed data.
"""
for op in self.operations:
data = op(data)
return data
def load_script(f):
"""Load and parse the script given.
Args:
f (:class:`file` or :class:`str`): Open file object or filename.
Returns:
:class:`Script`: The parsed script object.
"""
if isinstance(f, (str, os.PathLike)):
f = open(f)
with f:
return parse(f.read())
parse = Script
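# Illustrative usage sketch (the file name and DataFrame are hypothetical):
#
#   script = load_script("history.json")
#   cleaned = script.execute(df)  # df: a pandas.DataFrame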
| 3.609375 | 4 |
makeCourse/plastex/mhchem/__init__.py | dualspiral/makecourse | 0 | 11235 | <reponame>dualspiral/makecourse<filename>makeCourse/plastex/mhchem/__init__.py<gh_stars>0
from plasTeX import Command, Environment, sourceChildren
from plasTeX.Base.LaTeX import Math
from plasTeX.Base.TeX.Primitives import BoxCommand
# mhchem package - mostly handled by mathjax
# Override box commands inside MathJax to avoid an extra <script type="math/tex">
class MHBoxCommand(BoxCommand):
class math(Math.math):
@property
def source(self):
if self.hasChildNodes():
return u'$%s$' % sourceChildren(self)
return '$'
class ce(MHBoxCommand):
args = 'self'
class pu(MHBoxCommand):
args = 'self'
| 2.140625 | 2 |
src/data_augmentation.py | pallabganguly/gestures-cnn | 1 | 11236 | """
Totally untested file. Will be removed in subsequent commits
"""
import tensorflow as tf
import matplotlib.image as mpimg
import numpy as np
from math import ceil, floor
import os
IMAGE_SIZE = 720
def central_scale_images(X_imgs, scales):
# Various settings needed for Tensorflow operation
boxes = np.zeros((len(scales), 4), dtype = np.float32)
for index, scale in enumerate(scales):
x1 = y1 = 0.5 - 0.5 * scale # To scale centrally
x2 = y2 = 0.5 + 0.5 * scale
boxes[index] = np.array([y1, x1, y2, x2], dtype = np.float32)
box_ind = np.zeros((len(scales)), dtype = np.int32)
crop_size = np.array([IMAGE_SIZE, IMAGE_SIZE], dtype = np.int32)
X_scale_data = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (1, IMAGE_SIZE, IMAGE_SIZE, 3))
# Define Tensorflow operation for all scales but only one base image at a time
tf_img = tf.image.crop_and_resize(X, boxes, box_ind, crop_size)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img_data in X_imgs:
batch_img = np.expand_dims(img_data, axis = 0)
scaled_imgs = sess.run(tf_img, feed_dict = {X: batch_img})
X_scale_data.extend(scaled_imgs)
X_scale_data = np.array(X_scale_data, dtype = np.float32)
return X_scale_data
def get_translate_parameters(index):
if index == 0: # Translate left 20 percent
offset = np.array([0.0, 0.2], dtype = np.float32)
size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32)
w_start = 0
w_end = int(ceil(0.8 * IMAGE_SIZE))
h_start = 0
h_end = IMAGE_SIZE
elif index == 1: # Translate right 20 percent
offset = np.array([0.0, -0.2], dtype = np.float32)
size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32)
w_start = int(floor((1 - 0.8) * IMAGE_SIZE))
w_end = IMAGE_SIZE
h_start = 0
h_end = IMAGE_SIZE
elif index == 2: # Translate top 20 percent
offset = np.array([0.2, 0.0], dtype = np.float32)
size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32)
w_start = 0
w_end = IMAGE_SIZE
h_start = 0
h_end = int(ceil(0.8 * IMAGE_SIZE))
else: # Translate bottom 20 percent
offset = np.array([-0.2, 0.0], dtype = np.float32)
size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32)
w_start = 0
w_end = IMAGE_SIZE
h_start = int(floor((1 - 0.8) * IMAGE_SIZE))
h_end = IMAGE_SIZE
return offset, size, w_start, w_end, h_start, h_end
def translate_images(X_imgs):
offsets = np.zeros((len(X_imgs), 2), dtype = np.float32)
n_translations = 4
X_translated_arr = []
tf.reset_default_graph()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(n_translations):
X_translated = np.zeros((len(X_imgs), IMAGE_SIZE, IMAGE_SIZE, 3),
dtype = np.float32)
X_translated.fill(0.0) # Filling background color
base_offset, size, w_start, w_end, h_start, h_end = get_translate_parameters(i)
offsets[:, :] = base_offset
glimpses = tf.image.extract_glimpse(X_imgs, size, offsets)
glimpses = sess.run(glimpses)
X_translated[:, h_start: h_start + size[0], \
w_start: w_start + size[1], :] = glimpses
X_translated_arr.extend(X_translated)
X_translated_arr = np.array(X_translated_arr, dtype = np.float32)
return X_translated_arr
def rotate_images(X_imgs):
X_rotate = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
k = tf.placeholder(tf.int32)
tf_img = tf.image.rot90(X, k = k)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img in X_imgs:
for i in range(3): # Rotation at 90, 180 and 270 degrees
rotated_img = sess.run(tf_img, feed_dict = {X: img, k: i + 1})
X_rotate.append(rotated_img)
X_rotate = np.array(X_rotate, dtype = np.float32)
return X_rotate
def flip_images(X_imgs):
X_flip = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
tf_img1 = tf.image.flip_left_right(X)
tf_img2 = tf.image.flip_up_down(X)
tf_img3 = tf.image.transpose_image(X)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img in X_imgs:
flipped_imgs = sess.run([tf_img1, tf_img2, tf_img3], feed_dict = {X: img})
X_flip.extend(flipped_imgs)
X_flip = np.array(X_flip, dtype = np.float32)
return X_flip
# Produce each image at scaling of 90%, 75% and 60% of original image.
image_dir = "/home/pallab/gestures-cnn/images/resized/"
# Load the images as arrays; os.listdir alone would only yield file names.
X_imgs = [mpimg.imread(os.path.join(image_dir, fname)) for fname in os.listdir(image_dir)]
scaled_imgs = central_scale_images(X_imgs, [0.90, 0.75, 0.60])
translated_imgs = translate_images(X_imgs)
rotated_imgs = rotate_images(X_imgs)
flipped_images = flip_images(X_imgs)
| 2.40625 | 2 |
link_to_the_past/hashes.py | zsquareplusc/lttp-backup | 0 | 11237 | #!/usr/bin/env python3
# encoding: utf-8
#
# (C) 2012-2016 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Link To The Past - a backup tool
Hash functions and commands.
"""
import hashlib
import zlib
class CRC32(object):
"""\
CRC32 API compatible to the hashlib functions (subset used by this program).
>>> h = CRC32()
>>> h.update(b'Hello World')
>>> h.hexdigest()
'4a17b156'
"""
def __init__(self):
self.value = 0
def update(self, data):
self.value = zlib.crc32(data, self.value) & 0xffffffff
def hexdigest(self):
return '{:08x}'.format(self.value)
class NoHash(object):
"""\
API compatible to the hashlib functions (subset used by this program).
>>> h = NoHash()
>>> h.update(b'Hello World')
>>> h.hexdigest()
'-'
"""
def __init__(self):
pass
def update(self, data):
pass
def hexdigest(self):
return '-'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
SUPPORTED_HASHES = {
'NONE': NoHash,
'CRC32': CRC32,
'MD5': hashlib.md5,
'SHA-256': hashlib.sha256,
'SHA-512': hashlib.sha512,
}
def get_factory(name):
"""\
Get an object for calculating a hash.
>>> f = get_factory('SHA-256')
>>> h = f()
>>> h.update(b'Hello World')
>>> h.hexdigest()
'a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e'
"""
if name is None:
name = 'NONE'
return SUPPORTED_HASHES[name.upper()]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
import doctest
doctest.testmod()
| 2.6875 | 3 |
Interview/langTrans.py | dnootana/Python | 1 | 11238 | <reponame>dnootana/Python<filename>Interview/langTrans.py
#!/usr/bin/env python3.8
table="".maketrans("0123456789","\N{Devanagari digit zero}\N{Devanagari digit one}"
"\N{Devanagari digit two}\N{Devanagari digit three}"
"\N{Devanagari digit four}\N{Devanagari digit five}"
"\N{Devanagari digit six}\N{Devanagari digit seven}"
"\N{Devanagari digit eight}\N{Devanagari digit nine}")
print("0123456789".translate(table)) | 2.078125 | 2 |
cutde/opencl.py | brendanjmeade/cutde | 1 | 11239 | <gh_stars>1-10
import logging
import warnings
import pyopencl
import pyopencl.array
logger = logging.getLogger(__name__)
gpu_initialized = False
gpu_ctx = None
gpu_queue = None
def report_devices(ctx):
device_names = [d.name for d in ctx.devices]
logger.info("initializing opencl context with devices = " + str(device_names))
def initialize_with_ctx(ctx):
global gpu_initialized, gpu_ctx, gpu_queue
gpu_ctx = ctx
gpu_queue = pyopencl.CommandQueue(
gpu_ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE
)
gpu_initialized = True
report_devices(ctx)
def avoid_apple_cpu(ctx):
"""
The Apple CPU OpenCL implementation is awful. Instead, we should just use
PoCL.
"""
if ctx.devices[0].platform.name == "Apple" and "CPU" in ctx.devices[0].name:
platforms = pyopencl.get_platforms()
platform_idx = None
for i, p in enumerate(platforms):
if p.name != "Apple":
platform_idx = i
else:
apple_platform_idx = i
if platform_idx is not None:
warnings.warn(
"The OpenCL context created used the Apple CPU"
" implementation which is not supported. Trying again"
f" with a different platform: {p.name}"
)
return pyopencl.create_some_context(answers=[str(platform_idx)])
# If no other platforms were found, let's try to
# find a non-CPU device like an Iris Pro.
platform_idx = apple_platform_idx
device_idx = None
for i, d in enumerate(platforms[platform_idx].get_devices()):
if "CPU" in d.name:
continue
device_idx = i
break
if device_idx is not None:
warnings.warn(
"The OpenCL context created used the Apple CPU"
" implementation which is not supported. Trying again"
f" with a different device: {d.name}"
)
return pyopencl.create_some_context(
answers=[str(platform_idx), str(device_idx)]
)
raise NotImplementedError(
"cutde does not support the Apple CPU OpenCL implementation and no other"
" platform or device was found. Please consult the cutde README"
)
return ctx
def ensure_initialized():
global gpu_initialized
if not gpu_initialized:
ctx = pyopencl.create_some_context()
ctx = avoid_apple_cpu(ctx)
initialize_with_ctx(ctx)
def ptr(arr):
if type(arr) is pyopencl.array.Array:
return arr.data
return arr
def to_gpu(arr, float_type):
ensure_initialized()
if type(arr) is pyopencl.array.Array:
return arr
to_type = arr.astype(float_type)
return pyopencl.array.to_device(gpu_queue, to_type)
def zeros_gpu(shape, float_type):
ensure_initialized()
return pyopencl.array.zeros(gpu_queue, shape, float_type)
def empty_gpu(shape, float_type):
ensure_initialized()
return pyopencl.array.empty(gpu_queue, shape, float_type)
def threaded_get(arr):
return arr.get()
class ModuleWrapper:
def __init__(self, module):
self.module = module
def __getattr__(self, name):
kernel = getattr(self.module, name)
def provide_queue_wrapper(*args, grid=None, block=None, **kwargs):
global_size = [b * g for b, g in zip(grid, block)]
arg_ptrs = [ptr(a) for a in args]
return kernel(gpu_queue, global_size, block, *arg_ptrs, **kwargs)
return provide_queue_wrapper
def compile(code):
ensure_initialized()
compile_options = []
# debug_opts = ["-g", "-Werror"]
# compile_options.extend(debug_opts)
fast_opts = [
# '-cl-finite-math-only',
"-cl-unsafe-math-optimizations",
# '-cl-no-signed-zeros',
"-cl-mad-enable",
# '-cl-strict-aliasing'
]
compile_options.extend(fast_opts)
return ModuleWrapper(pyopencl.Program(gpu_ctx, code).build(options=compile_options))
cluda_preamble = """
// taken from pyopencl._cluda
#define LOCAL_BARRIER barrier(CLK_LOCAL_MEM_FENCE)
// 'static' helps to avoid the "no previous prototype for function" warning
#if __OPENCL_VERSION__ >= 120
#define WITHIN_KERNEL static
#else
#define WITHIN_KERNEL
#endif
#define KERNEL __kernel
#define GLOBAL_MEM __global
#define LOCAL_MEM __local
#define LOCAL_MEM_DYNAMIC __local
#define LOCAL_MEM_ARG __local
#define CONSTANT __constant
// INLINE is already defined in Beignet driver
#ifndef INLINE
#define INLINE inline
#endif
#define SIZE_T size_t
#define VSIZE_T size_t
// used to align fields in structures
#define ALIGN(bytes) __attribute__ ((aligned(bytes)))
#if defined(cl_khr_fp64)
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#elif defined(cl_amd_fp64)
#pragma OPENCL EXTENSION cl_amd_fp64: enable
#endif
"""
| 2.484375 | 2 |
tests/test_lamost_tools.py | igomezv/astroNN | 156 | 11240 | import unittest
import numpy as np
from astroNN.lamost import wavelength_solution, pseudo_continuum
class LamostToolsTestCase(unittest.TestCase):
def test_wavelength_solution(self):
wavelength_solution()
wavelength_solution(dr=5)
self.assertRaises(ValueError, wavelength_solution, dr=1)
def test_norm(self):
pseudo_continuum(np.ones(3909), np.ones(3909))
if __name__ == '__main__':
unittest.main()
| 2.359375 | 2 |
phase-iii-client/services/aureas/views.py | williamegomez/AUREAS | 5 | 11241 | from django.http import HttpResponse
from rest_framework.decorators import api_view
from rest_framework.decorators import parser_classes
from rest_framework.parsers import JSONParser
import numpy as np
import json
import os
from .utils.spectrogram_utils import SpectrogramUtils
from .utils.feature_extraction_utils import FeatureExtractionUtils
from .utils.classification_utils import ClassificationUtils
from .utils.file_utils import FileUtils
from .utils.dir_utils import DirUtils
from .constants.headers import headers_data, headers_clusters, headers_clusters_no_display
file_utils = FileUtils()
dir_utils = DirUtils()
@api_view(['GET'])
@parser_classes((JSONParser,))
def get_species(request):
species = os.listdir('clusters/model/')
species_data = []
for specie in species:
with open('clusters/model/' + specie, 'r') as infile:
data = json.load(infile)
species_data.append(data)
return HttpResponse(json.dumps(species_data, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def get_clusters(request):
if request.method == 'POST':
data = request.data
directory = data['dir']
files = data['files']
features, segs, metadata = file_utils.process_files(
directory, files)
classification_utils = ClassificationUtils()
ex_level = 1
it_num = 5
data = np.hstack((features, metadata[:, 6].astype(float)[:, None]))
mad = 'binomial'
gad = '3pi'
datanorm, mininums, maximums = classification_utils.norm(data)
recon, mean_class, std_class = classification_utils.lamda(
ex_level, it_num, datanorm, mad, gad)
representive_calls = file_utils.get_representative_calls(
recon, datanorm, metadata)
keys_results = [header['label'] for header in headers_data]
keys_clusters = [header['label'] for header in headers_clusters]
keys_clusters_no_display = [header['label']
for header in headers_clusters_no_display]
data_results = []
for i, value in enumerate(metadata):
            values = [value[0], str(recon[i]),
                      *value[1:].tolist(), datanorm[i].tolist()]
zipbObj = zip(keys_results, values)
data_results.append(dict(zipbObj))
data_clusters = []
for i, value in enumerate(representive_calls):
zipbObj = zip(keys_clusters + keys_clusters_no_display, value)
data_clusters.append(dict(zipbObj))
response = {
'results': {
'headers': headers_data,
'data': data_results,
'model': {
'features': datanorm.tolist(),
'min_values': mininums.tolist(),
'max_values': maximums.tolist(),
'metadata': metadata.tolist()
}
},
'clusters': {
'headers': headers_clusters,
'data': data_clusters
}
}
return HttpResponse(json.dumps(response, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def get_segment_in_image(request):
if request.method == 'POST':
data = request.data
spectrogram_utils = SpectrogramUtils()
filename = spectrogram_utils.get_segment_in_image(data['dir'],
data['filename'], 1, float(data['start']) - 0.5, float(data['end']) + 0.5, float(data['min_freq']) - 200, float(data['max_freq']) + 200)
response = {
'url': filename
}
return HttpResponse(json.dumps(response, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def save_cluster(request):
if request.method == 'POST':
data = request.data
features = np.array(data['model']['features'])
min_values = data['model']['min_values']
max_values = data['model']['max_values']
metadata = np.array(data['model']['metadata'])
indices = np.array(data['selected'])
audio_path, image_path, metadata_representative = file_utils.save_representative_call(
data['name'], features[indices], metadata[indices])
model = {
'name': data['name'],
'metadata': metadata_representative.tolist(),
'mean_values': np.mean(features[indices], axis=0).tolist(),
'std_values': np.std(features[indices], axis=0).tolist(),
'min_values': min_values,
'max_values': max_values,
'image_path': image_path,
'audio_path': audio_path
}
dir_utils.create_dir('clusters/model/')
with open('clusters/model/' + data['name'], 'w') as outfile:
json.dump(model, outfile)
return HttpResponse(json.dumps(model, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def search_clusters(request):
if request.method == 'POST':
data = request.data
directory = data['dir']
files = data['files']
species = data['species']
features, segs, metadata = file_utils.process_files(
directory, files)
classification_utils = ClassificationUtils()
ex_level = 1
it_num = 5
data = np.hstack((features, metadata[:, 6].astype(float)[:, None]))
mad = 'binomial'
gad = '3pi'
num_datos, num_feat = data.shape
mean_class = 0.5 * np.ones((1, num_feat))
std_class = 0.25 * np.ones((1, num_feat))
min_values = np.empty((0, num_feat))
max_values = np.empty((0, num_feat))
for specie in species:
with open('clusters/model/' + specie, 'r') as infile:
model = json.load(infile)
mean_class = np.vstack(
(mean_class, np.array(model['mean_values'])))
std_class = np.vstack(
(std_class, np.array(model['std_values'])))
min_values = np.vstack(
(min_values, np.array(model['min_values'])))
max_values = np.vstack(
(max_values, np.array(model['max_values'])))
general_min_values = np.min(min_values, axis=0)
general_max_values = np.max(max_values, axis=0)
datanorm, mininums, maximums = classification_utils.norm(
data, general_min_values, general_max_values)
recon = classification_utils.predict_lamda(
ex_level, datanorm, mad, gad, mean_class, std_class)
representive_calls = file_utils.get_representative_calls(
recon, datanorm, metadata)
keys_results = [header['label'] for header in headers_data]
keys_clusters = [header['label'] for header in headers_clusters]
keys_clusters_no_display = [header['label']
for header in headers_clusters_no_display]
data_results = []
for i, value in enumerate(metadata):
species_name = species[recon[i] - 1] if recon[i] > 0 else 'NIC'
values = [value[0], species_name, *
(value[1:].tolist()), datanorm[i]]
zipbObj = zip(keys_results, values)
data_results.append(dict(zipbObj))
data_clusters = []
for i, value in enumerate(representive_calls):
value[0] = species[i - 1] if i > 0 else 'NIC'
zipbObj = zip(keys_clusters + keys_clusters_no_display, value)
data_clusters.append(dict(zipbObj))
response = {
'results': {
'headers': headers_data,
'data': data_results,
'model': {
'features': datanorm.tolist(),
'min_values': mininums.tolist(),
'max_values': maximums.tolist(),
'metadata': metadata.tolist()
}
},
'clusters': {
'headers': headers_clusters,
'data': data_clusters
}
}
return HttpResponse(json.dumps(response, separators=(',', ':')))
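# Illustrative request sketch (the URL prefix depends on the project's
# urls.py and is an assumption here):
#
#   curl -X POST http://localhost:8000/aureas/clusters \
#        -H 'Content-Type: application/json' \
#        -d '{"dir": "/data/recordings", "files": ["rec1.wav"]}'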
| 2.25 | 2 |
PyPoll/Homework/main.py | VioletData/python-challenge | 0 | 11242 | <filename>PyPoll/Homework/main.py
# Modules
import os
import csv
#Set up path for file
csvpath = os.path.join("..", "Resources", "election_data.csv")
#print(csvpath)
total_votes=0
#total_profit=0
#previous_value=0
#current_value=0
#list_changes=[]
print("Election Results")
print("---------------------")
#Open the csv file
with open(csvpath, newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
#print(csvreader)
#Read the header row
csv_header=next(csvreader)
#print(f"CSV Header: {csv_header}")
#Read each row of data after the header
for row in csvreader:
total_votes=total_votes+1
current_value=int(row[0])
#total_profit=total_profit+1
#current_value=int(row[1])
#monthly_diff=current_value-previous_value
#list_changes.append(monthly_diff)
#list_changes.remove("867884")
#previous_value=current_value
#avg_monthly_diff=sum[list_changes]
# Calculate the average of the changes in Profit/Losses over the entire period
# Determine the greatest increase in profits (date and amount) over the entire period
# Determine the greatest decrease in losses (date and amount) over the entire period
print("Total Votes: " + str(total_votes))
print("---------------------")
#print("Total: $"+str(total_profit))
print("---------------------")
#print("Average Change: $" +str(total_profit))
print("---------------------")
#print(row)
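# Sketch of the remaining PyPoll steps (assumes column index 2 holds the
# candidate name, as in the standard homework dataset; the file is re-read
# because the loop above exhausted the reader):
#
#   from collections import Counter
#   with open(csvpath, newline='') as f:
#       reader = csv.reader(f)
#       next(reader)                      # skip header
#       votes = Counter(r[2] for r in reader)
#   for name, count in votes.items():
#       print(f"{name}: {count / total_votes:.3%} ({count})")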
| 4.0625 | 4 |
test/qa-tests/buildscripts/resmokelib/logging/__init__.py | Mrliu8023/mongo-tools | 1 | 11243 | <gh_stars>1-10
"""
Extension to the logging package to support buildlogger.
"""
# Alias the built-in logging.Logger class for type checking arguments. Those interested in
# constructing a new Logger instance should use the loggers.new_logger() function instead.
from logging import Logger
from . import config
from . import buildlogger
from . import flush
from . import loggers
| 1.773438 | 2 |
cohesity_management_sdk/models/azure_cloud_credentials.py | chandrashekar-cohesity/management-sdk-python | 1 | 11244 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class AzureCloudCredentials(object):
"""Implementation of the 'AzureCloudCredentials' model.
Specifies the cloud credentials to connect to a Microsoft
Azure service account.
Attributes:
storage_access_key (string): Specifies the access key to use when
accessing a storage tier in a Azure cloud service.
storage_account_name (string): Specifies the account name to use when
accessing a storage tier in a Azure cloud service.
tier_type (TierTypeAzureCloudCredentialsEnum): Specifies the storage
class of Azure. AzureTierType specifies the storage class for
Azure. 'kAzureTierHot' indicates a tier type of Azure properties
that is accessed frequently. 'kAzureTierCool' indicates a tier
type of Azure properties that is accessed less frequently, and
stored for at least 30 days. 'kAzureTierArchive' indicates a tier
type of Azure properties that is accessed rarely and stored for at
least 180 days.
"""
# Create a mapping from Model property names to API property names
_names = {
"storage_access_key":'storageAccessKey',
"storage_account_name":'storageAccountName',
"tier_type":'tierType'
}
def __init__(self,
storage_access_key=None,
storage_account_name=None,
tier_type=None):
"""Constructor for the AzureCloudCredentials class"""
# Initialize members of the class
self.storage_access_key = storage_access_key
self.storage_account_name = storage_account_name
self.tier_type = tier_type
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
storage_access_key = dictionary.get('storageAccessKey')
storage_account_name = dictionary.get('storageAccountName')
tier_type = dictionary.get('tierType')
# Return an object of this model
return cls(storage_access_key,
storage_account_name,
tier_type)
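# Illustrative round-trip sketch (all values are hypothetical):
#
#   creds = AzureCloudCredentials.from_dictionary({
#       'storageAccountName': 'acct',
#       'storageAccessKey': 'key',
#       'tierType': 'kAzureTierHot'})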
| 2.796875 | 3 |
src/solutions/01.py | NNRepos/AoC-2021-python-solutions | 0 | 11245 | from utils.utils import *
lines = get_input(__file__)
lines_as_nums = lines_to_nums(lines)
def part1(nums):
incr = 0
cur = nums[0]
for num in nums:
if num > cur:
incr += 1
cur = num
return incr
def part2():
nums = []
for i in range(len(lines_as_nums)):
if i < len(lines_as_nums) - 2:
nums.append(lines_as_nums[i] + lines_as_nums[i + 1] + lines_as_nums[i + 2])
return part1(nums)
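# Note: comparing consecutive 3-element windows reduces to nums[i + 3] > nums[i],
# since the two middle terms cancel.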
print("part1:", part1(lines_as_nums))
print("part2:", part2())
| 3.609375 | 4 |
scripts/sqlite_firestore_migration.py | namuan/news-rider | 5 | 11246 | <reponame>namuan/news-rider<gh_stars>1-10
import datetime
import os
import sys
from google.cloud import firestore
from peewee import *
sys.path.append(os.getcwd())
home_dir = os.getenv('HOME')
db_file_path = os.getcwd() + '/../../data/news_rider.db'
print("Reading database from {}".format(db_file_path))
old_db = SqliteDatabase(db_file_path)
class NewsItem(Model):
NewsUrl = CharField(primary_key=True)
NewsTitle = CharField()
TimeStamp = DateTimeField(default=datetime.datetime.now)
class Meta:
database = old_db
db = firestore.Client()
posts_ref = db.collection('posts')
def save_data(url, title, timestamp):
print(f"Adding {url} for database")
posts_ref.add({
'news_url': url,
'news_title': title,
'timestamp': timestamp
})
def exists_in_database(url):
print(f"Checking if {url} exists in database")
news_found_ref = posts_ref.where('news_url', '==', url).limit(1)
return next(news_found_ref.get(), None) is not None
if __name__ == '__main__':
for news_item in NewsItem.select():
if not exists_in_database(news_item.NewsUrl):
save_data(news_item.NewsUrl, news_item.NewsTitle, news_item.TimeStamp)
| 2.921875 | 3 |
tensorforce/core/baselines/mlp_baseline.py | youlei202/tensorforce-lei | 1 | 11247 | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from tensorforce.core.baselines import NetworkBaseline
class MLPBaseline(NetworkBaseline):
"""
Multi-layer perceptron baseline (single-state) consisting of dense layers.
"""
def __init__(self, sizes, scope='mlp-baseline', summary_labels=()):
"""
Multi-layer perceptron baseline.
Args:
sizes: List of dense layer sizes
"""
layers_spec = []
for size in sizes:
layers_spec.append({'type': 'dense', 'size': size})
super(MLPBaseline, self).__init__(layers_spec, scope, summary_labels)
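# Illustrative construction sketch (the layer sizes are hypothetical):
#
#   baseline = MLPBaseline(sizes=[64, 64])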
| 1.953125 | 2 |
km3pipe/utils/rtree.py | kabartay/km3pipe | 2 | 11248 | # coding=utf-8
# Filename: rtree.py
"""
Print the ROOT file structure.
Usage:
rtree FILE
rtree (-h | --help)
rtree --version
Options:
FILE Input file.
-h --help Show this screen.
"""
from __future__ import division, absolute_import, print_function
from km3pipe.io.root import open_rfile
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME> and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def rtree(rfile):
rfile = open_rfile(rfile)
for k in rfile.walk():
print(k)
rfile.close()
def main():
from docopt import docopt
arguments = docopt(__doc__)
rtree(arguments['FILE'])
| 2.984375 | 3 |
task_part1_learning.py | till-lu/cit_lcp_2020 | 0 | 11249 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from psychopy.visual import Window, TextStim
from psychopy.core import wait, Clock, quit
from psychopy.event import clearEvents, waitKeys, Mouse
from psychopy.gui import Dlg
from time import gmtime, strftime
from codecs import open
from random import shuffle, choice, randint
from copy import deepcopy
from psychopy.iohub import launchHubServer
from numpy import mean, std
from datetime import datetime
from itertools import permutations
import random
## for testing
testing = False # True for testing, False for real recording
###
main_ddline = 1 # sec
isi_set = (500, 800, 1100)
instruction_color = '#111111' #formerly = #9999FF
############ MAIN ITEMS - paste from JS
probe_crime_list_1 = ' Ausgeben als : <NAME>\n\n Nachricht an Deckname : <NAME>\n\n Aktion : Operation Kuh\n\n Objekt : Regen Akte\n\n Inhalt des Objektes : Helikopter Pläne\n\n Adresse : Hai Straße'
probe_crime_list_2 = ' Ausgeben als : <NAME>\n\n Nachricht an Deckname : Weißes Shirt\n\n Aktion : Operation Fichte\n\n Objekt : Eulen Akte\n\n Inhalt des Objektes : Messing Pläne\n\n Adresse : Löwen Straße'
crime_list_1 = ["<NAME>", "<NAME>", "Operation Kuh", "Regen Akte", "Helikopter Pläne", "Hai Straße"]
crime_list_2 = ["<NAME>", "Weißes Shirt","Operation Fichte","Eulen Akte","Messing Pläne","Löwen Straße"]
dummy_list_numbers = [0, 1, 2, 3, 4, 5]
training_recall_item = {0 : 'Ausgeben als', 1 : 'Nachricht an Deckname', 2 : 'Aktion', 3 : 'Objekt', 4 : 'Inhalt des Objektes', 5 : 'Adresse'}
rounds = 1
if testing:
escape_key = 'escape'
instr_wait = 0.1
else:
escape_key = 'notallowed'
instr_wait = 0.5
# EXECUTE all main functions here
def execute():
start_input() # prompt to input stuff
# now initiate stuff
set_screen() # creates psychopy screen and stim objects
# window opens
create_file() # created output file
consent_instructions()
training_instruction()
which_round_indicator()
training_software()
which_round_indicator()
training_list()
training_software()
which_round_indicator()
training_list()
training_software()
final_slide()
win.mouseVisible = False # hide mouse
print("************** END OF LEARNING TASK **************")
ending() # saves demographic & final infos, gives feedback
waitKeys(keyList = ['b']) # press B to end the exp (prevents subject from closing window)
quit()
def consent_instructions():
show_instruction("Bitte füllen Sie die Einverständniserklärung zur Teilnahme am Experiment aus. \nSie sollten diese vor sich auf dem Tisch finden. Bei Unklarheiten oder weiteren Fragen heben Sie leise Ihre Hand.\nWenn Sie damit fertig sind, drücken Sie die Leertaste, um mit dem Experiment zu starten.")
show_instruction("Sie werden nun eine Reihe von Aufgaben am Computer durchführen. Bitte lesen und befolgen Sie die Anweisungen sorgfältig. Sollten Sie während des Experiments Fragen haben, melden Sie sich bei der Versuchsleitung, bevor Sie fortfahren.\nDrücken Sie die Leertaste, um die Anweisungen zu sehen.")
def which_round_indicator():
global condition
if rounds == 1:
show_instruction("Es folgt nun die erste Runde, in der die soeben gezeigten Wortpaare abgefragt werden. Geben Sie diese exakt so, wie sie Ihnen eben gezeigt wurden, ein. \nLeertaste drücken, um fortzufahren.")
elif rounds == 2:
show_instruction("Es folgen erneut alle Informationen, die Sie benötigen, wenn Sie sich als Komplize ausgeben. Damit diese Täuschung funktioniert, ist es sehr wichtig, dass jedes Detail der Nachricht korrekt ist. Bitte prägen Sie sich deshalb erneut alle Informationen ein. \nLeertaste drücken, um fortzufahren.")
elif rounds == 3:
show_instruction("Es folgt nun eine dritte und letzte Runde. Die Wortpaare werden noch einmal gezeigt, bevor diese ein letztes Mal abgefragt werden.\nLeertaste drücken, um fortzufahren.")
def training_instruction():
global condition
if condition % 2 != 0:
probe_crime_list = probe_crime_list_1
else:
probe_crime_list = probe_crime_list_2
show_instruction('Sie sollen eine Person kontaktieren, die unter Verdacht steht, kriminelle Aktivitäten begangen zu haben. Schreiben Sie dieser Person eine E-Mail, in der Sie um die Übergabe illegal erlangter Dokumente bitten. Dazu geben Sie sich als einer der Komplizen der Person aus und loggen sich in den Mail-Account dieses Komplizen ein. In der Nachricht bitten Sie den Verdächtigen, dass er Sie an einem bestimmten Ort trifft und die entsprechenden Dokumente bei sich hat. Die Informationen, die Sie für diese Aufgabe benötigen werden, werden Ihnen gleich präsentiert.\n\nDrücken Sie die Leertaste um fortzufahren.')
show_instruction('Für das Verfassen der E-Mail werden Sie die folgenden Informationen brauchen. Sie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen. Drücken Sie daher erst die Leertaste, wenn Sie die unten stehenden Wortpaare, die für das Verfassen der Nachricht benötigt werden, gründlich auswendig gelernt haben. Im Folgenden werden diese in drei Runden abgefragt.\n\n' + probe_crime_list)
def training_list():
global condition
if condition % 2 != 0:
probe_crime_list = probe_crime_list_1
else:
probe_crime_list = probe_crime_list_2
show_instruction('Drücken Sie die Leertaste, wenn Sie die unten stehenden Items gründlich auswendig gelernt haben.\nSie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen.\n\n' + probe_crime_list)
def training_software():
global condition, required, typedin, rounds
required_items = []
if condition % 2 != 0:
required_items = crime_list_1
else:
required_items = crime_list_2
combine_shuffle = list(zip(required_items, dummy_list_numbers))
shuffle(combine_shuffle)
required_items[:], dummy_list_numbers[:] = zip(*combine_shuffle)
counter = 0
while counter <= 5:
required = required_items[counter]
cue = training_recall_item[dummy_list_numbers[counter]]
counter += 1
instr_display = TextStim(win, color=instruction_color, font='Helvetica', text = u'Bitte geben Sie im Folgenden das korrekte, zuvor auswendig gelernte Wortpaar ein, drücken Sie dann ENTER.', pos=(0, 150), height=30, wrapWidth=1100, colorSpace='rgb')
input_prompt = TextStim(win, color=instruction_color, font='Helvetica', text = cue + ':', pos=(-100, 0), alignHoriz = 'right', height=35)
input_display = TextStim(win, color='black', pos=(-100, -4), alignHoriz = 'left', height=35, bold = True, colorSpace='rgb')
typedin = ''
while True:
input_display.setText(typedin)
instr_display.draw()
input_prompt.draw()
input_display.draw()
win.flip()
char = waitKeys()[0]
if char == 'backspace' and len(typedin) > 0:
typedin = typedin[:-1]
elif char == escape_key:
break
elif char == 'return':
if len( trm(typedin) ) > 0:
break
elif len(char) == 1 and char.isalpha():
typedin += char.upper()
elif char == 'space':
typedin += ' '
elif char == 'comma':
typedin += ','
typedin_words = trm(typedin)
add_resp()
if counter <= 5:
wait(0.5)
else:
break
rounds += 1
def final_slide():
show_instruction("Sie haben nun alle relevanten Informationen gelernt. Bitte führen Sie die Aufgabe nun aus, indem Sie im Google Chrome Browser auf webmail.univie.ac.at gehen und sich dort mit dem eingespeicherten user:account einloggen und die Nachricht mit den gelernten Informationen verfassen und senden. Wenden Sie sich bitte an die Versuchsleitung, um zum Desktop zu gelangen und führen Sie die Aufgabe dann eigenständig aus. Sollten Sie weitere Fragen haben, wenden Sie sich bitte ebenfalls an die Versuchsleitung.")
waitKeys(keyList = ['b'])
def set_screen(): # screen properties
global win, start_text, left_label, right_label, center_disp, instruction_page
win = Window([1280, 1000], color='#dddddd', fullscr = 1, units = 'pix', allowGUI = True) # 1280 1024
start_text = TextStim(win, color=instruction_color, font='Helvetica', text = u'Um anzufangen, bitte die Leertaste drücken.', pos = [0,-300], height=35, bold = True, wrapWidth= 1100)
left_label = TextStim(win, color='#111111', font='Verdana', text = 'unvertraut', pos = [-350,-160], height=35, alignHoriz='center')
right_label = TextStim(win, color='#111111', font='Verdana', text = 'vertraut', pos = [350,-160], height=35, alignHoriz='center')
center_disp = TextStim(win, color='#111111', font='Arial', text = '', height = 60)
instruction_page = TextStim(win, wrapWidth = 1200, height = 28, font='Helvetica', color = instruction_color)
def start_input():
global subj_id, dems, condition, gender
input_box = Dlg(title=u'Grunddaten', labelButtonOK=u'OK', labelButtonCancel=u'Abbrechen')
input_box.addText(text=u'')
input_box.addField(label=u'c.', tip = '1-8')
input_box.addField(label=u'VP', tip = 'Ziffern')
input_box.addText(text=u'')
input_box.addText(text=u'Bitte ausfüllen:')
input_box.addField(label=u'Geschlecht', initial = '', choices=[u'männlich',u'weiblich', u'divers'] )
input_box.addField(label=u'Alter', tip = 'Ziffern')
input_box.addText(text=u'')
input_box.show()
if input_box.OK:
stop = False
try:
condition = int(input_box.data[0])
except ValueError:
condition = 99
print("Condition must be a number!")
## CONDITIONS:
# use condition nos. for control vs. experimental group
# plus for guilty vs innocent block first
# 1 probes 1 + exp + crime first
# 2 probes 2 + exp + nocrime first
# 3 probes 1 + exp + nocrime first
# 4 probes 2 + exp + crime first
# 5 probes 1 + control + crime first
# 6 probes 2 + control + no crime first
# 7 probes 1 + control + no crime first
# 8 probes 2 + control + crime first first
# check if variables correctly given
if condition not in range(1,9):
if testing:
condition = 1 # set value for testing to skip Dlg input box
print("condition was not set, now set to " + str(condition) + " for testing.")
else:
print("condition was not set correctly (should be 1/2/3/4/5/6/7/8)")
stop = True
try:
subj_num = int(input_box.data[1])
except ValueError:
if testing:
subj_num = 99 # set value for testing to skip Dlg input box
print("subj_num was not set, now set to " + str(subj_num) + " for testing.")
else:
print("vp (subject number) was not set correctly (should be simple number)")
stop = True
try:
age = int(input_box.data[3])
except ValueError:
if testing:
age = 11 # set value for testing to skip Dlg input box
print("age was not set, now set to " + str(age) + " for testing.")
else:
print("age was not set correctly (should be simple number)")
stop = True
if stop:
print("\nTry again with correct inputs.\n")
quit()
subj_id = str(subj_num).zfill(3) + "_" + str(strftime("%Y%m%d%H%M%S", gmtime()))
if input_box.data[2] == 'weiblich':
gender = 2
elif input_box.data[2] == 'männlich':
gender = 1
else:
gender = 3
dems = 'dems\tgender/age\t' + str(gender) + '/' + str(age)
start_date = datetime.now()
else:
quit()
def create_file():
global data_out
f_name = 'lcp1_learning_' + str(condition) + "_" + subj_id + '.txt'
data_out=open(f_name, 'a', encoding='utf-8')
data_out.write( '\t'.join( [ "subject_id", "condition", "probe_item", "typed_in", "similarityscore", "rounds" ] ) + "\n" )
print("File created:", f_name)
def show_instruction(instruction_text):
instruction_page.setText(instruction_text)
instruction_page.draw()
win.flip()
wait(instr_wait)
inst_resp = waitKeys(keyList = ['space', escape_key])
end_on_esc(inst_resp[0])
def end_on_esc(escap):
if escap == escape_key : # escape
print("Trying to escape?")
instruction_page.setText('Sure you want to discontinue and quit the experiment?\n\nPress "y" to quit, or press "n" to continue.')
instruction_page.draw()
win.flip()
wait(1)
quit_resp = waitKeys(keyList = ['y', 'n'])
if quit_resp[0] == 'y':
print("************ ESCAPED ************")
data_out.close()
win.close()
quit()
else:
clearEvents()
print("Continuing...")
# from https://github.com/luosch/similar_text
def similar_str(str1, str2):
"""
return the len of longest string both in str1 and str2
and the positions in str1 and str2
"""
max_len = tmp = pos1 = pos2 = 0
len1, len2 = len(str1), len(str2)
for p in range(len1):
for q in range(len2):
tmp = 0
while p + tmp < len1 and q + tmp < len2 \
and str1[p + tmp] == str2[q + tmp]:
tmp += 1
if tmp > max_len:
max_len, pos1, pos2 = tmp, p, q
return max_len, pos1, pos2
def similar_char(str1, str2):
"""
return the total length of longest string both in str1 and str2
"""
max_len, pos1, pos2 = similar_str(str1, str2)
total = max_len
if max_len != 0:
if pos1 and pos2:
total += similar_char(str1[:pos1], str2[:pos2])
if pos1 + max_len < len(str1) and pos2 + max_len < len(str2):
total += similar_char(str1[pos1 + max_len:], str2[pos2 + max_len:]);
return total
def similar_text(str1, str2):
"""
    Return an int value in [0, 100], which stands for the match level
"""
if not (isinstance(str1, str) or isinstance(str1, unicode)):
raise TypeError("must be str or unicode")
elif not (isinstance(str2, str) or isinstance(str2, unicode)):
raise TypeError("must be str or unicode")
elif len(str1) == 0 and len(str2) == 0:
return 0.0
else:
return int(similar_char(str1, str2) * 200.0 / (len(str1) + len(str2)))
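# Editor's illustration (not part of the original script): the score is the
# total length of recursively matched common substrings, scaled to 0-100,
# e.g. similar_text("ABCD", "ABCD") == 100 and similar_text("ABCD", "AXCD") == 75.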
def trm(raw_inp):
return [w for w in raw_inp.replace(',', ' ').split(' ') if w != ''][:2]
def add_resp():
global condition, required
data_out.write( '\t'.join( [ str(subj_id), str(condition), str(required), str(typedin), str(similar_text(str(required.upper()), str(typedin)))]) + '\t' + str(rounds) + '\n' )
print(required, str(typedin), similar_text(str(required.upper()), str(typedin)))
def ending():
data_out.write(dems + "\n")
data_out.close()
show_instruction( "ENDE" )
# EXECUTE
execute()
| 2.265625 | 2 |
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/f5/bigip_asm_policy_import.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | 11250 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_asm_policy_import
short_description: Manage BIG-IP ASM policy imports
description:
  - Manage BIG-IP ASM policy imports.
version_added: 2.8
options:
name:
description:
- The ASM policy to create or override.
type: str
required: True
inline:
description:
      - When specified, the ASM policy is created from the provided string.
      - Content needs to be provided in a valid XML format, otherwise the operation will fail.
type: str
source:
description:
- Full path to a policy file to be imported into the BIG-IP ASM.
- Policy files exported from newer versions of BIG-IP cannot be imported into older
versions of BIG-IP. The opposite, however, is true; you can import older into
newer.
      - The file format can be binary or XML.
type: path
force:
description:
- When set to C(yes) any existing policy with the same name will be overwritten by the new import.
      - Works for both inline and file imports; if the policy does not exist, this setting is ignored.
default: no
type: bool
partition:
description:
- Device partition to create policy on.
type: str
default: Common
extends_documentation_fragment: f5
author:
- <NAME> (@wojtek0806)
'''
EXAMPLES = r'''
- name: Import ASM policy
bigip_asm_policy_import:
name: new_asm_policy
    source: /root/asm_policy.xml
provider:
server: lb.mydomain.com
user: admin
password: <PASSWORD>
delegate_to: localhost
- name: Import ASM policy inline
bigip_asm_policy_import:
name: foo-policy4
inline: <xml>content</xml>
provider:
server: lb.mydomain.com
user: admin
password: <PASSWORD>
delegate_to: localhost
- name: Override existing ASM policy
  bigip_asm_policy_import:
name: new_asm_policy
    source: /root/asm_policy_new.xml
force: yes
provider:
server: lb.mydomain.com
user: admin
password: <PASSWORD>
delegate_to: localhost
'''
RETURN = r'''
source:
description: Local path to an ASM policy file.
returned: changed
type: str
sample: /root/some_policy.xml
inline:
description: Contents of policy as an inline string
returned: changed
type: str
sample: <xml>foobar contents</xml>
name:
description: Name of the ASM policy to be created/overwritten
returned: changed
type: str
sample: Asm_APP1_Transparent
force:
description: Set when overwriting an existing policy
returned: changed
type: bool
sample: yes
'''
import os
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.icontrol import upload_file
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.icontrol import upload_file
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
updatables = []
returnables = [
'name',
'inline',
'source',
'force'
]
api_attributes = [
'file',
'name',
]
api_map = {
'file': 'inline',
'filename': 'source',
}
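    # Editor's note: api_map routes module options into the REST payload --
    # the API's 'file' field is filled from the module's 'inline' option and
    # 'filename' from 'source'; see inline_import() and
    # import_file_to_device() below.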
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
if not module_provisioned(self.client, 'asm'):
raise F5ModuleError(
"ASM must be provisioned to use this module."
)
result = dict()
changed = self.policy_import()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def policy_import(self):
self._set_changed_options()
if self.module.check_mode:
return True
if self.exists():
if self.want.force is False:
return False
if self.want.inline:
task = self.inline_import()
self.wait_for_task(task)
return True
self.import_file_to_device()
self.remove_temp_policy_from_device()
return True
def exists(self):
uri = 'https://{0}:{1}/mgmt/tm/asm/policies/'.format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,partition".format(
self.want.name, self.want.partition
)
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'items' in response and response['items'] != []:
return True
return False
def upload_file_to_device(self, content, name):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, content, name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def _get_policy_link(self):
uri = 'https://{0}:{1}/mgmt/tm/asm/policies/'.format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,partition".format(
self.want.name, self.want.partition
)
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
policy_link = response['items'][0]['selfLink']
return policy_link
def inline_import(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
if self.want.force:
params.update(dict(policyReference={'link': self._get_policy_link()}))
params.pop('name')
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['id']
def wait_for_task(self, task_id):
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
task_id
)
while True:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if response['status'] in ['COMPLETED', 'FAILURE']:
break
time.sleep(1)
if response['status'] == 'FAILURE':
raise F5ModuleError(
'Failed to import ASM policy.'
)
if response['status'] == 'COMPLETED':
return True
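    # Editor's note: the loop above polls the import-policy task once per
    # second and only returns once the REST API reports COMPLETED (True) or
    # raises on FAILURE; there is no timeout, so a hung task will block here.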
def import_file_to_device(self):
name = os.path.split(self.want.source)[1]
self.upload_file_to_device(self.want.source, name)
time.sleep(2)
full_name = fq_name(self.want.partition, self.want.name)
if self.want.force:
cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1} overwrite'.format(full_name, name)
else:
cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1}'.format(full_name, name)
uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(cmd)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
if 'commandResult' in response:
if 'Unexpected Error' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def remove_temp_policy_from_device(self):
name = os.path.split(self.want.source)[1]
tpath_name = '/var/config/rest/downloads/{0}'.format(name)
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs=tpath_name
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True,
),
source=dict(type='path'),
inline=dict(),
force=dict(
type='bool',
default='no'
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['source', 'inline']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| 1.554688 | 2 |
Firewall/Model/Host.py | frieagle94/firewall | 0 | 11251 | <gh_stars>0
__author__ = '<NAME>'
'''
Oggetto HOST
Attributi:
- mac_address: indirizzo MAC
- port: porta a cui e' collegato
- dpid: switch a cui e' collegato
'''
class Host(object):
def __init__(self, mac_address, port, dpid):
self.mac_address = mac_address
self.port = port
self.dpid = dpid
| 2.125 | 2 |
docker-compose/tweet_collector/tweet_streamer.py | lorenanda/tweets | 2 | 11252 | from tweepy import OAuthHandler, Stream, API
from tweepy.streaming import StreamListener
import json
import logging
import pymongo
import config
client = pymongo.MongoClient(host='mongo_container', port=27018)
db = client.tweets_db
auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)
auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
api = API(auth, wait_on_rate_limit=True)
user = api.me()
logging.critical("connection established with user: " + user.name)
# # Function for Twitter authentication
# def authenticate():
# auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)
# auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
# return auth
# Function for streaming tweets
class TwitterListener(StreamListener):
#defines what is done with every single tweet as it is intercepted in real-time
def __init__(self, limit, callback):
        super().__init__()  # initialise the tweepy StreamListener base class
self.limit = limit
self.counter = 0
self.callback = callback
# Return an error if twitter is unreachable
def on_error(self, status):
if status == 420:
print(status)
return False
def get_tweets_dict(self, t):
if 'extended_tweet' in t:
text = t['extended_tweet']['full_text']
else:
text = t['text']
tweet = {
'username': t['user']['screen_name'],
            'text': text,  # use the extended text resolved above
'followers_count': t['user']['followers_count'],
'location':t['user']['location'],
'description':t['user']['description']
}
return tweet
def on_data(self, data):
t = json.loads(data)
        tweet = self.get_tweets_dict(t)
self.callback(tweet)
self.counter += 1
if self.counter == self.limit:
return False
def stream_tweets(limit, callback):
    stream_listener = TwitterListener(limit, callback)
    stream = Stream(auth=api.auth, listener=stream_listener)
stream.filter(track=['OnThisDay'], follow=['2278940227'], languages=['en'])
def warning_log(tweet):
#logging.critical(f'\n\nTWEET! {tweet["username"]} just tweeted: "{tweet["text"]}"\n\n\n')
    logging.critical('\n\nTWEET: ' + tweet['username'] + ' just tweeted: ' + tweet['text'])
    db.onthisday.insert_one(tweet)
# Driver function
if __name__ == '__main__':
while True:
stream_tweets(5, warning_log)
time.sleep(30) | 2.984375 | 3 |
manage_it/network/models.py | ShangShungInstitute/django-manage-it | 1 | 11253 | <reponame>ShangShungInstitute/django-manage-it
from django.db import models
from django.utils.translation import ugettext_lazy as _
from assets.models import Item
from catalog.models import Inventory
CONNECTION_TYPES = (
(1, "Ethernet 1Gb"),
(2, "Ethernet 100Mb"),
(3, "WIFI"),
(4, "Optic Fiber"),
(5, "USB"),
(6, "HDMI"),
(7, "Telephone"),
)
class Network(models.Model):
"""
    Network of connected assets
"""
inventory = models.ForeignKey(
Inventory, verbose_name=_(u"inventory"))
name = models.CharField(_(u"name"), max_length=100)
description = models.TextField(blank=True, null=True)
ip_range = models.CharField(
_(u"ip_range"),
blank=True, null=True, max_length=100)
def __unicode__(self):
return self.name
class Connection(models.Model):
"""
ItemConnection for networked assets
"""
concetion_type = models.SmallIntegerField(
_(u"link type"), choices=CONNECTION_TYPES)
device_1 = models.ForeignKey(
Item, verbose_name=_(u"item 1"), related_name="dev1")
device_1_interface = models.IPAddressField(
blank=True, null=True)
device_1_mac = models.CharField(
blank=True, null=True, max_length=79)
device_2 = models.ForeignKey(
Item, verbose_name=_(u"item 2"), related_name="dev2")
device_2_interface = models.IPAddressField(
blank=True, null=True)
device_2_mac = models.CharField(
blank=True, null=True, max_length=79)
description = models.TextField(
blank=True, null=True)
network = models.ForeignKey(Network)
class Meta:
unique_together = ("device_1", "device_2")
def __unicode__(self):
return "%s #%s" % (self.network, self.id)
class Interface(models.Model):
mac = models.CharField(_(u"MAC"), blank=True, null=True, max_length=79)
device = models.ForeignKey(Item, verbose_name=_(u"device"))
description = models.TextField(_(u"description"), blank=True, null=True)
def __unicode__(self):
return self.mac
| 2.296875 | 2 |
software/L1_mpu.py | roy-kruemcke/SCUTTLE-SLMP | 0 | 11254 | <filename>software/L1_mpu.py
# L1_mpu.py
# Author: <NAME> (roanoake)
# 30 NOV 2021
# Allows for the interfacing to the MPU9250 using the smbus2 i2c module
# Written for use with Raspberry Pi 4 Model B
import smbus2
import numpy as np
import data
import time
# Initialize Register Data
CONFIG = 0x1A
USER_CTRL = 0x6A
PWR_MGMT_1, PWR_MGMT_2 = 0x6B, 0x6C
GYRO_CONFIG = 0x1B
G_OFFSET = 0x13
GYRO_OUT = 0x43
ACCEL_CONFIG = 0x1C
ACCEL_CONFIG_2 = 0x1D
A_OFFSET = 0x77
ACCEL_OUT = 0x3B
TEMP_OUT = 0x41
# Initialize Scales
MAX_VAL = 2**16
ACCL_SCALE_2G=MAX_VAL/(2*2) # +-2G
ACCL_SCALE_4G=MAX_VAL/(4*2) # +-4G
ACCL_SCALE_8G=MAX_VAL/(8*2) # +-8G
ACCL_SCALE_16G=MAX_VAL/(16*2) # +-16G
GYRO_SCALE_250DG=MAX_VAL/(250*2) # +-250 deg/s
GYRO_SCALE_500DG=MAX_VAL/(500*2) # +-500 deg/s
GYRO_SCALE_1000DG=MAX_VAL/(1000*2) # +-1000 deg/s
GYRO_SCALE_2000DG=MAX_VAL/(2000*2) # +-2000 deg/s
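# Editor's note: each scale factor is in LSB per unit, e.g.
# ACCL_SCALE_2G = 65536 / 4 = 16384 LSB/g, so a raw reading of 16384
# corresponds to exactly 1 g at the +-2G setting.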
# Open I2C bus
bus=smbus2.SMBus(1)
mpu = 0x68 # Default address for MPU
def getAccelScale():
"""
Reads the current accelerometer scale, and returns the scaling factor.
"""
acnfg=bus.read_byte_data(mpu,ACCEL_CONFIG)
scale = (acnfg & 0x18) >> 3 # Bits 4:3 hold the full scale
# Return the corresponding scale
if scale==0: return ACCL_SCALE_2G
elif scale==1: return ACCL_SCALE_4G
elif scale==2: return ACCL_SCALE_8G
elif scale==3: return ACCL_SCALE_16G
return None # If you make it here, its bad
def setAccelScale(newScale:int):
"""
Sets the accelerometer scale. Returns True if successful, False otherwise.
:param scale: integer 0-3 that corresponds to the scale.
"""
# Check input
if not(0<=newScale<=3):
print(">> ERROR: attempted to set ACCEL_SCALE to an improper value")
return False
# First, read the current scale
acnfg=bus.read_byte_data(mpu,ACCEL_CONFIG) # Read ACCEL_CONFIG
acnfg &= ~0x18 # Clear previous scale
acnfg |= (newScale << 3) # Set new scale
bus.write_byte_data(mpu,ACCEL_CONFIG,acnfg) # Write new data
time.sleep(0.01) # Wait 10ms
# Check for completion
tmp=bus.read_byte_data(mpu,ACCEL_CONFIG) # Read ACCEL_CONFIG
tmp=(tmp & 0x18) >> 3 # Isolate scale
if tmp==newScale: # Scale was updated
return True
else: # Scale was not updated
print("> Warning: ACCEL_SCALE did not update")
return False
def getGyroScale():
print("Getting Gyrometer Scale.")
gcnfg=bus.read_byte_data(mpu,GYRO_CONFIG)
scale = (gcnfg & 0x18) >> 3 # Bits 4:3 hold the full scale
# Return the corresponding scale
if scale==0: return GYRO_SCALE_250DG
elif scale==1: return GYRO_SCALE_500DG
elif scale==2: return GYRO_SCALE_1000DG
elif scale==3: return GYRO_SCALE_2000DG
return None # If you make it here, its bad
def readAccelerometer():
try:
# Read Accelerometer Data, 2 bytes for 3 axes, 6 bytes total.
twoByteReadings = bus.read_i2c_block_data(mpu, ACCEL_OUT, 6)
# compile all the data into the 16-bit/axis readings.
binaryVals = [(twoByteReadings[i*2] << 8) | twoByteReadings[i*2 + 1] for i in range(3)]
# convert 16-bit unsigned into 16-bit signed
binaryVals = [data.getSignedVal(i,16) for i in binaryVals]
scale = getAccelScale()
# scale binary to meaningful value
accel_vals = [val/scale for val in binaryVals]
# round to 3 decimal places
accel_vals = np.round(accel_vals,3)
except:
print(">> ERROR: ACCEL_OUT could not be read.")
accel_vals = [0,0,0]
return accel_vals
def readGyrometer():
print("Reading Gyrometer")
def readTemperature():
print("Reading Temperature")
print(readAccelerometer())
| 2.5625 | 3 |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/paletted_texture.py | JE-Chen/je_old_repo | 0 | 11255 | <reponame>JE-Chen/je_old_repo
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_paletted_texture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_paletted_texture',error_checker=_errors._error_checker)
GL_COLOR_INDEX12_EXT=_C('GL_COLOR_INDEX12_EXT',0x80E6)
GL_COLOR_INDEX16_EXT=_C('GL_COLOR_INDEX16_EXT',0x80E7)
GL_COLOR_INDEX1_EXT=_C('GL_COLOR_INDEX1_EXT',0x80E2)
GL_COLOR_INDEX2_EXT=_C('GL_COLOR_INDEX2_EXT',0x80E3)
GL_COLOR_INDEX4_EXT=_C('GL_COLOR_INDEX4_EXT',0x80E4)
GL_COLOR_INDEX8_EXT=_C('GL_COLOR_INDEX8_EXT',0x80E5)
GL_TEXTURE_INDEX_SIZE_EXT=_C('GL_TEXTURE_INDEX_SIZE_EXT',0x80ED)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glColorTableEXT(target,internalFormat,width,format,type,table):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glGetColorTableEXT(target,format,type,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetColorTableParameterfvEXT(target,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetColorTableParameterivEXT(target,pname,params):pass
| 1.296875 | 1 |
backend/apps/csyllabusapi/views/university.py | CSyllabus/webapp | 3 | 11256 | <gh_stars>1-10
from rest_framework.parsers import JSONParser, FileUploadParser
from rest_framework.views import APIView
from ..models import City
from ..models import Country
from ..models import University
from ..models import Faculty
from ..models import Program
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.decorators import parser_classes
from django.utils import timezone
try:
from django.utils import simplejson as json
except ImportError:
import json
@permission_classes((permissions.AllowAny,))
@parser_classes((JSONParser,))
class UniversityView(APIView):
def post(self, request):
name = request.data['name']
country = Country.objects.get(id=request.data['country_id'])
city = City.objects.get(id=request.data['city_id'])
University.objects.create(name=name, country=country, city=city)
return Response()
    def delete(self, request):
id = request.data['id']
University.objects.filter(id=id).delete()
return Response()
    def put(self, request):
id = request.data['id']
name = request.data['name']
country = Country.objects.get(id=request.data['country_id'])
city = City.objects.get(id=request.data['city_id'])
University.objects.filter(id=id).update(name=name, country=country, city=city, modified=timezone.now())
return Response()
| 2.15625 | 2 |
prickly-pufferfish/python_questions/add_to_zero.py | Vthechamp22/summer-code-jam-2021 | 40 | 11257 | """
Write a function with a list of ints as a paramter. /
Return True if any two nums sum to 0. /
>>> add_to_zero([]) /
False /
>>> add_to_zero([1]) /
False /
>>> add_to_zero([1, 2, 3]) /
False /
>>> add_to_zero([1, 2, 3, -2]) /
True /
"""
| 3.703125 | 4 |
op_trans/asgi.py | jezzlucena/django-opp-trans | 1 | 11258 | """
ASGI config for op_trans project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
from op_trans.websocket import websocket_application
from op_trans.redis_cli import RedisCli
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'op_trans.settings')
django_application = get_asgi_application()
async def application(scope, receive, send):
RedisCli.get()
if scope['type'] == 'http':
await django_application(scope, receive, send)
elif scope['type'] == 'websocket':
await websocket_application(scope, receive, send)
else:
raise NotImplementedError(f"Unknown scope type {scope['type']}")
| 1.929688 | 2 |
notifai_recruitment/api.py | BudzynskiMaciej/notifai_recruitment | 0 | 11259 | # -*- coding: utf-8 -*-
"""API routes config for notifai_recruitment project.
REST framework adds support for automatic URL routing to Django, and provides simple, quick and consistent
way of wiring view logic to a set of URLs.
For more information on this file, see
https://www.django-rest-framework.org/api-guide/routers/
"""
from rest_framework import routers
from textify.api.views import NoteViewSet
router = routers.DefaultRouter()
router.register(r'notes', NoteViewSet)
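# Editor's note (illustrative, not from this repo): the router is typically
# mounted in urls.py, e.g.
#   urlpatterns = [path('api/', include(router.urls))]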
| 1.664063 | 2 |
zipline/data/bundles/equities_bundle.py | walterkissling/zipline | 0 | 11260 | <reponame>walterkissling/zipline
# File to ingest an equities bundle for zipline
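# Editor's note (hedged): a bundle like this is normally wired up in
# ~/.zipline/extension.py, e.g.
#   from zipline.data.bundles import register
#   register('equities-csv', equities_bundle('/path/to/prices.csv'))
# The bundle name and CSV path here are illustrative only.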
# Import libraries
import pandas as pd
import numpy as np
def equities_bundle(path_to_file):
# Define custom ingest function
def ingest(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
cache,
show_progress,
output_dir,
start_session,
end_session):
# Read in data
data = pd.read_csv(path_to_file, index_col = [0, 1], parse_dates = [1], infer_datetime_format = True)
data.volume = data.volume.astype(int)
#data.loc[:, 'volume'] = 100000000
symbols = data.index.levels[0].tolist()
#start_dt = data.index.levels[1].min()
#end_dt = data.index.levels[1].max()
# Create asset metadata
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = pd.DataFrame(np.empty(len(symbols), dtype=dtype))
# Create dividend and split dataframe
dividends = pd.DataFrame(columns = ['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date'])
splits = pd.DataFrame(columns = ['sid', 'ratio','effective_date'])
# Create list to hold data
data_to_write = []
# Loop through symbols and prepare data
for sid, symbol in enumerate(symbols):
data_ = data.loc[symbol].sort_index()
start_dt = data_.index.min()
end_dt = data_.index.max()
            # Set auto close to day after last trade
ac_date = end_dt + pd.tseries.offsets.BDay()
metadata.iloc[sid] = start_dt, end_dt, ac_date, symbol
# Check for splits and dividends
if 'split' in data_.columns:
tmp = 1. / data_[data_['split'] != 1.0]['split']
split = pd.DataFrame(data = tmp.index.tolist(), columns = ['effective_date'])
split['ratio'] = tmp.tolist()
split['sid'] = sid
index = pd.Index(range(splits.shape[0],
splits.shape[0] + split.shape[0]))
split.set_index(index, inplace=True)
splits = splits.append(split)
if 'dividend' in data_.columns:
# ex_date amount sid record_date declared_date pay_date
tmp = data_[data_['dividend'] != 0.0]['dividend']
div = pd.DataFrame(data = tmp.index.tolist(), columns = ['ex_date'])
div['record_date'] = tmp.index
div['declared_date'] = tmp.index
div['pay_date'] = tmp.index
div['amount'] = tmp.tolist()
div['sid'] = sid
ind = pd.Index(range(dividends.shape[0], dividends.shape[0] + div.shape[0]))
div.set_index(ind, inplace=True)
dividends = dividends.append(div)
# Append data to list
data_to_write.append((sid, data_))
daily_bar_writer.write(data_to_write, show_progress = True)
# Hardcode exchange data
metadata['exchange'] = 'CSV'
# Write metadata
asset_db_writer.write(equities = metadata)
        # Write splits and dividends
dividends['sid'] = dividends['sid'].astype(int)
splits['sid'] = splits['sid'].astype(int)
adjustment_writer.write(splits = splits,
dividends = dividends)
return ingest | 2.625 | 3 |
model/torch_model.py | FernandoLpz/ONNX-PyTorch-TF-Caffe2 | 3 | 11261 | import torch
import torch.nn as nn
class TorchModel(nn.ModuleList):
def __init__(self):
super(TorchModel, self).__init__()
self.linear_1 = nn.Linear(2, 12)
self.linear_2 = nn.Linear(12, 1)
def forward(self, x):
out = self.linear_1(x)
out = torch.tanh(out)
out = self.linear_2(out)
out = torch.sigmoid(out)
return out | 3.125 | 3 |
nereid/contrib/pagination.py | advocatetax/nereid-1 | 0 | 11262 | <reponame>advocatetax/nereid-1
# -*- coding: utf-8 -*-
# This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from math import ceil
from sql import Select, Column
from sql.functions import Function
from sql.aggregate import Count
from werkzeug.utils import cached_property
class BasePagination(object):
"""
General purpose paginator for doing pagination
With an empty dataset assert the attributes
    >>> p = BasePagination(1, 3, [])
>>> p.count
0
>>> p.pages
0
>>> p.begin_count
0
>>> p.end_count
0
Test with a range(1, 10)
    >>> p = BasePagination(1, 3, range(1, 10))
>>> p.count
9
>>> p.all_items()
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> p.pages
3
>>> p.begin_count
1
>>> p.end_count
3
"""
def __init__(self, page, per_page, data=None):
"""
:param per_page: Items per page
:param page: The page to be displayed
:param data: The data table
"""
self.per_page = per_page
self.page = page
self.data = data if data is not None else []
@property
def count(self):
"Returns the count of data"
return len(self.data)
def all_items(self):
"""Returns complete set of items"""
return self.data
def items(self):
"""Returns the list of items in current page
"""
return self.data[self.offset:self.offset + self.per_page]
def __iter__(self):
for item in list(self.items()):
yield item
def __len__(self):
return self.count
def serialize(self):
return {
"count": self.count,
"pages": self.pages,
"page": self.page,
"per_page": self.per_page,
"items": list(self.items()),
}
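    # Editor's illustration: BasePagination(1, 3, range(1, 10)).serialize()
    # -> {'count': 9, 'pages': 3, 'page': 1, 'per_page': 3, 'items': [1, 2, 3]}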
@property
def prev(self):
"""Returns a :class:`Pagination` object for the previous page."""
return Pagination(self.page - 1, self.per_page, self.data)
def __next__(self):
"""Returns a :class:`Pagination` object for the next page."""
return Pagination(self.page + 1, self.per_page, self.data)
#: Attributes below this may not require modifications in general cases
def iter_pages(
self, left_edge=2, left_current=2, right_current=2, right_edge=2
):
"""
Iterates over the page numbers in the pagination. The four
parameters control the thresholds how many numbers should be produced
from the sides. Skipped page numbers are represented as `None`.
This is how you could render such a pagination in the templates:
.. sourcecode:: html+jinja
{% macro render_pagination(pagination, endpoint) %}
<div class=pagination>
{%- for page in pagination.iter_pages() %}
{% if page %}
{% if page != pagination.page %}
<a href="{{ url_for(endpoint, page=page) }}">
{{ page }}
</a>
{% else %}
<strong>{{ page }}</strong>
{% endif %}
{% else %}
<span class=ellipsis>…</span>
{% endif %}
{%- endfor %}
</div>
{% endmacro %}
"""
last = 0
for num in range(1, self.pages + 1):
if num <= left_edge or \
(num > self.page - left_current - 1 and
num < self.page + right_current) or \
num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
offset = property(lambda self: (self.page - 1) * self.per_page)
prev_num = property(lambda self: self.page - 1)
has_prev = property(lambda self: self.page > 1)
next_num = property(lambda self: self.page + 1)
has_next = property(lambda self: self.page < self.pages)
pages = property(lambda self: int(ceil(self.count / float(self.per_page))))
begin_count = property(lambda self: min([
((self.page - 1) * self.per_page) + 1,
self.count]))
end_count = property(lambda self: min(
self.begin_count + self.per_page - 1, self.count))
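    # Editor's illustration: for BasePagination(page=2, per_page=3,
    # data=range(1, 10)) these evaluate to offset=3, pages=3, begin_count=4
    # and end_count=6.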
class Pagination(BasePagination):
"""
General purpose paginator for doing pagination which can be used by
passing a search domain .Remember that this means the query will be built
and executed and passed on which could be slower than writing native SQL
queries. While this fits into most use cases, if you would like to use
a SQL query rather than a domain use :class:QueryPagination instead
"""
# The counting of all possible records can be really expensive if you
# have too many records and the selectivity of the query is low. For
# example - a query to display all products in a website would be quick
# in displaying the products but slow in building the navigation. So in
# cases where this could be frequent, the value of count may be cached and
# assigned to this variable
_count = None
def __init__(self, obj, domain, page, per_page, order=None):
"""
:param obj: The object itself. pass self within tryton object
:param domain: Domain for search in tryton
:param per_page: Items per page
:param page: The page to be displayed
"""
self.obj = obj
self.domain = domain
self.order = order
super(Pagination, self).__init__(page, per_page)
@cached_property
def count(self):
"""
Returns the count of entries
"""
if self.ids_domain():
return len(self.domain[0][2])
if self._count is not None:
return self._count
return self.obj.search(domain=self.domain, count=True)
def all_items(self):
"""Returns complete set of items"""
if self.ids_domain():
return self.obj.browse(self.domain[0][2])
return self.obj.search(self.domain)
def ids_domain(self):
"""
Returns True if the domain has only IDs and can skip SQL fetch
to directly browse the records. Else a False is returned
"""
return (len(self.domain) == 1) and \
(self.domain[0][0] == 'id') and \
(self.domain[0][1] == 'in') and \
(self.order is None)
def serialize(self, purpose=None):
rv = super(Pagination, self).serialize()
if hasattr(self.obj, 'serialize'):
rv['items'] = [item.serialize(purpose) for item in list(self.items())]
elif hasattr(self.obj, '_json'):
# older style _json methods
rv['items'] = [item._json() for item in list(self.items())]
else:
rv['items'] = [
{
'id': item.id,
'rec_name': item.rec_name,
} for item in list(self.items())
]
return rv
def items(self):
"""
Returns the list of browse records of items in the page
"""
if self.ids_domain():
ids = self.domain[0][2][self.offset:self.offset + self.per_page]
return self.obj.browse(ids)
else:
return self.obj.search(
self.domain, offset=self.offset, limit=self.per_page,
order=self.order
)
@property
def prev(self, error_out=False):
"""Returns a :class:`Pagination` object for the previous page."""
return self.obj.paginate(self.page - 1, self.per_page, error_out)
def next(self, error_out=False):
"""Returns a :class:`Pagination` object for the next page."""
return self.obj.paginate(self.page + 1, self.per_page, error_out)
class Distinct(Function):
__slots__ = ()
_function = 'DISTINCT'
class QueryPagination(BasePagination):
"""A fast implementation of pagination which uses a SQL query for
generating the IDS and hence the pagination
.. versionchanged::3.2.0.5
The SQL Query has to be an instance of `sql.Select`.
"""
def __init__(self, obj, query, primary_table, page, per_page):
"""
:param query: Query to be used for search.
It must not include an OFFSET or LIMIT as they
would be automatically added to the query.
It must also not have any columns in the select.
:param primary_table: The ~`sql.Table` instance from which the records
have to be selected.
:param page: The page to be displayed
:param per_page: Items per page
"""
self.obj = obj
assert isinstance(query, Select), "Query must be python-sql"
self.query = query
self.primary_table = primary_table
super(QueryPagination, self).__init__(page, per_page)
@cached_property
def count(self):
"Return the count of the Items"
from trytond.transaction import Transaction
# XXX: Ideal case should make a copy of Select query
#
# https://code.google.com/p/python-sql/issues/detail?id=22
query = self.query
query.columns = (Count(Distinct(self.primary_table.id)), )
cursor = Transaction().connection.cursor()
# temporarily remove order_by
order_by = query.order_by
query.order_by = None
try:
cursor.execute(*query)
finally:
# XXX: This can be removed when SQL queries can be copied
# See comment above
query.order_by = order_by
res = cursor.fetchone()
if res:
return res[0]
# There can be a case when query return None and then count
# will be zero
return 0
def all_items(self):
"""Returns complete set of items"""
from trytond.transaction import Transaction
# XXX: Ideal case should make a copy of Select query
#
# https://code.google.com/p/python-sql/issues/detail?id=22
query = self.query
query.columns = (Distinct(self.primary_table.id), ) + tuple(
(o.expression for o in query.order_by if isinstance(
o.expression, Column
))
)
query.offset = None
query.limit = None
cursor = Transaction().connection.cursor()
cursor.execute(*query)
rv = [x[0] for x in cursor.fetchall()]
return self.obj.browse([_f for _f in rv if _f])
def items(self):
"""
Returns the list of browse records of items in the page
"""
from trytond.transaction import Transaction
# XXX: Ideal case should make a copy of Select query
#
# https://code.google.com/p/python-sql/issues/detail?id=22
query = self.query
query.columns = (Distinct(self.primary_table.id), ) + tuple(
(o.expression for o in query.order_by if isinstance(
o.expression, Column
))
)
query.offset = self.offset
query.limit = self.per_page
cursor = Transaction().connection.cursor()
cursor.execute(*query)
rv = [x[0] for x in cursor.fetchall()]
return self.obj.browse([_f for _f in rv if _f])
| 2.484375 | 2 |
app.py | Arpan-206/Youtube-Downloader-Flask | 3 | 11263 | <reponame>Arpan-206/Youtube-Downloader-Flask
from flask import Flask, request, send_file, render_template, url_for
import pytube
import logging
import sys
import os
from hello import timed_delete
from threading import Timer
timed_delete()
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
app = Flask(__name__)
@app.route("/")
def youtube_downloader():
my_css = url_for('static', filename='cover.css')
return render_template('index.html', css_path= my_css)
@app.route("/download_video", methods=["GET","POST"])
def download_video():
"""
First pytube downloads the file locally in pythonanywhere:
/home/your_username/video_name.mp4
Then use Flask's send_file() to download the video
to the user's Downloads folder.
"""
local_download_path = pytube.YouTube("https://www.youtube.com/watch?v=b1JlYZQG3lI").streams.get_highest_resolution().download()
fname = local_download_path.split("//")
return send_file(fname, as_attachment=True)
| 3.359375 | 3 |
si_unit_pandas/base.py | domdfcoding/si_unit_pandas | 0 | 11264 | #!/usr/bin/env python3
#
# base.py
"""
Base functionality.
"""
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Based on cyberpandas
# https://github.com/ContinuumIO/cyberpandas
# Copyright (c) 2018, Anaconda, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# _isstringslice based on awkward-array
# https://github.com/scikit-hep/awkward-array
# Copyright (c) 2018-2019, <NAME>
# Licensed under the BSD 3-Clause License
#
# stdlib
from abc import abstractmethod
from numbers import Real
from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload
# 3rd party
import numpy # type: ignore
from domdf_python_tools.doctools import prettify_docstrings
from pandas.core.arrays import ExtensionArray # type: ignore
from pandas.core.dtypes.base import ExtensionDtype # type: ignore
from pandas.core.dtypes.generic import ABCExtensionArray # type: ignore
from typing_extensions import Literal, Protocol
__all__ = ["NumPyBackedExtensionArrayMixin"]
class NumPyBackedExtensionArrayMixin(ExtensionArray):
"""
Mixin for pandas extension backed by a numpy array.
"""
_dtype: Type[ExtensionDtype]
@property
def dtype(self):
"""
The dtype for this extension array, :class:`~.CelsiusType`.
"""
return self._dtype
@classmethod
def _from_sequence(cls, scalars: Iterable, dtype=None, copy: bool = False):
"""
Construct a new ExtensionArray from a sequence of scalars.
:param scalars: Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
:param dtype: Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
:type dtype: dtype, optional
:param copy: If True, copy the underlying data.
"""
return cls(scalars, dtype=dtype)
@classmethod
def _from_factorized(cls, values: numpy.ndarray, original: ExtensionArray):
"""
Reconstruct an ExtensionArray after factorization.
:param values: An integer ndarray with the factorized values.
:param original: The original ExtensionArray that factorize was called on.
.. seealso::
:meth:`pandas.pandas.api.extensions.ExtensionArray.factorize`
"""
return cls(values)
@property
def shape(self) -> Tuple[int]:
"""
Return a tuple of the array dimensions.
"""
return len(self.data),
def __len__(self) -> int:
"""
Returns the length of this array.
"""
return len(self.data)
def setitem(self, indexer, value):
"""
Set the 'value' inplace.
"""
# I think having a separate than __setitem__ is good
# since we have to return here, but __setitem__ doesn't.
self[indexer] = value
return self
@property
def nbytes(self) -> int:
"""
The number of bytes needed to store this object in memory.
"""
return self._itemsize * len(self)
def _formatting_values(self):
return numpy.array(self._format_values(), dtype="object")
def copy(self, deep: bool = False) -> ABCExtensionArray:
"""
Return a copy of the array.
:param deep:
:return:
:rtype:
"""
return type(self)(self.data.copy())
@classmethod
def _concat_same_type(cls, to_concat: Sequence[ABCExtensionArray]) -> ABCExtensionArray:
"""
Concatenate multiple arrays.
:param to_concat: sequence of this type
"""
return cls(numpy.concatenate([array.data for array in to_concat]))
def tolist(self) -> List:
"""
Convert the array to a Python list.
"""
return self.data.tolist()
def argsort(
self,
ascending: bool = True,
kind: Union[Literal["quicksort"], Literal["mergesort"], Literal["heapsort"]] = "quicksort",
*args,
**kwargs,
) -> numpy.ndarray:
r"""
Return the indices that would sort this array.
:param ascending: Whether the indices should result in an ascending
or descending sort.
:param kind: {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
\*args and \*\*kwargs are passed through to :func:`numpy.argsort`.
:return: Array of indices that sort ``self``. If NaN values are contained,
NaN values are placed at the end.
.. seealso::
:class:`numpy.argsort`: Sorting implementation used internally.
"""
return self.data.argsort()
def unique(self) -> ExtensionArray: # noqa: D102
# https://github.com/pandas-dev/pandas/pull/19869
_, indices = numpy.unique(self.data, return_index=True)
data = self.data.take(numpy.sort(indices))
return self._from_ndarray(data)
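    # Editor's note: sorting the first-occurrence indices above keeps the
    # original encounter order, e.g. values [3, 1, 3, 2] yield unique
    # [3, 1, 2] rather than the value-sorted [1, 2, 3] numpy.unique alone gives.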
_A = TypeVar("_A")
class BaseArray(numpy.lib.mixins.NDArrayOperatorsMixin, NumPyBackedExtensionArrayMixin):
ndim: int = 1
data: numpy.ndarray
@classmethod
def _from_ndarray(cls: _A, data: numpy.ndarray, copy: bool = False) -> _A:
"""
Zero-copy construction of a BaseArray from an ndarray.
:param data: This should have CelsiusType._record_type dtype
:param copy: Whether to copy the data.
:return:
"""
if copy:
data = data.copy()
new = cls([]) # type: ignore
new.data = data
return new
@property
def na_value(self):
"""
The missing value.
**Example:**
.. code-block::
>>> BaseArray([]).na_value
numpy.nan
"""
return self.dtype.na_value
def take(self, indices, allow_fill: bool = False, fill_value=None):
# Can't use pandas' take yet
# 1. axis
# 2. I don't know how to do the reshaping correctly.
indices = numpy.asarray(indices, dtype="int")
if allow_fill and fill_value is None:
fill_value = self.na_value
elif allow_fill and not isinstance(fill_value, tuple):
if not numpy.isnan(fill_value):
fill_value = int(fill_value)
if allow_fill:
mask = (indices == -1)
if not len(self):
if not (indices == -1).all():
msg = "Invalid take for empty array. Must be all -1."
raise IndexError(msg)
else:
# all NA take from and empty array
took = (
numpy.full(
(len(indices), 2),
fill_value,
dtype=">u8",
).reshape(-1).astype(self.dtype._record_type)
)
return self._from_ndarray(took)
if (indices < -1).any():
msg = "Invalid value in 'indicies'. Must be all >= -1 for 'allow_fill=True'"
raise ValueError(msg)
took = self.data.take(indices)
if allow_fill:
took[mask] = fill_value
return self._from_ndarray(took)
def __repr__(self) -> str:
formatted = self._format_values()
return f"{self.__class__.__name__}({formatted!r})"
def isna(self):
"""
Indicator for whether each element is missing.
"""
if numpy.isnan(self.na_value):
return numpy.isnan(self.data)
else:
return self.data == self.na_value
# From https://github.com/scikit-hep/awkward-array/blob/2bbdb68d7a4fff2eeaed81eb76195e59232e8c13/awkward/array/base.py#L611
def _isstringslice(self, where):
if isinstance(where, str):
return True
elif isinstance(where, bytes):
raise TypeError("column selection must be str, not bytes, in Python 3")
elif isinstance(where, tuple):
return False
        # numpy.str / numpy.object / numpy.bool were deprecated aliases of the
        # builtins and are removed in NumPy >= 1.24, so the builtins are used here.
        elif (
            isinstance(where, (numpy.ndarray, self.__class__))
            and issubclass(where.dtype.type, (str, numpy.str_))
        ):
            return True
        elif isinstance(where, (numpy.ndarray, self.__class__)) and issubclass(
            where.dtype.type, (object, numpy.object_)
        ) and not issubclass(where.dtype.type, (bool, numpy.bool_)):
return len(where) > 0 and all(isinstance(x, str) for x in where)
elif isinstance(where, (numpy.ndarray, self.__class__)):
return False
try:
assert len(where) > 0
assert all(isinstance(x, str) for x in where)
except (TypeError, AssertionError):
return False
else:
return True
def __delitem__(self, where):
if isinstance(where, str):
del self.data[where]
elif self._isstringslice(where):
for x in where:
del self.data[x]
else:
raise TypeError(f"invalid index for removing column from Table: {where}")
@property
@abstractmethod
def _parser(self):
raise NotImplementedError
def append(self, value) -> None:
"""
Append a value to this BaseArray.
:param value:
"""
self.data = numpy.append(self.data, self._parser(value).data)
def __setitem__(self, key, value):
value = self._parser(value).data
self.data[key] = value
class _SupportsIndex(Protocol):
def __index__(self) -> int:
...
_F = TypeVar("_F", bound="UserFloat")
@prettify_docstrings
class UserFloat(Real):
"""
Class that simulates a float.
:param value: Values to initialise the :class:`~domdf_python_tools.bases.UserFloat` with.
.. versionadded:: 1.6.0
"""
def __init__(self, value: Union[SupportsFloat, _SupportsIndex, str, bytes, bytearray] = 0.0):
self._value = (float(value), )
def as_integer_ratio(self) -> Tuple[int, int]:
return float(self).as_integer_ratio()
def hex(self) -> str: # noqa: A003 # pylint: disable=redefined-builtin
return float(self).hex()
def is_integer(self) -> bool:
return float(self).is_integer()
@classmethod
def fromhex(cls: Type[_F], __s: str) -> _F:
return cls(float.fromhex(__s))
def __add__(self: _F, other: float) -> _F:
return self.__class__(float(self).__add__(other))
def __sub__(self: _F, other: float) -> _F:
return self.__class__(float(self).__sub__(other))
def __mul__(self: _F, other: float) -> _F:
return self.__class__(float(self).__mul__(other))
def __floordiv__(self: _F, other: float) -> _F: # type: ignore
return self.__class__(float(self).__floordiv__(other))
def __truediv__(self: _F, other: float) -> _F:
return self.__class__(float(self).__truediv__(other))
def __mod__(self: _F, other: float) -> _F:
return self.__class__(float(self).__mod__(other))
def __divmod__(self: _F, other: float) -> Tuple[_F, _F]:
return tuple(self.__class__(x) for x in float(self).__divmod__(other)) # type: ignore
def __pow__(self: _F, other: float, mod=None) -> _F:
return self.__class__(float(self).__pow__(other, mod))
def __radd__(self: _F, other: float) -> _F:
return self.__class__(float(self).__radd__(other))
def __rsub__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rsub__(other))
def __rmul__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rmul__(other))
def __rfloordiv__(self: _F, other: float) -> _F: # type: ignore
return self.__class__(float(self).__rfloordiv__(other))
def __rtruediv__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rtruediv__(other))
def __rmod__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rmod__(other))
def __rdivmod__(self: _F, other: float) -> Tuple[_F, _F]:
return tuple(self.__class__(x) for x in float(self).__rdivmod__(other)) # type: ignore
def __rpow__(self: _F, other: float, mod=None) -> _F:
return self.__class__(float(self).__rpow__(other, mod))
def __getnewargs__(self) -> Tuple[float]:
return self._value
def __trunc__(self) -> int:
return float(self).__trunc__()
@overload
def __round__(self, ndigits: int) -> float:
...
@overload
def __round__(self, ndigits: None = ...) -> int:
...
def __round__(self, ndigits: Optional[int] = None) -> Union[int, float]:
return float(self).__round__(ndigits)
def __eq__(self, other: object) -> bool:
if isinstance(other, UserFloat):
return self._value == other._value
else:
return float(self).__eq__(other)
def __ne__(self, other: object) -> bool:
if isinstance(other, UserFloat):
return self._value != other._value
else:
return float(self).__ne__(other)
def __lt__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value < other._value
else:
return float(self).__lt__(other)
def __le__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value <= other._value
else:
return float(self).__le__(other)
def __gt__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value > other._value
else:
return float(self).__gt__(other)
def __ge__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value >= other._value
else:
return float(self).__ge__(other)
def __neg__(self: _F) -> _F:
return self.__class__(float(self).__neg__())
def __pos__(self: _F) -> _F:
return self.__class__(float(self).__pos__())
def __str__(self) -> str:
return str(float(self))
def __int__(self) -> int:
return int(float(self))
def __float__(self) -> float:
return self._value[0]
def __abs__(self: _F) -> _F:
return self.__class__(float(self).__abs__())
def __hash__(self) -> int:
return float(self).__hash__()
def __repr__(self) -> str:
return str(self)
def __ceil__(self):
raise NotImplementedError
def __floor__(self):
raise NotImplementedError
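# Minimal usage sketch (assumption, not in the original file): subclasses keep
# their type through arithmetic, which is the point of UserFloat.
#   class Celsius(UserFloat):
#       pass
#   Celsius(21.5) + 0.5   # -> Celsius(22.0)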
| 1.539063 | 2 |
agents/admin.py | HerbertRamirez/inmo_web | 0 | 11265 | <filename>agents/admin.py
from django.contrib import admin
from .models import Agent
# Register your models here.
class AgentAdmin(admin.ModelAdmin):
    readonly_fields = ('created', 'updated')
# Register the model together with its admin class so readonly_fields apply.
admin.site.register(Agent, AgentAdmin)
| 1.6875 | 2 |
modules/DEFA/MS_Office/compoundfiles/const.py | naaya17/carpe | 56 | 11266 | <filename>modules/DEFA/MS_Office/compoundfiles/const.py
#!/usr/bin/env python
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# A library for reading Microsoft's OLE Compound Document format
# Copyright (c) 2014 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
native_str = str
str = type('')
import struct as st
# Magic identifier at the start of the file
COMPOUND_MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'
FREE_SECTOR = 0xFFFFFFFF # denotes an unallocated (free) sector
END_OF_CHAIN = 0xFFFFFFFE # denotes the end of a stream chain
NORMAL_FAT_SECTOR = 0xFFFFFFFD # denotes a sector used for the regular FAT
MASTER_FAT_SECTOR = 0xFFFFFFFC # denotes a sector used for the master FAT
MAX_NORMAL_SECTOR = 0xFFFFFFFA # the maximum sector in a file
MAX_REG_SID = 0xFFFFFFFA # maximum directory entry ID
NO_STREAM = 0xFFFFFFFF # unallocated directory entry
DIR_INVALID = 0 # unknown/empty(?) storage type
DIR_STORAGE = 1 # element is a storage (dir) object
DIR_STREAM = 2 # element is a stream (file) object
DIR_LOCKBYTES = 3 # element is an ILockBytes object
DIR_PROPERTY = 4 # element is an IPropertyStorage object
DIR_ROOT = 5 # element is the root storage object
FILENAME_ENCODING = 'latin-1'
COMPOUND_HEADER = st.Struct(native_str(''.join((
native_str('<'), # little-endian format
native_str('8s'), # magic string
native_str('16s'), # file UUID (unused)
native_str('H'), # file header major version
native_str('H'), # file header minor version
native_str('H'), # byte order mark
native_str('H'), # sector size (actual size is 2**sector_size)
native_str('H'), # mini sector size (actual size is 2**short_sector_size)
native_str('6s'), # unused
native_str('L'), # directory chain sector count
native_str('L'), # normal-FAT sector count
native_str('L'), # ID of first sector of the normal-FAT
native_str('L'), # transaction signature (unused)
native_str('L'), # minimum size of a normal stream
native_str('L'), # ID of first sector of the mini-FAT
native_str('L'), # mini-FAT sector count
native_str('L'), # ID of first sector of the master-FAT
native_str('L'), # master-FAT sector count
))))
DIR_HEADER = st.Struct(native_str(''.join((
native_str('<'), # little-endian format
native_str('64s'), # NULL-terminated filename in UTF-16 little-endian encoding
native_str('H'), # length of filename in bytes (why?!)
native_str('B'), # dir-entry type
native_str('B'), # red (0) or black (1) entry
native_str('L'), # ID of left-sibling node
native_str('L'), # ID of right-sibling node
native_str('L'), # ID of children's root node
native_str('16s'), # dir-entry UUID (unused)
native_str('L'), # user flags (unused)
native_str('Q'), # creation timestamp
native_str('Q'), # modification timestamp
native_str('L'), # start sector of stream
native_str('L'), # low 32-bits of stream size
native_str('L'), # high 32-bits of stream size
))))
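# Illustrative sketch (assumption, not in the original module): reading and
# unpacking the fixed 76-byte header from the start of a compound document.
# The file name is a placeholder.
#   with open('example.doc', 'rb') as stream:
#       fields = COMPOUND_HEADER.unpack(stream.read(COMPOUND_HEADER.size))
#   assert fields[0] == COMPOUND_MAGIC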
| 1.210938 | 1 |
Validation/valid_view_point_cloud.py | dtczhl/Slimmer | 0 | 11267 | """
view predication for point cloud,
Run valid_one_point_cloud first
"""
import torch
import numpy as np
import sys
import os
import pptk
# ------ Configurations ------
# path to pth file
pth_file = "../tmp/scene0015_00_vh_clean_2.pth.Random.100"
show_gt = False  # show groundtruth or not; groundtruth is drawn first, i.e., at the back
# --- end of configurations ---
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf',
'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink',
'bathtub', 'otherfurniture']
# CLASS_COLOR = [
# [138, 43, 226], [0, 128, 128], [0, 255, 0], [0, 0, 255], [255, 255, 0],
# [0, 255, 255], [255, 0, 255], [192, 192, 192], [128, 128, 128], [128, 0, 0],
# [128, 128, 0], [0, 128, 0], [128, 0, 128], [255, 0, 0], [0, 0, 128],
# [34, 139, 34], [64, 224, 208], [0, 0, 0], [75, 0, 130], [205, 133, 63]
# ]
SCANNET_COLOR_MAP = {
0: (0., 0., 0.),
1: (174., 199., 232.),
2: (152., 223., 138.),
3: (31., 119., 180.),
4: (255., 187., 120.),
5: (188., 189., 34.),
6: (140., 86., 75.),
7: (255., 152., 150.),
8: (214., 39., 40.),
9: (197., 176., 213.),
10: (148., 103., 189.),
11: (196., 156., 148.),
12: (23., 190., 207.),
14: (247., 182., 210.),
15: (66., 188., 102.),
16: (219., 219., 141.),
17: (140., 57., 197.),
18: (202., 185., 52.),
19: (51., 176., 203.),
20: (200., 54., 131.),
21: (92., 193., 61.),
22: (78., 71., 183.),
23: (172., 114., 82.),
24: (255., 127., 14.),
25: (91., 163., 138.),
26: (153., 98., 156.),
27: (140., 153., 101.),
28: (158., 218., 229.),
29: (100., 125., 154.),
30: (178., 127., 135.),
32: (146., 111., 194.),
33: (44., 160., 44.),
34: (112., 128., 144.),
35: (96., 207., 209.),
36: (227., 119., 194.),
37: (213., 92., 176.),
38: (94., 106., 211.),
39: (82., 84., 163.),
40: (100., 85., 144.),
}
VALID_CLASS_IDS = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39
]
CLASS_COLOR = []
for valid_id in VALID_CLASS_IDS:
CLASS_COLOR.append(SCANNET_COLOR_MAP[valid_id])
CLASS_COLOR = np.array(CLASS_COLOR) / 255.0
def show_prediction_result(pth_file, show_gt):
data = torch.load(pth_file)
coords, colors, labels, pred = data
ignore_index = labels == -100
coords = coords[~ignore_index]
colors = colors[~ignore_index]
labels = labels[~ignore_index]
pred = pred[~ignore_index]
gt_color = [CLASS_COLOR[x] for x in labels.astype("int32")]
pred_color = [CLASS_COLOR[x] for x in pred.astype("int32")]
if show_gt:
v1 = pptk.viewer(coords, gt_color)
v1.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False)
v1.set(theta=1.8, lookat=[0, 0, 0], phi=0.52)
v2 = pptk.viewer(coords, pred_color)
v2.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False)
v2.set(theta=1.8, lookat=[0, 0, 0], phi=0.52)
if __name__ == "__main__":
    show_prediction_result(pth_file, show_gt)
| 2.3125 | 2 |
core/migrations/0009_measurement.py | Potanist/Potanist | 0 | 11268 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0008_grow_owner'),
]
operations = [
migrations.CreateModel(
name='Measurement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('air_temperature', models.IntegerField(null=True, blank=True)),
('water_temperature', models.IntegerField(null=True, blank=True)),
('humidity', models.IntegerField(null=True, blank=True)),
('co2', models.IntegerField(null=True, blank=True)),
('ppm', models.IntegerField(null=True, blank=True)),
('tds', models.IntegerField(null=True, blank=True)),
('ec', models.IntegerField(null=True, blank=True)),
('ph', models.IntegerField(null=True, blank=True)),
('lumen', models.IntegerField(null=True, blank=True)),
('plant', models.ForeignKey(to='core.Plant')),
],
),
]
| 1.75 | 2 |
mkt/purchase/models.py | muffinresearch/zamboni | 0 | 11269 | import datetime
from django.conf import settings
from django.db import models
from django.utils import translation
import tower
from babel import Locale, numbers
from jingo import env
from jinja2.filters import do_dictsort
from tower import ugettext as _
import amo
from amo.fields import DecimalCharField
from amo.helpers import absolutify, urlparams
from amo.utils import get_locale_from_lang, send_mail, send_mail_jinja
class ContributionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Contribution(amo.models.ModelBase):
addon = models.ForeignKey('webapps.Addon', blank=True, null=True)
# For in-app purchases this links to the product.
inapp_product = models.ForeignKey('inapp.InAppProduct',
blank=True, null=True)
amount = DecimalCharField(max_digits=9, decimal_places=2,
nullify_invalid=True, null=True)
currency = models.CharField(max_length=3,
choices=do_dictsort(amo.PAYPAL_CURRENCIES),
default=amo.CURRENCY_DEFAULT)
source = models.CharField(max_length=255, null=True)
source_locale = models.CharField(max_length=10, null=True)
# This is the external id that you can communicate to the world.
uuid = models.CharField(max_length=255, null=True, db_index=True)
comment = models.CharField(max_length=255)
# This is the internal transaction id between us and a provider,
# for example paypal or solitude.
transaction_id = models.CharField(max_length=255, null=True, db_index=True)
paykey = models.CharField(max_length=255, null=True)
# Marketplace specific.
# TODO(andym): figure out what to do when we delete the user.
user = models.ForeignKey('users.UserProfile', blank=True, null=True)
type = models.PositiveIntegerField(default=amo.CONTRIB_TYPE_DEFAULT,
choices=do_dictsort(amo.CONTRIB_TYPES))
price_tier = models.ForeignKey('prices.Price', blank=True, null=True,
on_delete=models.PROTECT)
# If this is a refund or a chargeback, which charge did it relate to.
related = models.ForeignKey('self', blank=True, null=True,
on_delete=models.PROTECT)
class Meta:
db_table = 'stats_contributions'
def __unicode__(self):
return u'App {app}: in-app: {inapp}: {amount}'.format(
app=self.addon, amount=self.amount, inapp=self.inapp_product)
@property
def date(self):
try:
return datetime.date(self.created.year,
self.created.month, self.created.day)
except AttributeError:
# created may be None
return None
def _switch_locale(self):
if self.source_locale:
lang = self.source_locale
else:
lang = self.addon.default_locale
tower.activate(lang)
return Locale(translation.to_locale(lang))
def _mail(self, template, subject, context):
template = env.get_template(template)
body = template.render(context)
send_mail(subject, body, settings.MARKETPLACE_EMAIL,
[self.user.email], fail_silently=True)
def record_failed_refund(self, e, user):
self.enqueue_refund(amo.REFUND_FAILED, user,
rejection_reason=str(e))
self._switch_locale()
        self._mail('users/support/emails/refund-failed.txt',
                   # L10n: the addon name.
                   _(u'%s refund failed') % self.addon.name,
                   {'name': self.addon.name})
send_mail_jinja(
'Refund failed', 'purchase/email/refund-failed.txt',
{'name': self.user.email,
'error': str(e)},
settings.MARKETPLACE_EMAIL,
[str(self.addon.support_email)], fail_silently=True)
def mail_approved(self):
"""The developer has approved a refund."""
locale = self._switch_locale()
amt = numbers.format_currency(abs(self.amount), self.currency,
locale=locale)
        self._mail('users/support/emails/refund-approved.txt',
                   # L10n: the addon name.
                   _(u'%s refund approved') % self.addon.name,
                   {'name': self.addon.name, 'amount': amt})
def mail_declined(self):
"""The developer has declined a refund."""
self._switch_locale()
        self._mail('users/support/emails/refund-declined.txt',
                   # L10n: the addon name.
                   _(u'%s refund declined') % self.addon.name,
                   {'name': self.addon.name})
def enqueue_refund(self, status, user, refund_reason=None,
rejection_reason=None):
"""Keep track of a contribution's refund status."""
from mkt.prices.models import Refund
refund, c = Refund.objects.safer_get_or_create(contribution=self,
user=user)
refund.status = status
# Determine which timestamps to update.
timestamps = []
if status in (amo.REFUND_PENDING, amo.REFUND_APPROVED_INSTANT,
amo.REFUND_FAILED):
timestamps.append('requested')
if status in (amo.REFUND_APPROVED, amo.REFUND_APPROVED_INSTANT):
timestamps.append('approved')
elif status == amo.REFUND_DECLINED:
timestamps.append('declined')
for ts in timestamps:
setattr(refund, ts, datetime.datetime.now())
if refund_reason:
refund.refund_reason = refund_reason
if rejection_reason:
refund.rejection_reason = rejection_reason
refund.save()
return refund
def get_amount_locale(self, locale=None):
"""Localise the amount paid into the current locale."""
if not locale:
lang = translation.get_language()
locale = get_locale_from_lang(lang)
return numbers.format_currency(self.amount or 0,
self.currency or 'USD',
locale=locale)
def get_refund_url(self):
return urlparams(self.addon.get_dev_url('issue_refund'),
transaction_id=self.transaction_id)
def get_absolute_refund_url(self):
return absolutify(self.get_refund_url())
def get_refund_contribs(self):
"""Get related set of refund contributions."""
return Contribution.objects.filter(
related=self, type=amo.CONTRIB_REFUND).order_by('-modified')
def is_refunded(self):
"""
If related has been set, then this transaction has been refunded or
charged back. This is a bit expensive, so refrain from using on listing
pages.
"""
return (Contribution.objects.filter(related=self,
type__in=[amo.CONTRIB_REFUND,
amo.CONTRIB_CHARGEBACK])
.exists())
| 1.914063 | 2 |
src/kol/request/CampgroundRestRequest.py | danheath/temppykol | 19 | 11270 | <reponame>danheath/temppykol<filename>src/kol/request/CampgroundRestRequest.py
from kol.request.GenericRequest import GenericRequest
class CampgroundRestRequest(GenericRequest):
"Rests at the user's campground."
def __init__(self, session):
super(CampgroundRestRequest, self).__init__(session)
self.url = session.serverURL + 'campground.php?action=rest'
| 2.359375 | 2 |
pdlearn/adaptor/methods.py | richlewis42/pandas-learn | 1 | 11271 | <filename>pdlearn/adaptor/methods.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of pandas-learn
# https://github.com/RichLewis42/pandas-learn
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
# Copyright (c) 2015, <NAME> <<EMAIL>>
"""
pdlearn.adaptor.methods
~~~~~~~~~~~~~~~~~~~~~~~
Module implementing methods for pdlearn classes.
"""
import pandas as pd
def feature_property(name):
"""
Create a method adapting a parent class' property to return a pandas frame.
"""
# pylint: disable=C0111
@property
def method(self):
# pylint: disable=W0212
with self._unyouthanize():
prop = getattr(self, name + '_')
if self.pandas_mode_:
return pd.Series(prop, index=self.feature_names_, name=name)
else:
return prop
return method
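# Usage sketch (assumption, not in the original module): inside a pdlearn
# adaptor class,
#   feature_importances_ = feature_property('feature_importances')
# exposes the estimator's ``feature_importances_`` as a pandas Series indexed
# by ``feature_names_`` whenever ``pandas_mode_`` is enabled.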
| 3.265625 | 3 |
BroCode/lessons/13-nested_loops.py | sofiaEkn/Python_Exercises | 0 | 11272 | <filename>BroCode/lessons/13-nested_loops.py
# nested loops = The "inner loop" will finish all of it's iterations before
# finishing one iteration of the "outer loop"
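# For example, rows = 3, columns = 5 and symbol = "*" prints three lines of "*****".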
rows = int(input("How many rows?: "))
columns = int(input("How many columns?: "))
symbol = input("Enter a symbol to use: ")
for i in range(rows):
for j in range(columns):
print(symbol, end="")
    print()
| 4.5 | 4 |
duct/sources/python/uwsgi.py | geostarling/duct | 12 | 11273 | <gh_stars>10-100
"""
.. module:: uwsgi
:platform: Any
:synopsis: Reads UWSGI stats
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import json
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from zope.interface import implementer
from twisted.internet import defer, reactor
from twisted.internet.protocol import ClientCreator, Protocol
from duct.interfaces import IDuctSource
from duct.objects import Source
class JSONProtocol(Protocol):
"""
JSON line protocol
"""
delimiter = '\n'
def __init__(self):
self.ready = False
self.buf = StringIO()
self.d = defer.Deferred()
def dataReceived(self, data):
self.buf.write(data)
def connectionLost(self, *_a):
self.buf.seek(0)
self.d.callback(json.load(self.buf))
def disconnect(self):
"""Disconnect transport
"""
return self.transport.loseConnection()
@implementer(IDuctSource)
class Emperor(Source):
"""Connects to UWSGI Emperor stats and creates useful metrics
**Configuration arguments:**
:param host: Hostname (default localhost)
:type host: str.
:param port: Port
:type port: int.
"""
@defer.inlineCallbacks
def get(self):
host = self.config.get('host', 'localhost')
port = int(self.config.get('port', 6001))
proto = yield ClientCreator(
reactor, JSONProtocol).connectTCP(host, port)
stats = yield proto.d
nodes = stats.get('vassals', [])
events = []
active = 0
accepting = 0
respawns = 0
for node in nodes:
if node['accepting'] > 0:
active += 1
accepting += node['accepting']
if node['respawns'] > 0:
respawns += 1
events.extend([
self.createEvent('ok', 'accepting', node['accepting'],
prefix=node['id'] + '.accepting'),
self.createEvent('ok', 'respawns', node['respawns'],
prefix=node['id'] + '.respawns'),
])
events.extend([
self.createEvent('ok', 'active', active, prefix='total.active'),
self.createEvent('ok', 'accepting', accepting,
prefix='total.accepting'),
self.createEvent('ok', 'respawns', respawns,
prefix='total.respawns'),
])
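        # Per-vassal metrics are "<id>.accepting" and "<id>.respawns"; the
        # aggregates are "total.active", "total.accepting" and "total.respawns".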
defer.returnValue(events)
| 2.125 | 2 |
xform/utils.py | alisonamerico/Django-XForm | 3 | 11274 | <gh_stars>1-10
import datetime
import importlib
import json
import logging
import math
import mimetypes
import os
import re
import sys
import uuid
import requests
from urllib.parse import urljoin
from wsgiref.util import FileWrapper
from xml.dom import minidom, Node
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import ValidationError
from django.db import IntegrityError
from django.http import HttpResponse, Http404
from django.http import HttpResponseNotFound, StreamingHttpResponse
from django.utils import timezone
from rest_framework import exceptions
from .tags import XFORM_ID_STRING, VERSION
PENDING = 0
SUCCESSFUL = 1
FAILED = 2
EXTERNAL_EXPORT_TYPES = ['xls']
EXPORT_EXT = {
'csv': 'csv',
'csvzip': 'csv_zip',
'kml': 'kml',
'savzip': 'sav_zip',
'uuid': 'external',
'xls': 'xls',
'xlsx': 'xls',
'zip': 'zip',
}
class XLSFormError(Exception):
pass
class DuplicateInstance(Exception):
def __str__(self):
return 'Duplicate Instance'
class InstanceInvalidUserError(Exception):
def __str__(self):
return 'Could not determine the user.'
class InstanceParseError(Exception):
def __str__(self):
return 'The instance could not be parsed.'
class InstanceEmptyError(InstanceParseError):
def __str__(self):
return 'Empty instance'
class NonUniqueFormIdError(Exception):
pass
class InstanceMultipleNodeError(Exception):
pass
class FormIsMergedDatasetError(Exception):
"""Exception class for merged datasets"""
def __str__(self):
return 'Submissions are not allowed on merged datasets.'
class FormInactiveError(Exception):
"""Exception class for inactive forms"""
def __str__(self):
return 'Form is inactive'
def generate_content_disposition_header(name, extension, show_date=True):
if name is None:
return 'attachment;'
if show_date:
name = "%s-%s" % (name, timezone.now().strftime("%Y-%m-%d-%H-%M-%S"))
return 'attachment; filename=%s.%s' % (name, extension)
def _get_all_attributes(node):
"""
Go through an XML document returning all the attributes we see.
"""
if hasattr(node, "hasAttributes") and node.hasAttributes():
for key in node.attributes.keys():
yield key, node.getAttribute(key)
for child in node.childNodes:
for pair in _get_all_attributes(child):
yield pair
def _flatten_dict_nest_repeats(d, prefix):
"""
Return a list of XPath, value pairs.
:param d: A dictionary
:param prefix: A list of prefixes
"""
for key, value in d.items():
new_prefix = prefix + [key]
if isinstance(value, dict):
for pair in _flatten_dict_nest_repeats(value, new_prefix):
yield pair
elif isinstance(value, list):
repeats = []
for i, item in enumerate(value):
item_prefix = list(new_prefix) # make a copy
if isinstance(item, dict):
repeat = {}
for path, value in _flatten_dict_nest_repeats(
item, item_prefix):
# TODO: this only considers the first level of repeats
repeat.update({u"/".join(path[1:]): value})
repeats.append(repeat)
else:
repeats.append({u"/".join(item_prefix[1:]): item})
yield (new_prefix, repeats)
else:
yield (new_prefix, value)
def _gather_parent_node_list(node):
node_names = []
# also check for grand-parent node to skip document element
if node.parentNode and node.parentNode.parentNode:
node_names.extend(_gather_parent_node_list(node.parentNode))
node_names.extend([node.nodeName])
return node_names
def xpath_from_xml_node(node):
node_names = _gather_parent_node_list(node)
return "/".join(node_names[1:])
def _xml_node_to_dict(node, repeats=None, encrypted=False):
    # use None instead of a mutable default argument
    repeats = repeats if repeats is not None else []
if len(node.childNodes) == 0:
# there's no data for this leaf node
return None
elif len(node.childNodes) == 1 and \
node.childNodes[0].nodeType == node.TEXT_NODE:
# there is data for this leaf node
return {node.nodeName: node.childNodes[0].nodeValue}
else:
# this is an internal node
value = {}
for child in node.childNodes:
# handle CDATA text section
if child.nodeType == child.CDATA_SECTION_NODE:
return {child.parentNode.nodeName: child.nodeValue}
d = _xml_node_to_dict(child, repeats)
if d is None:
continue
child_name = child.nodeName
child_xpath = xpath_from_xml_node(child)
if list(d) != [child_name]:
raise AssertionError()
node_type = dict
# check if name is in list of repeats and make it a list if so
# All the photo attachments in an encrypted form use name media
if child_xpath in repeats or (encrypted and child_name == 'media'):
node_type = list
if node_type == dict:
if child_name not in value:
value[child_name] = d[child_name]
else:
# node is repeated, aggregate node values
node_value = value[child_name]
# 1. check if the node values is a list
if not isinstance(node_value, list):
# if not a list create
value[child_name] = [node_value]
# 2. parse the node
d = _xml_node_to_dict(child, repeats)
# 3. aggregate
value[child_name].append(d[child_name])
else:
if child_name not in value:
value[child_name] = [d[child_name]]
else:
value[child_name].append(d[child_name])
if value == {}:
return None
else:
return {node.nodeName: value}
def set_uuid(obj):
"""
Only give an object a new UUID if it does not have one.
"""
if not obj.uuid:
obj.uuid = uuid.uuid4().hex
def clean_and_parse_xml(xml_string):
clean_xml_str = xml_string.strip()
try:
clean_xml_str = clean_xml_str.decode("utf-8")
except Exception:
pass
clean_xml_str = re.sub(r">\s+<", u"><", clean_xml_str)
xml_obj = minidom.parseString(clean_xml_str)
return xml_obj
def get_meta_from_xml(xml_str, meta_name):
xml = clean_and_parse_xml(xml_str)
children = xml.childNodes
# children ideally contains a single element
# that is the parent of all survey elements
if children.length == 0:
raise ValueError("XML string must have a survey element.")
survey_node = children[0]
meta_tags = [n for n in survey_node.childNodes if
n.nodeType == Node.ELEMENT_NODE and
(n.tagName.lower() == "meta" or
n.tagName.lower() == "orx:meta")]
if len(meta_tags) == 0:
return None
# get the requested tag
meta_tag = meta_tags[0]
uuid_tags = [n for n in meta_tag.childNodes if
n.nodeType == Node.ELEMENT_NODE and
(n.tagName.lower() == meta_name.lower() or
n.tagName.lower() == u'orx:%s' % meta_name.lower())]
if len(uuid_tags) == 0:
return None
uuid_tag = uuid_tags[0]
return uuid_tag.firstChild.nodeValue.strip() if uuid_tag.firstChild\
else None
def flatten(l):
return [item for sublist in l for item in sublist]
def _get_fields_of_type(xform, types):
k = []
survey_elements = flatten(
[xform.get_survey_elements_of_type(t) for t in types])
for element in survey_elements:
name = element.get_abbreviated_xpath()
k.append(name)
return k
def get_numeric_fields(xform):
"""List of numeric field names for specified xform"""
return _get_fields_of_type(xform, ['decimal', 'integer'])
def get_uuid_from_xml(xml):
def _uuid_only(uuid, regex):
matches = regex.match(uuid)
if matches and len(matches.groups()) > 0:
return matches.groups()[0]
return None
uuid = get_meta_from_xml(xml, "instanceID")
regex = re.compile(r"uuid:(.*)")
if uuid:
return _uuid_only(uuid, regex)
# check in survey_node attributes
xml = clean_and_parse_xml(xml)
children = xml.childNodes
# children ideally contains a single element
# that is the parent of all survey elements
if children.length == 0:
raise ValueError("XML string must have a survey element.")
survey_node = children[0]
uuid = survey_node.getAttribute('instanceID')
if uuid != '':
return _uuid_only(uuid, regex)
return None
def numeric_checker(string_value):
if string_value.isdigit():
return int(string_value)
else:
try:
value = float(string_value)
if math.isnan(value):
value = 0
return value
except ValueError:
pass
def get_values_matching_key(doc, key):
"""
Returns iterator of values in 'doc' with the matching 'key'.
"""
def _get_values(doc, key):
if doc is not None:
if key in doc:
yield doc[key]
for z in doc.items():
v = z[1]
if isinstance(v, dict):
for item in _get_values(v, key):
yield item
elif isinstance(v, list):
for i in v:
for j in _get_values(i, key):
yield j
return _get_values(doc, key)
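# For example (illustration, not in the original file):
#   list(get_values_matching_key({'a': 1, 'b': {'a': 2}}, 'a'))  ->  [1, 2]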
class XFormInstanceParser(object):
def __init__(self, xml_str, data_dictionary):
self.dd = data_dictionary
self.parse(xml_str)
def parse(self, xml_str):
self._xml_obj = clean_and_parse_xml(xml_str)
self._root_node = self._xml_obj.documentElement
repeats = [e.get_abbreviated_xpath()
for e in self.dd.get_survey_elements_of_type(u"repeat")]
self._dict = _xml_node_to_dict(self._root_node, repeats)
self._flat_dict = {}
if self._dict is None:
raise InstanceEmptyError
for path, value in _flatten_dict_nest_repeats(self._dict, []):
self._flat_dict[u"/".join(path[1:])] = value
self._set_attributes()
def get_root_node(self):
return self._root_node
def get_root_node_name(self):
return self._root_node.nodeName
def get(self, abbreviated_xpath):
return self.to_flat_dict()[abbreviated_xpath]
def to_dict(self):
return self._dict
def to_flat_dict(self):
return self._flat_dict
def get_attributes(self):
return self._attributes
def _set_attributes(self):
self._attributes = {}
all_attributes = list(_get_all_attributes(self._root_node))
for key, value in all_attributes:
# Since enketo forms may have the template attribute in
# multiple xml tags, overriding and log when this occurs
if key in self._attributes:
logger = logging.getLogger("console_logger")
logger.debug("Skipping duplicate attribute: %s"
" with value %s" % (key, value))
logger.debug(str(all_attributes))
else:
self._attributes[key] = value
def get_xform_id_string(self):
return self._attributes[u"id"]
def get_version(self):
return self._attributes.get(u"version")
def get_flat_dict_with_attributes(self):
result = self.to_flat_dict().copy()
result[XFORM_ID_STRING] = self.get_xform_id_string()
version = self.get_version()
if version:
result[VERSION] = self.get_version()
return result
def response_with_mimetype_and_name(mimetype,
name,
extension=None,
show_date=True,
file_path=None,
use_local_filesystem=False,
full_mime=False):
if extension is None:
extension = mimetype
if not full_mime:
mimetype = "application/%s" % mimetype
if file_path:
try:
if isinstance(file_path, InMemoryUploadedFile):
response = StreamingHttpResponse(
file_path, content_type=mimetype)
response['Content-Length'] = file_path.size
elif not use_local_filesystem:
default_storage = get_storage_class()()
wrapper = FileWrapper(default_storage.open(file_path))
response = StreamingHttpResponse(
wrapper, content_type=mimetype)
response['Content-Length'] = default_storage.size(file_path)
else:
wrapper = FileWrapper(open(file_path))
response = StreamingHttpResponse(
wrapper, content_type=mimetype)
response['Content-Length'] = os.path.getsize(file_path)
except IOError:
response = HttpResponseNotFound(
"The requested file could not be found.")
else:
response = HttpResponse(content_type=mimetype)
response['Content-Disposition'] = generate_content_disposition_header(
name, extension, show_date)
return response
def _get_export_type(export_type):
if export_type in list(EXPORT_EXT):
export_type = EXPORT_EXT[export_type]
else:
raise exceptions.ParseError(
"'%(export_type)s' format not known or not implemented!" %
{'export_type': export_type})
return export_type
def get_file_extension(content_type):
    # guess_extension() returns None for unknown types; avoid indexing None
    ext = mimetypes.guess_extension(content_type)
    return ext[1:] if ext else ''
def get_media_file_response(metadata, username=None):
"""
Returns a HTTP response for media files.
HttpResponse 200 if it represents a file on disk.
HttpResponseRedirect 302 incase the metadata represents a url.
HttpResponseNotFound 404 if the metadata file cannot be found.
"""
if metadata.data_type == 'media' and metadata.data_file:
file_path = metadata.data_file.name
filename, extension = os.path.splitext(file_path.split('/')[-1])
extension = extension.strip('.')
dfs = get_storage_class()()
if dfs.exists(file_path):
return response_with_mimetype_and_name(
metadata.data_file_type,
filename,
extension=extension,
show_date=False,
file_path=file_path,
full_mime=True)
elif metadata.data_type == 'url' and not metadata.data_file:
url = requests.Request(
'GET', metadata.data_value, params={
'username': username
}
).prepare().url
try:
data_file = metadata.get_file(url)
except Exception:
raise Http404
return response_with_mimetype_and_name(
mimetype=data_file.content_type,
name=data_file.name,
extension=get_file_extension(data_file.content_type),
show_date=False,
file_path=data_file,
use_local_filesystem=False,
full_mime=True
)
return HttpResponseNotFound()
def report_exception(*args, **kwargs):
# dummy
return
def publish_form(callback):
"""
Calls the callback function to publish a XLSForm and returns appropriate
message depending on exception throw during publishing of a XLSForm.
"""
try:
return callback()
# except (PyXFormError, XLSFormError) as e:
# return {'type': 'alert-error', 'text': str(e)}
except IntegrityError as e:
return {
'type': 'alert-error',
'text': 'Form with this id or SMS-keyword already exists.',
}
# except ProcessTimedOut as e:
# # catch timeout errors
# return {
# 'type': 'alert-error',
# 'text': 'Form validation timeout, please try again.',
# }
except (MemoryError, OSError) as e:
return {
'type': 'alert-error',
'text': (
'An error occurred while publishing the form. '
'Please try again.'
),
}
except (AttributeError, Exception, ValidationError) as e:
report_exception("Form publishing exception: {}".format(e), str(e),
sys.exc_info())
return {'type': 'alert-error', 'text': str(e)}
def _get_tag_or_element_type_xpath(xform, tag):
elems = xform.get_survey_elements_of_type(tag)
return elems[0].get_abbreviated_xpath() if elems else tag
def calculate_duration(start_time, end_time):
"""
This function calculates duration when given start and end times.
An empty string is returned if either of the time formats does
not match '_format' format else, the duration is returned
"""
_format = "%Y-%m-%dT%H:%M:%S"
try:
_start = datetime.datetime.strptime(start_time[:19], _format)
_end = datetime.datetime.strptime(end_time[:19], _format)
except (TypeError, ValueError):
return ''
duration = (_end - _start).total_seconds()
return duration
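# For example (illustration, not in the original file):
#   calculate_duration('2020-01-01T10:00:00', '2020-01-01T10:05:30')  ->  330.0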
def inject_instanceid(xml_str, uuid):
if get_uuid_from_xml(xml_str) is None:
xml = clean_and_parse_xml(xml_str)
children = xml.childNodes
if children.length == 0:
raise ValueError("XML string must have a survey element.")
# check if we have a meta tag
survey_node = children.item(0)
meta_tags = [
n for n in survey_node.childNodes
if n.nodeType == Node.ELEMENT_NODE and n.tagName.lower() == "meta"
]
if len(meta_tags) == 0:
meta_tag = xml.createElement("meta")
xml.documentElement.appendChild(meta_tag)
else:
meta_tag = meta_tags[0]
# check if we have an instanceID tag
uuid_tags = [
n for n in meta_tag.childNodes
if n.nodeType == Node.ELEMENT_NODE and n.tagName == "instanceID"
]
if len(uuid_tags) == 0:
uuid_tag = xml.createElement("instanceID")
meta_tag.appendChild(uuid_tag)
else:
uuid_tag = uuid_tags[0]
# insert meta and instanceID
text_node = xml.createTextNode(u"uuid:%s" % uuid)
uuid_tag.appendChild(text_node)
return xml.toxml()
return xml_str
class EnketoError(Exception):
    default_message = ("There was a problem with your submission or form. "
                       "Please contact support.")
def __init__(self, message=None):
if message is None:
self.message = self.default_message
else:
self.message = message
def __str__(self):
return "{}".format(self.message)
def handle_enketo_error(response):
    """Handle enketo error response."""
    try:
        data = json.loads(response.content)
    except ValueError:
        # body is not JSON; fall back to a generic error
        if response.status_code == 502:
            raise EnketoError(
                u"Sorry, we cannot load your form right now. Please try "
                "again later.")
        raise EnketoError()
    else:
        if 'message' in data:
            raise EnketoError(data['message'])
        raise EnketoError(response.text)
def enketo_url(
form_url, id_string, instance_xml=None,
instance_id=None,
return_url=None,
offline=False
):
if (not hasattr(settings, 'ENKETO_URL') or
not hasattr(settings, 'ENKETO_API_SURVEY_PATH') or
not hasattr(settings, 'ENKETO_API_TOKEN') or
settings.ENKETO_API_TOKEN == ''):
return False
values = {'form_id': id_string, 'server_url': form_url}
if instance_id and instance_xml:
url = urljoin(settings.ENKETO_URL, settings.ENKETO_API_INSTANCE_PATH)
values.update({
'instance': instance_xml,
'instance_id': instance_id,
'return_url': return_url
})
else:
survey_path = settings.ENKETO_API_SURVEY_PATH
if offline:
survey_path += '/offline'
url = urljoin(settings.ENKETO_URL, survey_path)
response = requests.post(
url,
data=values,
auth=(settings.ENKETO_API_TOKEN, ''),
verify=getattr(settings, 'ENKETO_VERIFY_SSL', False))
if response.status_code in (200, 201):
try:
data = json.loads(response.content)
except ValueError:
pass
else:
url = (data.get('edit_url') or data.get('offline_url') or
data.get('url'))
if url:
return url
handle_enketo_error(response)
def get_form_url(
request, protocol='http', preview=False, # xform_pk=None
):
"""
Return a form list url endpoint to be used to make a request to Enketo.
For example, it will return https://example.com and Enketo will know to
look for the form list at https://example.com/formList. If a username is
provided then Enketo will request the form list from
https://example.com/[username]/formList. Same applies for preview if
preview is True and also to a single form when xform_pk is provided.
"""
http_host = request.META.get('HTTP_HOST', 'dev.monitora.sisicmbio.icmbio.gov.br')
url = '%s://%s' % (protocol, http_host)
if preview:
url = '%s/preview' % url
return "{}/xform".format(url)
def get_from_module(module_name, function_name):
module = importlib.import_module(module_name)
return getattr(module, function_name)
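# For example (illustration, not in the original file):
#   get_from_module('math', 'sqrt') is math.sqrt  ->  True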
| 1.929688 | 2 |
Phase5/testing_query.py | MrKLawrence/Course-Registration-Data-Analytics | 0 | 11275 | import datetime
from pymongo import MongoClient
import pymongo
import pprint
try:
db = MongoClient("mongodb://localhost:27017")["hkust"]
f=0.05
try:
print("Querying Documents...")
listOfCourseWithWaitingListSize = db.course.aggregate([
{ "$unwind": "$sections" },
# { "$project": { "newProduct": {"$multiply": [f, "$sections.enrol"]}, "satisfied": satisfied} },
# { "$project": { "compareResult": {"$gte": ["$sections.wait", "$newProduct"]}, "match_ts" : "$sections.recordTime"} },
{"$match": #filter timeslot
{"$and":[
# {"compareResult": "true"},
# {"satisfied" : "Yes"},
#{"sections.sectionId": {"$ne": null}},
#{"sections.sectionId": {"$exists": true}},
# {"sections.sectionId": {"$regex": '^L'}},
{"sections.recordTime": {"$gte": datetime.datetime.strptime("2018-01-26T14:00Z", "%Y-%m-%dT%H:%MZ")}},
{"sections.recordTime": {"$lte": datetime.datetime.strptime("2018-02-01T11:30Z", "%Y-%m-%dT%H:%MZ")}}
]
}
},
{ "$project":
{"code": 1,
"title": 1,
"credits": 1,
"sections":1,
# "description":1,
"satisfied":{"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]},
"lecSatisfied":{
"$cond":[{
"$and":[
{
"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]
},
{
"$eq":[{"$substr": ["$sections.sectionId",0,1]},"L"]
}
]
},1,0]
}
},
},
{
"$sort": {"sections.sectionId": 1 }
},
{
"$group":{
"_id":{ "code": "$code", "recordTime":"$sections.recordTime"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$sections.recordTime"},
"sections":{
"$push": {
"sectionId":"$sections.sectionId",
"dateAndTime":"$sections.offerings.dateAndTime",
"quota":"$sections.quota",
"enrol":"$sections.enrol",
"avail": { "$subtract": [ "$sections.quota", "$sections.enrol"] } ,
"wait":"$sections.wait",
"satisfied":"$satisfied",
}
},
"lecSatisfiedCount":{"$sum":"$lecSatisfied"}
}
},
{ "$match": {"lecSatisfiedCount": {"$gt":0}}
},
{
"$sort": {"recordTime": 1 }
},
{
"$group":{
"_id":{ "code": "$code"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$recordTime"},
"sections":{"$last": "$sections"},
"lecSatisfiedCount":{"$last": "$lecSatisfiedCount"}
}
},
{
"$project":{
"_id":0,
"code": 1,
"title":1,
"credits": 1,
"recordTime":1,
"sections":1
}
}
]
)
# pprint.pprint(listOfCourseWithWaitingListSize)
recordNo = 0
for oneCourse in listOfCourseWithWaitingListSize:
recordNo = recordNo + 1
print("Record {:d}:".format(recordNo))
pprint.pprint(oneCourse)
# print("code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}".format(oneCourse["code"], oneCourse["title"], oneCourse["credits"],oneCourse["sections"][0]["quota"],oneCourse["sections"][0]["enrol"],oneCourse["sections"][0]["avail"],oneCourse["sections"][0]["wait"]))
# for oneSection in oneCourse["sections"]:
# print("sections: {:s}, Date & Time: {:s}".format(oneSection["sectionId"],' '.join(oneSection["dateAndTime"])))
# print("description: {:s}".format(oneCourse["description"]))
#pprint(" Record {:d}: (sid={:s}, sname={:s}, byear={:d})".format(recordNo, oneStudent["sid"], oneStudent["sname"], oneStudent["byear"]))
#print("Record {:d}: (course={:s})".format(recordNo, oneCourse))
except pymongo.errors.ConnectionFailure as error:
print("Document Querying Failed! Error Message: \"{}\"".format(error))
#return outputCourseDetails(courseCode, lectureSection, satisfied)
except pymongo.errors.ConnectionFailure as error:
print("Document Insertion Failed! Error Message: \"{}\"".format(error))
import numpy
import time
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
#Model 1
def trainModel(trainingDataFilename):
# to set a seed of a random number generator used in the "optimization" tool in the neural network model
    numpy.random.seed(int(time.time()))  # numpy requires an integer seed
# Step 1: to load the data
# Step 1a: to read the dataset with "numpy" function
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
# Step 1b: to split the dataset into two datasets, namely the input attribute dataset (X) and the target attribute dataset (Y)
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='relu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
# Step 5: To evaluate the model
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 2:
def trainModel2(trainingDataFilename):
    numpy.random.seed(int(time.time()))
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(10, input_dim=4, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 3:
def trainModel3(trainingDataFilename):
    numpy.random.seed(int(time.time()))
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(64, input_dim=4, activation='softmax'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 4:
def trainModel4(trainingDataFilename):
    numpy.random.seed(int(time.time()))
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='softmax'))
model.add(Dense(7, activation='softmax'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss='logcosh', optimizer='rmsprop', metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.3, epochs=300, batch_size=7)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 5:
def trainModel5(trainingDataFilename):
def trainModel5_beforeAddDrop(trainingDataFile_beforeAddDrop):
        numpy.random.seed(int(time.time()))
dataset = numpy.loadtxt(trainingDataFile_beforeAddDrop, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='relu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
def trainModel5_afterAddDrop(trainingDataFile_afterAddDrop):
        numpy.random.seed(int(time.time()))
dataset = numpy.loadtxt(trainingDataFile_afterAddDrop, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='relu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
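    # Assumption (not in the original file): the inner helpers were defined but
    # never invoked, so trainModel5 returned None. Train both phases and return
    # the models; the single available filename is reused for both phases
    # purely for illustration.
    return (trainModel5_beforeAddDrop(trainingDataFilename),
            trainModel5_afterAddDrop(trainingDataFilename))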
| 2.859375 | 3 |
aws_iot/dashboard/migrations/0003_auto_20160427_1641.py | anduslim/aws_iot | 0 | 11276 | <reponame>anduslim/aws_iot
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0002_gatewaynode_sensorstickerreading'),
]
operations = [
migrations.CreateModel(
name='DerivedIntakeReading',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('modified_timestamp', models.DateTimeField(auto_now=True)),
('server_timestamp', models.DateTimeField(null=True, blank=True)),
('isOpen', models.NullBooleanField(verbose_name='Opened')),
],
options={
'verbose_name': 'Derived Intake Reading',
'verbose_name_plural': 'Derived Intake Reading',
},
),
migrations.RemoveField(
model_name='gatewaynode',
name='user2',
),
migrations.RemoveField(
model_name='medicationintake',
name='expected_intake',
),
migrations.RemoveField(
model_name='medicationintake',
name='user',
),
migrations.RemoveField(
model_name='sensornode',
name='medication_intake',
),
migrations.RemoveField(
model_name='sensorstickerreading',
name='gw_id',
),
migrations.RemoveField(
model_name='sensorstickerreading',
name='gw_timestamp',
),
migrations.AddField(
model_name='medicationintake',
name='expected_intake_timing',
field=models.TimeField(null=True, verbose_name='Expected Intake Time', blank=True),
),
migrations.AddField(
model_name='medicationintake',
name='med_desc',
field=models.CharField(max_length=32, null=True, blank=True),
),
migrations.AddField(
model_name='sensornode',
name='medication_intake_list',
field=models.ManyToManyField(to='dashboard.MedicationIntake', null=True, blank=True),
),
migrations.DeleteModel(
name='GatewayNode',
),
migrations.DeleteModel(
name='IntakeTime',
),
migrations.AddField(
model_name='derivedintakereading',
name='sensor_id',
field=models.ForeignKey(to='dashboard.SensorNode'),
),
]
| 1.640625 | 2 |
investing_algorithm_framework/core/models/__init__.py | coding-kitties/investing-algorithm-framework | 9 | 11277 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def create_all_tables():
db.create_all()
def initialize_db(app: Flask):
db.init_app(app)
db.app = app
from investing_algorithm_framework.core.models.order_status import OrderStatus
from investing_algorithm_framework.core.models.order_type import OrderType
from investing_algorithm_framework.core.models.order_side import OrderSide
from investing_algorithm_framework.core.models.time_unit import TimeUnit
from investing_algorithm_framework.core.models.order import Order
from investing_algorithm_framework.core.models.portfolio import Portfolio
from investing_algorithm_framework.core.models.position import Position
__all__ = [
"db",
"Portfolio",
"Position",
'Order',
"OrderType",
'OrderSide',
"TimeUnit",
"create_all_tables",
"initialize_db",
"OrderStatus"
]
| 2.046875 | 2 |
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/file/__init__.py | Mannan2812/azure-cli-extensions | 207 | 11278 | <reponame>Mannan2812/azure-cli-extensions
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .fileservice import FileService
from .models import (
Share,
ShareProperties,
File,
FileProperties,
Directory,
DirectoryProperties,
FileRange,
ContentSettings,
CopyProperties,
SharePermissions,
FilePermissions,
DeleteSnapshot,
)
| 1.039063 | 1 |
backend/db/patient.py | wooque/openpacs | 1 | 11279 | <reponame>wooque/openpacs
from db.table import Table
from db.study import Study
from db.series import Series
from pypika.pseudocolumns import PseudoColumn
class Patient(Table):
name = 'patients'
async def sync_db(self):
await self.exec("""
CREATE TABLE IF NOT EXISTS patients (
id SERIAL PRIMARY KEY,
patient_id TEXT UNIQUE NOT NULL,
name TEXT NOT NULL,
birth_date TEXT,
sex TEXT,
meta JSONB
);
""")
await self.exec("""
CREATE INDEX IF NOT EXISTS patients_patient_id ON patients(patient_id);
""")
async def insert_or_select(self, data):
q = self.select('*').where(self.table.patient_id == data['patient_id'])
p = await self.fetchone(q)
if p:
return p
q = self.insert().columns(
'patient_id', 'name', 'birth_date', 'sex',
).insert((
data['patient_id'], data['patient_name'],
data['patient_birth_date'], data['patient_sex'],
),).on_conflict('patient_id').do_update(
self.table.name, PseudoColumn('EXCLUDED.name'),
).returning('id')
patient_id = await self.fetchval(q)
return {'id': patient_id}
async def get_extra(self, patient_id):
from db.files import Files
q = self.select('*').where(self.table.id == patient_id)
patient = await self.fetchone(q)
patient = dict(patient)
StudyT = Study(self.conn)
q = StudyT.select('*').where(
StudyT.table.patient_id == patient_id
)
studies_data = await self.fetch(q)
studies_data = [dict(s) for s in studies_data]
studies = {}
for s in studies_data:
s['series'] = {}
studies[s['id']] = s
SeriesT = Series(self.conn)
q = SeriesT.select('*').where(
SeriesT.table.study_id.isin(list(studies.keys()))
)
series_data = await self.fetch(q)
series_data = [dict(s) for s in series_data]
for s in series_data:
s['files'] = []
studies[s['study_id']]['series'][s['id']] = s
FilesT = Files(self.conn)
q = FilesT.select('*').where(FilesT.table.study_id.isin(list(studies.keys())))
files = await self.fetch(q)
files = [dict(f) for f in files]
for f in files:
studies[f['study_id']]['series'][f['series_id']]['files'].append(f)
for s in studies.values():
s['series'] = list(s['series'].values())
patient['studies'] = list(studies.values())
return patient
| 2.625 | 3 |
egs/cops/s5/local/text2json.py | Shuang777/kaldi-2016 | 0 | 11280 | #!/usr/bin/env python
import sys
import json
def sec2str(seconds):
sec_int = int(round(seconds))
hh = sec_int / 3600
mm = (sec_int - hh * 3600) / 60
ss = sec_int - hh * 3600 - mm * 60
return "%d:%02d:%02d" % (hh, mm, ss)
if len(sys.argv) != 4:
print "Usage:", __file__, "<segment> <text> <json>"
print " e.g.:", __file__, "data/dev/segmetns data/dev/text trans.json"
sys.exit(1)
segment_filename = sys.argv[1]
text_filename = sys.argv[2]
output_filename = sys.argv[3]
start_time = {}
end_time = {}
utt2chn = {}
utt2id = {}
with open(segment_filename) as segmentfile:
for line in segmentfile:
fields = line.split()
utt = fields[0]
start_time[utt] = float(fields[2]);
end_time[utt] = float(fields[3]);
id, chn = fields[1].split("_", 1)
utt2chn[utt] = chn
utt2id[utt] = id
data = {}
with open(text_filename) as textfile:
for line in textfile:
utt, text = line.split(" ", 1)
chn = utt2chn[utt]
if chn not in data:
data[chn] = {
'EmpID1': utt2id[utt],
'transcript': []
}
start = sec2str(start_time[utt])
end = sec2str(end_time[utt])
utt_info = {
'start': start,
'end': end,
'usable': True,
'speaker': 'OFFICER',
'utterance': text.strip()
}
data[chn]['transcript'].append(utt_info)
with open(output_filename, 'w') as outfile:
json.dump(data, outfile)
| 2.71875 | 3 |
codeEval/hard/levenshtein_distance.py | ferhatelmas/algo | 25 | 11281 | <reponame>ferhatelmas/algo<filename>codeEval/hard/levenshtein_distance.py
import sys
from string import ascii_lowercase as alphabet
def generate_neighbours(ws, s):
ls, l = set(), len(s)
for i in xrange(l + 1):
ls.add(s[:i] + s[i + 1 :])
for e in alphabet:
ls.add(s[:i] + e + s[i:])
if i < l and e != s[i]:
ls.add(s[:i] + e + s[i + 1 :])
return ls.intersection(ws)
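
# generate_network expands outward from the seed word: it repeatedly takes
# in-dictionary neighbours (one edit away) of words already reached, and
# returns the size of the resulting transitive "friend network".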
def generate_network(ws, s):
gen, r = generate_neighbours(ws, s), set(s)
while len(gen) > 0:
s = gen.pop()
if s not in r:
r.add(s)
gen.update(generate_neighbours(ws, s))
return len(r.intersection(ws))
test_cases = open(sys.argv[1], "r")
words = set([test.strip() for test in test_cases])
test_cases.close()
print generate_network(words, "hello")
| 3.375 | 3 |
models/RelSaleSizeProject.py | the-Minister-0001/cardano-nft-admin | 1 | 11282 | from sqlalchemy import Column, Integer
from sqlalchemy import ForeignKey
from .base import Base
class RelSaleSizeProject(Base):
__tablename__ = 'rel_salesizes_projects'
id = Column(Integer, primary_key=True)
project_id = Column(Integer, ForeignKey('projects.id'))
salesize_id = Column(Integer, ForeignKey('salesizes.id'))
| 2.484375 | 2 |
sudoku/recursive_solver.py | mkomaiha/NERS570-Sudoku | 0 | 11283 | from sudoku.constants import SIZE, BOX_SIZE
from sudoku import Sudoku
class RS(Sudoku):
def __init__(self, grade=0, id=None):
super().__init__(grade, id)
def possible(self, r, c, n):
for i in range(0, SIZE):
if self.solved[r, i] == n:
return False
for i in range(0, SIZE):
if self.solved[i, c] == n:
return False
c0 = (c//BOX_SIZE)*BOX_SIZE
r0 = (r//BOX_SIZE)*BOX_SIZE
for i in range(0, BOX_SIZE):
for j in range(0, BOX_SIZE):
if self.solved[r0+i, c0+j] == n:
return False
return True
def r_solve(self, printflag=False):
for r in range(SIZE):
for c in range(SIZE):
if self.solved[r, c] == 0:
for n in range(1, 10):
if self.possible(r, c, n):
self.solved[r, c] = n
                        # Prevent resetting the board
if (self.r_solve(printflag)):
return True
self.solved[r, c] = 0
return False
if printflag == True:
print('recursive results:')
print(self.solved)
return True
def solve(self, printflag=False):
self.r_solve(printflag)
return self.solved
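
# Minimal usage sketch (assuming the Sudoku base class loads a puzzle for the
# given grade/id into self.solved):
#   rs = RS(grade=0)
#   grid = rs.solve(printflag=True)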
| 3.53125 | 4 |
scraper.py | souravkaranjai/python-webscraper | 0 | 11284 | <filename>scraper.py<gh_stars>0
#!/usr/bin/python3
print('Hello world')
| 1.5 | 2
src/algoritmia/problems/binpacking/firstfitbinpacker.py | DavidLlorens/algoritmia | 6 | 11285 | <filename>src/algoritmia/problems/binpacking/firstfitbinpacker.py<gh_stars>1-10
from algoritmia.problems.binpacking.nextfitbinpacker import NextFitBinPacker
class FirstFitBinPacker(NextFitBinPacker):#[full
def pack(self, w: "IList<Real>", C: "Real") -> "IList<int>":
x = [None] * len(w)
free = []
for i in range(len(w)):
for j in range(len(free)):
if free[j] >= w[i]:
x[i] = j
free[j] -= w[i]
break
            if x[i] is None:
x[i] = len(free)
free.append(C-w[i])
        return x#]full
| 2.625 | 3
facebook/matrixWordSearch.py | rando3/leetcode-python | 0 | 11286 | <reponame>rando3/leetcode-python
https://leetcode.com/problems/word-search/description/
| 1.726563 | 2
contrib/make_hdf.py | scopatz/PyTables | 9 | 11287 | #!/usr/bin/env python
from __future__ import generators
import tables, cPickle, time
#################################################################################
def is_scalar(item):
try:
iter(item)
#could be a string
try:
item[:0]+'' #check for string
return 'str'
except:
return 0
except:
return 'notstr'
def is_dict(item):
try:
item.iteritems()
return 1
except:
return 0
def make_col(row_type, row_name, row_item, str_len):
    '''for strings it will always make at least 80 char or twice max char size'''
set_len=80
if str_len:
if 2*str_len>set_len:
set_len=2*str_len
row_type[row_name]=tables.Col("CharType", set_len)
else:
type_matrix={
int: tables.Col("Int32", 1),
float: tables.Col("Float32", 4), #Col("Int16", 1)
}
row_type[row_name]=type_matrix[type(row_item)]
def make_row(data):
row_type={}
scalar_type=is_scalar(data)
if scalar_type:
if scalar_type=='str':
make_col(row_type, 'scalar', data, len(data))
else:
make_col(row_type, 'scalar', data, 0)
else: #it is a list-like
the_type=is_scalar(data[0])
if the_type=='str':
#get max length
the_max=0
for i in data:
if len(i)>the_max:
the_max=len(i)
make_col(row_type, 'col', data[0], the_max)
elif the_type:
make_col(row_type, 'col', data[0], 0)
else: #list within the list, make many columns
make_col(row_type, 'col_depth', 0, 0)
count=0
for col in data:
the_type=is_scalar(col[0])
if the_type=='str':
#get max length
the_max=0
                for i in col:
if len(i)>the_max:
the_max=len(i)
make_col(row_type, 'col_'+str(count), col[0], the_max)
elif the_type:
make_col(row_type, 'col_'+str(count), col[0], 0)
else:
raise ValueError('too many nested levels of lists')
count+=1
return row_type
def add_table(fileh, group_obj, data, table_name):
#figure out if it is a list of lists or a single list
#get types of columns
row_type=make_row(data)
table1=fileh.createTable(group_obj, table_name, row_type, 'H', compress=1)
row=table1.row
if is_scalar(data):
row['scalar']=data
row.append()
else:
if is_scalar(data[0]):
for i in data:
row['col']=i
row.append()
else:
count=0
for col in data:
row['col_depth']=len(col)
for the_row in col:
if is_scalar(the_row):
row['col_'+str(count)]=the_row
row.append()
else:
raise ValueError('too many levels of lists')
count+=1
table1.flush()
def add_cache(fileh, cache):
group_name='pytables_cache_v0';table_name='cache0'
root=fileh.root
group_obj=fileh.createGroup(root, group_name)
cache_str=cPickle.dumps(cache, 0)
cache_str=cache_str.replace('\n', chr(1))
cache_pieces=[]
while cache_str:
cache_part=cache_str[:8000];cache_str=cache_str[8000:]
if cache_part:
cache_pieces.append(cache_part)
row_type={}
row_type['col_0']=tables.Col("CharType", 8000)
#
table_cache=fileh.createTable(group_obj, table_name, row_type, 'H', compress =1)
for piece in cache_pieces:
print len(piece)
table_cache.row['col_0']=piece
table_cache.row.append()
table_cache.flush()
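
# Cache layout: the pickled directory tree is split into 8000-character chunks
# (newlines are swapped for chr(1) so each chunk fits the fixed-width CharType
# column) and stored in /pytables_cache_v0/cache0; Hdf_dict.get_cache reverses this.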
def save2(hdf_file, data):
fileh=tables.openFile(hdf_file, mode='w', title='logon history')
root=fileh.root;cache_root=cache={}
root_path=root._v_pathname;root=0
stack = [ (root_path, data, cache) ]
table_num=0
count=0
while stack:
(group_obj_path, data, cache)=stack.pop()
#data='wilma':{'mother':[22,23,24]}}
#grp_name wilma
for grp_name in data:
#print 'fileh=',fileh
count+=1
cache[grp_name]={}
new_group_obj=fileh.createGroup(group_obj_path, grp_name)
#print 'path=',new_group_obj._v_pathname
new_path=new_group_obj._v_pathname
#if dict, you have a bunch of groups
if is_dict(data[grp_name]):#{'mother':[22,23,24]}
stack.append((new_path, data[grp_name], cache[grp_name]))
#you have a table
else:
#data[grp_name]=[110,130,140],[1,2,3]
add_table(fileh, new_path, data[grp_name], 'tbl_'+str(table_num))
table_num+=1
#fileh=tables.openFile(hdf_file,mode='a',title='logon history')
add_cache(fileh, cache_root)
fileh.close()
########################
class Hdf_dict(dict):
def __init__(self,hdf_file,hdf_dict={},stack=[]):
self.hdf_file=hdf_file
self.stack=stack
if stack:
self.hdf_dict=hdf_dict
else:
self.hdf_dict=self.get_cache()
self.cur_dict=self.hdf_dict
def get_cache(self):
fileh=tables.openFile(self.hdf_file, rootUEP='pytables_cache_v0')
table=fileh.root.cache0
total=[]
print 'reading'
begin=time.time()
for i in table.iterrows():
total.append(i['col_0'])
total=''.join(total)
total=total.replace(chr(1), '\n')
print 'loaded cache len=', len(total), time.time()-begin
begin=time.time()
a=cPickle.loads(total)
print 'cache', time.time()-begin
return a
def has_key(self, k):
return k in self.cur_dict
def keys(self):
return self.cur_dict.keys()
def get(self,key,default=None):
try:
return self.__getitem__(key)
except:
return default
def items(self):
return list(self.iteritems())
def values(self):
return list(self.itervalues())
###########################################
def __len__(self):
return len(self.cur_dict)
def __getitem__(self, k):
if k in self.cur_dict:
#now check if k has any data
if self.cur_dict[k]:
new_stack=self.stack[:]
new_stack.append(k)
return Hdf_dict(self.hdf_file, hdf_dict=self.cur_dict[k], stack=new_stack)
else:
new_stack=self.stack[:]
new_stack.append(k)
fileh=tables.openFile(self.hdf_file, rootUEP='/'.join(new_stack))
#cur_data=getattr(self.cur_group,k) #/wilma (Group) '' =getattr(/ (Group) 'logon history',wilma)
for table in fileh.root:
#return [ i['col_1'] for i in table.iterrows() ] #[9110,91]
#perhaps they stored a single item
try:
for item in table['scalar']:
return item
except:
#otherwise they stored a list of data
try:
return [ item for item in table['col']]
except:
cur_column=[]
total_columns=[]
col_num=0
cur_row=0
num_rows=0
for row in table:
if not num_rows:
num_rows=row['col_depth']
if cur_row==num_rows:
cur_row=num_rows=0
col_num+=1
total_columns.append(cur_column)
cur_column=[]
cur_column.append( row['col_'+str(col_num)])
cur_row+=1
total_columns.append(cur_column)
return total_columns
else:
raise KeyError(k)
def iterkeys(self):
        for key in self.cur_dict:
yield key
def __iter__(self):
return self.iterkeys()
def itervalues(self):
for k in self.iterkeys():
v=self.__getitem__(k)
yield v
def iteritems(self):
# yield children
for k in self.iterkeys():
v=self.__getitem__(k)
yield (k, v)
def __repr__(self):
return '{Hdf dict}'
def __str__(self):
return self.__repr__()
#####
def setdefault(self,key,default=None):
try:
return self.__getitem__(key)
except:
            self.__setitem__(key, default)
return default
def update(self, d):
for k, v in d.iteritems():
self.__setitem__(k, v)
def popitem(self):
try:
k, v = self.iteritems().next()
del self[k]
return k, v
except StopIteration:
raise KeyError("Hdf Dict is empty")
def __setitem__(self, key, value):
raise NotImplementedError
def __delitem__(self, key):
raise NotImplementedError
def __hash__(self):
raise TypeError("Hdf dict bjects are unhashable")
if __name__=='__main__':
def write_small(file=''):
data1={
'fred':['a', 'b', 'c'],
'barney':[[9110, 9130, 9140], [91, 92, 93]],
'wilma':{'mother':{'pebbles':[22, 23, 24],'bambam':[67, 68, 69]}}
}
print 'saving'
save2(file, data1)
print 'saved'
def read_small(file=''):
#a=make_hdf.Hdf_dict(file)
a=Hdf_dict(file)
print a['wilma']
b=a['wilma']
for i in b:
print i
print a.keys()
print 'has fred', bool('fred' in a)
print 'length a', len(a)
print 'get', a.get('fred'), a.get('not here')
print 'wilma keys', a['wilma'].keys()
print 'barney', a['barney']
print 'get items'
print a.items()
for i in a.iteritems():
print 'item', i
for i in a.itervalues():
print i
a=raw_input('enter y to write out test file to test.hdf')
if a.strip()=='y':
print 'writing'
write_small('test.hdf')
print 'reading'
read_small('test.hdf')
| 2.640625 | 3 |
jinja2content.py | firemark/new-site | 0 | 11288 | """
jinja2content.py
----------------
DONT EDIT THIS FILE
Pelican plugin that processes Markdown files as jinja templates.
"""
from jinja2 import Environment, FileSystemLoader, ChoiceLoader
import os
from pelican import signals
from pelican.readers import MarkdownReader, HTMLReader, RstReader
from pelican.utils import pelican_open
from tempfile import NamedTemporaryFile
class JinjaContentMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# will look first in 'JINJA2CONTENT_TEMPLATES', by default the
# content root path, then in the theme's templates
local_dirs = self.settings.get('JINJA2CONTENT_TEMPLATES', ['.'])
local_dirs = [os.path.join(self.settings['PATH'], folder)
for folder in local_dirs]
theme_dir = os.path.join(self.settings['THEME'], 'templates')
loaders = [FileSystemLoader(_dir) for _dir
in local_dirs + [theme_dir]]
if 'JINJA_ENVIRONMENT' in self.settings: # pelican 3.7
jinja_environment = self.settings['JINJA_ENVIRONMENT']
else:
jinja_environment = {
'trim_blocks': True,
'lstrip_blocks': True,
'extensions': self.settings['JINJA_EXTENSIONS']
}
self.env = Environment(
loader=ChoiceLoader(loaders),
**jinja_environment)
def read(self, source_path):
with pelican_open(source_path) as text:
text = self.env.from_string(text).render()
with NamedTemporaryFile(delete=False) as f:
f.write(text.encode())
f.close()
content, metadata = super().read(f.name)
os.unlink(f.name)
return content, metadata
class JinjaMarkdownReader(JinjaContentMixin, MarkdownReader):
pass
class JinjaRstReader(JinjaContentMixin, RstReader):
pass
class JinjaHTMLReader(JinjaContentMixin, HTMLReader):
pass
def add_reader(readers):
for Reader in [JinjaMarkdownReader, JinjaRstReader, JinjaHTMLReader]:
for ext in Reader.file_extensions:
readers.reader_classes[ext] = Reader
def register():
signals.readers_init.connect(add_reader)
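
# Usage note (an assumption based on standard Pelican plugin conventions, not
# part of this file): enable the plugin in pelicanconf.py, e.g.
#   PLUGINS = ['jinja2content']
#   JINJA2CONTENT_TEMPLATES = ['templates']  # optional, see JinjaContentMixin above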
| 2.53125 | 3 |
task3/code/video_process.py | haohaoqian/STD | 1 | 11289 | <filename>task3/code/video_process.py
import json
from tqdm import tqdm
from utils import *
from alexnet import AlexNet
def classify(net, folder_name, resize=(224, 224)):
transform = []
if resize:
transform.append(torchvision.transforms.Resize(resize))
transform.append(torchvision.transforms.ToTensor())
    transform.append(torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])) # normalize
transform = torchvision.transforms.Compose(transform)
results = []
img_dir = folder_name + '/rgb/'
img_names = list(filter(lambda x: x.endswith(".jpg"), os.listdir(img_dir)))
for img_name in img_names:
image = Image.open(img_dir + img_name)
image = transform(image)
results.append(net.predict(torch.unsqueeze(image, dim=0)))
results = torch.cat(results, dim=0)
return torch.mean(results, dim=0).cpu().numpy()
def dump_test(file_root, save_name):
json_data = {}
# root = './dataset/task2/test/'
# save_name = './dataset/task2.json'
root = file_root
for i in tqdm(range(10)):
sub_root = root + str(i) + '/'
folders = list(filter(lambda x: not x.endswith(".pkl"), os.listdir(sub_root)))
for folder in folders:
folder_path = sub_root + folder
images, is_moved = video_loader(folder_path)
json_data[folder_path] = collide_detection(images, is_moved)
with open(save_name, "w") as f:
json.dump(json_data, f)
def dump_train(file_root, save_name, blocks=True):
json_data = {}
# root = './dataset/train/'
# save_name = './dataset/train.json'
root = file_root
for sub_root in os.listdir(root):
print('\n collecting %s' % sub_root)
sub_root = root + sub_root + '/'
folders = list(filter(lambda x: not x.endswith(".pkl"), os.listdir(sub_root)))
for folder in tqdm(folders):
folder_path = sub_root + folder
images, is_moved = video_loader(folder_path)
if blocks:
json_data[folder_path] = collide_detection_blocks(images, is_moved)
else:
json_data[folder_path] = collide_detection(images, is_moved)
with open(save_name, "w") as f:
json.dump(json_data, f)
def dump_file(): # labels the collision positions for each video; not needed for classification
dump_train('./dataset/train/', './dataset/train_blocks_0.2.json')
dump_test('./dataset/task2/test/', "./dataset/task2_blocks_0.2.json")
dump_test('./dataset/task3/test/', "./dataset/task3_blocks_0.2.json")
def get_video_feature(net, folder_name, resize=(224, 224)):
"""
    :param folder_name: path from the current working directory to the 'video_0000' folder
    :param resize: defaults to (224, 224)
    :return: 14-dimensional feature vector; the first 10 dimensions are the classification labels
    ['061_foam_brick', 'green_basketball', 'salt_cylinder', 'shiny_toy_gun', 'stanley_screwdriver',
     'strawberry', 'toothpaste_box', 'toy_elephant', 'whiteboard_spray', 'yellow_block']
    and the last 4 dimensions are the collision position [top, bottom, left, right]
"""
class_feature = classify(net, folder_name, resize)
images, is_moved = video_loader(folder_name)
move_feature = collide_detection_blocks(images, is_moved)
#feature = np.concatenate([class_feature, move_feature])
return class_feature, move_feature
#if __name__ == '__main__':
#net = AlexNet()
#net.load_state_dict(torch.load('./alexnet.pt'))
# idx_to_class = ['061_foam_brick', 'green_basketball', 'salt_cylinder', 'shiny_toy_gun', 'stanley_screwdriver',
# 'strawberry', 'toothpaste_box', 'toy_elephant', 'whiteboard_spray', 'yellow_block']
# classes = classify(net, './dataset/task2/test/0/video_0006')
#import json
#import os
#label = dict()
#path='./dataset/train'
#for folder in os.listdir(path):
#for sample in os.listdir(os.path.join(path, folder)):
#images, is_moved = video_loader(os.path.join(path, folder, sample))
#move_feature = collide_detection_blocks(images, is_moved)
#label[folder + '/' + sample] = move_feature
#with open('./dataset/train.json', 'w') as f:
        #json.dump(label,f)
| 2.3125 | 2
Aulas/12aula(antigo)/readint.py | rafaelmcam/RTOs_ChibiOS | 1 | 11290 | <reponame>rafaelmcam/RTOs_ChibiOS
import serial
with serial.Serial("/dev/ttyUSB0", 115200) as ser:
while 1:
for i in range(5):
n = ser.read()[0]
print("{:x}".format(n))
print("--------")
| 2.703125 | 3 |
packages/pyre/parsing/Parser.py | avalentino/pyre | 25 | 11291 | <gh_stars>10-100
# -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2021 all rights reserved
#
class Parser:
"""
The base class for parsers
"""
# types
from .exceptions import ParsingError, SyntaxError, TokenizationError
# meta methods
def __init__(self, **kwds):
# chain up
super().__init__(**kwds)
# build my scanner
self.scanner = self.lexer()
# all done
return
# implementation details
lexer = None # my scanner factory
scanner = None # my scanner instance
# end of file
| 2.15625 | 2 |
shfl/data_distribution/data_distribution_non_iid.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | 1 | 11292 | <filename>shfl/data_distribution/data_distribution_non_iid.py
import numpy as np
import random
import tensorflow as tf
from shfl.data_base.data_base import shuffle_rows
from shfl.data_distribution.data_distribution_sampling import SamplingDataDistribution
class NonIidDataDistribution(SamplingDataDistribution):
"""
Implementation of a non-independent and identically distributed data distribution using \
[Data Distribution](../data_distribution/#datadistribution-class)
In this data distribution we simulate the scenario in which clients have non-identical distribution since
they know partially the total classes of the problem.
This distribution only works with classification problems.
"""
@staticmethod
def choose_labels(num_nodes, total_labels):
"""
Method that randomly choose labels used for each client in non-iid scenario.
# Arguments:
num_nodes: Number of nodes
total_labels: Number of labels
# Returns:
labels_to_use
"""
random_labels = []
for i in range(0, num_nodes):
num_labels = random.randint(2, total_labels)
labels_to_use = []
for j in range(num_labels):
label = random.randint(0, total_labels - 1)
if label not in labels_to_use:
labels_to_use.append(label)
else:
while label in labels_to_use:
label = random.randint(0, total_labels - 1)
labels_to_use.append(label)
random_labels.append(labels_to_use)
return random_labels
def make_data_federated(self, data, labels, percent, num_nodes=1, weights=None, sampling="with_replacement"):
"""
Method that makes data and labels argument federated in a non-iid scenario.
# Arguments:
data: Data to federate
labels: Labels to federate
num_nodes: Number of nodes to create
percent: Percent of the data (between 0 and 100) to be distributed (default is 100)
weights: Array of weights for weighted distribution (default is None)
sampling: methodology between with or without sampling (default "without_sampling")
# Returns:
federated_data: A list containing the data for each client
federated_label: A list containing the labels for each client
"""
if weights is None:
weights = np.full(num_nodes, 1/num_nodes)
# Check label's format
if labels.ndim == 1:
one_hot = False
labels = tf.keras.utils.to_categorical(labels)
else:
one_hot = True
# Shuffle data
data, labels = shuffle_rows(data, labels)
# Select percent
data = data[0:int(percent * len(data) / 100)]
labels = labels[0:int(percent * len(labels) / 100)]
num_data = len(data)
# We generate random classes for each client
total_labels = np.unique(labels.argmax(axis=-1))
random_classes = self.choose_labels(num_nodes, len(total_labels))
federated_data = []
federated_label = []
if sampling == "with_replacement":
for i in range(0, num_nodes):
labels_to_use = random_classes[i]
idx = np.array([True if i in labels_to_use else False for i in labels.argmax(axis=-1)])
data_aux = data[idx]
labels_aux = labels[idx]
# Shuffle data
data_aux, labels_aux = shuffle_rows(data_aux, labels_aux)
percent_per_client = min(int(weights[i]*num_data), len(data_aux))
federated_data.append(np.array(data_aux[0:percent_per_client, ]))
federated_label.append(np.array(labels_aux[0:percent_per_client, ]))
else:
if sum(weights) > 1:
weights = np.array([float(i) / sum(weights) for i in weights])
for i in range(0, num_nodes):
labels_to_use = random_classes[i]
idx = np.array([True if i in labels_to_use else False for i in labels.argmax(axis=-1)])
data_aux = data[idx]
rest_data = data[~idx]
labels_aux = labels[idx]
rest_labels = labels[~idx]
data_aux, labels_aux = shuffle_rows(data_aux, labels_aux)
percent_per_client = min(int(weights[i] * num_data), len(data_aux))
federated_data.append(np.array(data_aux[0:percent_per_client, ]))
rest_data = np.append(rest_data, data_aux[percent_per_client:, ], axis=0)
federated_label.append(np.array(labels_aux[0:percent_per_client, ]))
rest_labels = np.append(rest_labels, labels_aux[percent_per_client:, ], axis=0)
data = rest_data
labels = rest_labels
if not one_hot:
federated_label = np.array([np.argmax(node, 1) for node in federated_label])
return federated_data, federated_label
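
# Minimal usage sketch (hypothetical shapes; constructor arguments depend on
# SamplingDataDistribution):
#   dist = NonIidDataDistribution(...)
#   fed_data, fed_labels = dist.make_data_federated(data, labels, percent=100, num_nodes=3)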
| 3.0625 | 3 |
example/0_Basic_usage_of_the_library/python_pymongo/2_select.py | AndersonHJB/learning_spider | 2 | 11293 | <reponame>AndersonHJB/learning_spider<gh_stars>1-10
# -*- encoding: utf-8 -*-
'''
@Time : 2021-06-08
@Author : EvilRecluse
@Contact : https://github.com/RecluseXU
@Desc    :   Select (query) examples
'''
# here put the import lib
from pymongo import MongoClient
from bson import ObjectId
connection: MongoClient = MongoClient('mongodb://localhost:27017')
collection = connection['local']['startup_log']
# Query methods
# The collection object's find-prefixed methods are what we need; typically use find for multiple results and find_one for a single result
# collection.find
# collection.find_one
# In practice, the query parameters are defined by the MongoDB query language itself, not invented by pymongo
# Basically, any command that can be run in the mongo shell has a corresponding pymongo method
# filter
# Describes which documents are wanted, similar to how WHERE restricts results in SQL
# Supports logical tests, type checks and similar operators
_filter = {'pid': 4444} # records whose pid equals 4444
result = collection.find_one(_filter)
print(result)
# projection
# Sets which keys the returned records contain
# If some keys are set to 1, only those keys are returned
# If some keys are set to 0, every key except those is returned
# If not specified, the result includes the _id key by default
projection = {'pid': 1, 'hostname': 1}
result = collection.find_one(_filter, projection)
print(result)
collection.find_one({'_id': 'EvilMass-1619315049192'}) # mind the _id type: in startup_log the _id is a string, not an ObjectId
# skip
# Skips the given number of matching results
result = collection.find(_filter, projection, skip=1)
print(list(result))
# limit
# Caps the number of returned results
result = collection.find(_filter, projection, limit=2)
print(list(result))
# collection.count_documents
# Counts the number of matching documents
result = collection.count_documents({'pid': 4444})
print(result)
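
# Combining the options above (a sketch; sort() is a standard pymongo cursor
# method that orders results before skip/limit are applied):
result = collection.find(_filter, projection).sort('pid', -1).skip(1).limit(2)
print(list(result))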
| 2.65625 | 3 |
install-b9s.py | ihaveamac/hardmod-b9s-installer | 13 | 11294 | <reponame>ihaveamac/hardmod-b9s-installer
#!/usr/bin/env python3
import hashlib
import os
import shutil
import subprocess
import sys
import time
def doexit(msg, errcode=0):
print(msg)
input('Press Enter to continue...')
sys.exit(errcode)
if not os.path.isfile('NAND.bin'):
doexit('NAND.bin not found.', errcode=1)
if os.path.isfile('firm0firm1.bak'):
doexit('firm0firm1.bak was found.\n'
'In order to prevent writing a good backup with a bad one, the '
'install has stopped. Please move or delete the old file if you '
'are sure you want to continue. If you would like to restore, use '
'`restore-firm0firm1`.',
errcode=1)
if os.path.isfile('NAND-patched.bin'):
doexit('NAND-patched.bin was found.\n'
'Please move or delete the patched NAND before patching another.',
errcode=1)
if not os.path.isfile('current.firm'):
doexit('current.firm not found.', errcode=1)
if not os.path.isfile('boot9strap.firm'):
doexit('boot9strap.firm not found.', errcode=1)
if not os.path.isfile('boot9strap.firm.sha'):
doexit('boot9strap.firm.sha not found.', errcode=1)
print('Verifying boot9strap.firm.')
with open('boot9strap.firm.sha', 'rb') as f:
b9s_hash = f.read(0x20)
with open('boot9strap.firm', 'rb') as f:
if hashlib.sha256(f.read(0x400000)).digest() != b9s_hash:
doexit('boot9strap.firm hash check failed.', errcode=1)
print('boot9strap.firm hash check passed.')
readsize = 0x100000 # must evenly divide 0x3AF00000 and 0x4D800000
shutil.rmtree('work', ignore_errors=True)
os.makedirs('work', exist_ok=True)
def runcommand(cmdargs):
proc = subprocess.Popen(cmdargs, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
procoutput = proc.communicate()[0]
# return(procoutput)
if proc.returncode != 0:
print('{} had an error.'.format(cmdargs[0]))
print('Full command: {}'.format(' '.join(cmdargs)))
print('Output:')
print(procoutput)
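
# Patch strategy note: the script computes new = old ^ current ^ boot9strap for
# each 0x400000-byte FIRM slot. This assumes the FIRM partitions are XOR-style
# encrypted with a fixed keystream, so XORing out the known current.firm leaves
# the keystream, and XORing in boot9strap.firm re-encrypts the new payload.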
overall_time = time.time()
print('Trying to open NAND.bin...')
with open('NAND.bin', 'rb+') as nand:
print('Backing up FIRM0FIRM1 to firm0firm1.bin...')
nand.seek(0xB130000)
start_time = time.time()
with open('firm0firm1.bak', 'wb') as f:
for curr in range(0x800000 // readsize):
f.write(nand.read(readsize))
print('Reading {:06X} ({:>5.1f}%)'.format((curr + 1) * readsize,
(((curr + 1) * readsize) / 0x800000) * 100), end='\r')
print('\nReading finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('Creating FIRMs to xor from boot9strap.firm.')
start_time = time.time()
with open('current.firm', 'rb') as f:
with open('work/current_pad.bin', 'wb') as b9s:
b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2)
with open('boot9strap.firm', 'rb') as f:
with open('work/boot9strap_pad.bin', 'wb') as b9s:
b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2)
print('Creation finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('XORing FIRM0FIRM1 with current.firm.')
start_time = time.time()
runcommand(['tools/lazyxor-' + sys.platform, 'firm0firm1.bak',
'work/current_pad.bin', 'work/xored.bin'])
print('XORing finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('XORing FIRM0FIRM1 with boot9strap.firm.')
start_time = time.time()
runcommand(['tools/lazyxor-' + sys.platform, 'work/xored.bin',
'work/boot9strap_pad.bin', 'work/final.bin'])
print('XORing finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('Writing final FIRMs to NAND.bin.')
with open('work/final.bin', 'rb') as f:
firm_final = f.read(0x800000)
nand.seek(0xB130000)
start_time = time.time()
for curr in range(0x800000 // readsize):
print('Writing {:06X} ({:>5.1f}%)'.format((curr + 1) * readsize,
(((curr + 1) * readsize) / 0x800000) * 100), end='\r')
nand.write(bytes(firm_final[curr * readsize:(curr + 1) * readsize]))
print('\nWriting finished in {:>.2f} seconds.'.format(
time.time() - start_time))
os.rename('NAND.bin', 'NAND-patched.bin')
doexit('boot9strap install process finished in {:>.2f} seconds.'.format(
time.time() - overall_time))
| 2.015625 | 2 |
pygazebo/connection.py | robobe/pygazebo | 0 | 11295 | <reponame>robobe/pygazebo
import concurrent
import time
import math
import sys
import asyncio
import logging
from . import msg
from .parse_error import ParseError
from . import DEBUG_LEVEL
logger = logging.getLogger(__name__)
logger.setLevel(DEBUG_LEVEL)
async def _wait_closed(stream):
assert(sys.version_info.major >= 3)
if sys.version_info.minor >= 7:
await stream.wait_closed()
class DisconnectError(Exception):
def __init__(self,
connection_name: str,
server_addr: tuple,
local_addr: tuple,
discarded_bytes: int):
"""
:param connection_name: Name of the connection
:param server_addr: remote address of the connection (address, port)
:type server_addr: tuple[str, int]
:param local_addr: local address of the connection (address, port)
:type local_addr: tuple[str, int]
:param discarded_bytes: number of bytes not read from the socket
"""
self._connection_name = connection_name
self._server_addr = server_addr
self._local_addr = local_addr
self._discarded_bytes = discarded_bytes
@staticmethod
def _to_addr(addr):
return f'{addr[0]}:{addr[1]}'
def __str__(self):
return f'DisconnectError' \
f'({self._connection_name}: {self._to_addr(self._local_addr)} -> {self._to_addr(self._server_addr)})' + \
(f' bytes not collected: {self._discarded_bytes}' if self._discarded_bytes is not None and self._discarded_bytes > 0 else '')
class Server(object):
def __init__(self, name: str):
self._name = name
self._server = None
self._listen_host = None
self._listen_port = None
self._running_server = None
async def serve(self, handler):
"""
Start TCP server
:param handler: called for each new connection. async function
:type handler: async lambda reader, writer -> None
:return:
"""
self._server = await asyncio.start_server(handler, host='0.0.0.0')
self._listen_host, self._listen_port = self._server.sockets[0].getsockname()
logger.info(f"Listening on {self._listen_port}:{self._listen_port}")
self._running_server = asyncio.ensure_future(self._server_loop())
return self._listen_host, self._listen_port
async def _server_loop(self):
if sys.version_info.minor >= 7:
async with self._server:
await self._server.serve_forever()
else:
await self._server.wait_closed()
async def close(self):
self._server.close()
await _wait_closed(self._server)
try:
await self._running_server
except concurrent.futures.CancelledError:
pass
@property
def listen_host(self):
assert self._server is not None
return self._listen_host
@property
def listen_port(self):
assert self._server is not None
return self._listen_port
class Connection(object):
"""Manages a Gazebo protocol connection.
"""
def __init__(self, name):
self.name = name
self._address = None
self._port = None
self._reader = None
self._writer = None
self._closed = True
async def connect(self, address, port):
logger.debug('Connection.connect')
self._address = address
self._port = port
reader, writer = await asyncio.open_connection(address, port)
self.accept_connection(reader, writer)
def accept_connection(self, reader, writer):
self._reader = reader
self._writer = writer
self._closed = False
async def close(self):
if self._closed:
logger.debug("Trying to close an already closed connection")
return
self._closed = True
self._writer.write_eof()
await self._writer.drain()
self._writer.close()
await _wait_closed(self._writer)
async def write_packet(self, name: str, message, timeout):
assert not self._closed
packet = msg.packet_pb2.Packet()
cur_time = time.time()
packet.stamp.sec = int(cur_time)
packet.stamp.nsec = int(math.fmod(cur_time, 1) * 1e9)
packet.type = name.encode()
packet.serialized_data = message.SerializeToString()
await self._write(packet.SerializeToString(), timeout)
async def write(self, message, timeout=None):
data = message.SerializeToString()
await self._write(data, timeout)
async def _write(self, data, timeout):
header = ('%08X' % len(data)).encode()
self._writer.write(header + data)
await asyncio.wait_for(self._writer.drain(), timeout=timeout)
async def read_raw(self):
"""
Read incoming packet without parsing it
:return: byte array of the packet
"""
header = None
try:
assert not self._closed
header = await self._reader.readexactly(8)
if len(header) < 8:
raise ParseError('malformed header: ' + str(header))
try:
size = int(header, 16)
except ValueError:
raise ParseError('invalid header: ' + str(header))
else:
data = await self._reader.readexactly(size)
return data
except (ConnectionResetError, asyncio.streams.IncompleteReadError) as e:
if self._closed:
return None
else:
local_addr, local_port = self._writer.transport.get_extra_info('sockname')
discarded_bytes = len(e.partial) if isinstance(e, asyncio.streams.IncompleteReadError) else None
                if header is not None and discarded_bytes is not None:
discarded_bytes += 8
raise DisconnectError(
connection_name=self.name,
server_addr=(self._address, self._port),
                    local_addr=(local_addr, local_port),
discarded_bytes=discarded_bytes
) from e
async def read_packet(self):
data = await self.read_raw()
if not self._closed:
packet = msg.packet_pb2.Packet.FromString(data)
return packet
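
# Wire format (as implemented by _write/read_raw above): every message is an
# 8-character uppercase-hex length header followed by the serialized protobuf,
# e.g. frame = ('%08X' % len(data)).encode() + data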
| 2.4375 | 2 |
src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | 0 | 11296 | <reponame>ivangarrera/MachineLearning<gh_stars>0
from common_clustering import CommonClustering
# clustering_features = CommonClustering(r'C:\Users\ivangarrera\Desktop\T2_cleaned.csv')
clustering_features = CommonClustering(r'D:\Ing. Informatica\Cuarto\Machine Learning\T2_cleaned_gyroscope.csv')
attr = list(clustering_features.data_set)[0][:list(clustering_features.data_set)[0].find('_')]
clustering_features.attr = attr
clustering_features.PrincipalComponentAnalysis(num_components=2)
# Get the number of clusters that provides the best results
ideal_number_of_clusters = clustering_features.getBestNumberOfClusters()
# Plot silhouettes array
clustering_features.PlotSilhouettes()
# Print k-means with the best number of clusters that have been found
labels = clustering_features.KMeansWithIdeal(ideal_number_of_clusters)
# Interpret k-means groups
clustering_features.data_set['labels'] = labels
data_set_labels_mean = clustering_features.data_set.groupby(['labels']).mean()
# Plot 3D graph to interpretate k-means groups
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(data_set_labels_mean.values[:,0],
data_set_labels_mean.values[:,1],
data_set_labels_mean.values[:,2])
plt.savefig(r'../../reports/figures/centroids3D_{}.png'.format(attr))
plt.show()
# Agglomerative clustering algorithm using nearest neighbors matrix
clustering_features.AgglomerativeClusteringWithNearestNeighbors()
# DBSCAN Clustering algorithm
labels = clustering_features.DBSCANClustering()
# Interpret outliers
clustering_features.data_set['labels'] = labels
data_set_outliers = clustering_features.data_set.loc[(clustering_features.data_set['labels'] == -1)]
# Show outliers in a 3D graph with all points in the dataset
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(clustering_features.data_set.values[:,0],
clustering_features.data_set.values[:,1],
clustering_features.data_set.values[:,2])
ax.scatter(data_set_outliers.values[:,0],
data_set_outliers.values[:,1],
data_set_outliers.values[:,2], c='red', s=50)
plt.savefig(r'../../reports/figures/outliers3D_{}.png'.format(attr))
plt.show()
| 3.34375 | 3 |
app.py | Geo-Gabriel/REST-Api-Hotels | 0 | 11297 | from blacklist import BLACKLIST
from flask import Flask, jsonify
from flask_restful import Api
from resources.hotel import Hoteis, Hotel
from resources.user import User, UserLogin, UserLogout, UserRegister, Users
from resources.site import Site, Sites
from flask_jwt_extended import JWTManager
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['JWT_SECRET_KEY'] = 'Jbs8aGbbAyt7iMa878Pnsj'
app.config['JWT_BLACKLIST_ENABLED'] = True
api = Api(app)
jwt = JWTManager(app)
@app.before_first_request
def create_db():
db.create_all()
@jwt.token_in_blacklist_loader
def verify_block_list(token):
return token['jti'] in BLACKLIST
@jwt.revoked_token_loader
def revoked_access_token():
    return jsonify({'message': "You have been logged out."}), 401 # Unauthorized
# Hotels resource
api.add_resource(Hoteis, '/hoteis')
api.add_resource(Hotel, '/hoteis/<string:hotel_id>')
# Users resource
api.add_resource(Users, '/users')
api.add_resource(User, '/users/<string:user_id>')
# User register resource
api.add_resource(UserRegister, '/register')
# Login resource
api.add_resource(UserLogin, '/login')
# Logout resource
api.add_resource(UserLogout, '/logout')
# Sites resource
api.add_resource(Sites, '/sites')
api.add_resource(Site, '/sites/<string:site_url>')
if __name__ == '__main__':
from database.sql_alchemy import db
db.init_app(app)
app.run(debug=True)
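
# Example requests against the running API (illustrative):
#   POST /register -> create a user
#   POST /login    -> returns a JWT access token for the Authorization header
#   GET  /hoteis   -> list hotels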
| 2.265625 | 2 |
scdlbot/__init__.py | samillinier/habesha-skin-pack | 0 | 11298 | <filename>scdlbot/__init__.py
# -*- coding: utf-8 -*-
"""Top-level package for Music Downloader Telegram Bot."""
# version as tuple for simple comparisons
VERSION = (0, 9, 16)
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
# string created from tuple to avoid inconsistency
__version__ = ".".join([str(x) for x in VERSION])
| 2.046875 | 2 |
uPython/lopyhelper.py | somervda/ourLora | 0 | 11299 | import struct
import pycom
import time
from network import LoRa
def blink(seconds, rgb):
pycom.rgbled(rgb)
time.sleep(seconds)
pycom.rgbled(0x000000) # off
def setUSFrequencyPlan(lora):
""" Sets the frequency plan that matches the TTN gateway in the USA """
# remove all US915 channels
for channel in range(0, 72):
lora.remove_channel(channel)
# set all channels to the same frequency (must be before sending the OTAA join request)
ttn_start_frequency = 903900000
ttn_step_frequency = 200000
ttn_ch8_frequency = 904600000
# Set up first 8 US915 TTN uplink channels
for channel in range(0, 9):
if (channel == 8):
channel_frequency = ttn_ch8_frequency
            # DR4 = SF8/500kHz
channel_dr_min = 4
channel_dr_max = 4
else:
channel_frequency = ttn_start_frequency + \
(channel * ttn_step_frequency)
# DR0 = SF10/125kHz
channel_dr_min = 0
# DR3 = SF7/125kHz
channel_dr_max = 3
lora.add_channel(channel, frequency=channel_frequency,
dr_min=channel_dr_min, dr_max=channel_dr_max)
print("Added channel", channel, channel_frequency,
channel_dr_min, channel_dr_max)
def join(app_eui, app_key, useADR):
""" Join the Lorawan network using OTAA. new lora session is returned """
# Set the power to 20db for US915
# You can also set the default dr value but I found that was problematic
# You need to turn on adr (auto data rate) at this point if it is to be used
# only use adr for static devices (Not moving)
# see https://lora-developers.semtech.com/library/tech-papers-and-guides/understanding-adr/
lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.US915,
adr=useADR, tx_power=20)
setUSFrequencyPlan(lora)
print('Joining', end='')
lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
# wait until the module has joined the network
while not lora.has_joined():
time.sleep(2.5)
blink(.5, 0xff8f00) # dark orange
print('.', end='')
print('')
print('Joined')
blink(2, 0x006400) # dark green
return lora
def send(lora, socket, port, payload, useADR):
""" send data to the lorawan gateway on selected port """
blink(.5, 0x00008b) # dark blue
socket.setblocking(True)
socket.bind(port)
print("Sending data:", payload.pack(), " Size:", payload.calcsize())
socket.send(payload.pack())
# Give send a extra second to be returned before switching
# the socket blocking mode (May not need this)
time.sleep(1)
socket.setblocking(False)
lora.nvram_save()
class gps_payload:
""" Class for managing the GPS payload data that is transmitted to the lorawan service
update the class properties and struct definition for the particular use case """
longitude = 0
latitude = 0
pack_format = "ff"
def __init__(self, longitude, latitude):
self.longitude = longitude # Float
self.latitude = latitude # Float
# see format options here https://docs.python.org/2/library/struct.html#format-characters
    # Note: use single precision float f for GPS Lng/Lat to get locations down to a meter
def pack(self):
return struct.pack(self.pack_format, self.longitude, self.latitude)
def calcsize(self):
return struct.calcsize(self.pack_format)
class sensor_payload:
""" Class for managing the sensor payload data that is transmitted to the lorawan service
update the class properties and struct definition for the particular use case """
celsius = 0
humidity = 0
waterlevel = 0
voltage = 0
pack_format = "bBBB"
def __init__(self, celsius, humidity, waterlevel, voltage):
self.celsius = celsius # In +/- celsius
self.humidity = humidity # In percentage
self.waterlevel = waterlevel # in centimeters
self.voltage = voltage # In tenths of a volt
# see format options here https://docs.python.org/2/library/struct.html#format-characters
def pack(self):
return struct.pack(self.pack_format, self.celsius, self.humidity, self.waterlevel, self.voltage)
def calcsize(self):
return struct.calcsize(self.pack_format)
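
# Minimal usage sketch (hypothetical readings; app_eui/app_key come from your
# LoRaWAN console, and the LoRa socket is created with pycom's socket module):
#   lora = join(app_eui, app_key, useADR=False)
#   s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
#   payload = sensor_payload(celsius=21, humidity=55, waterlevel=120, voltage=37)
#   send(lora, s, 2, payload, useADR=False)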
| 2.6875 | 3 |