repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---|
sankalpg/Essentia_tonicDebug_TEMP | src/python/essentia/extractor/segmentation_simple.py | 10 | 2872 | # Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import essentia
import numpy
import sys
from essentia import INFO
from essentia.progress import Progress
namespace = 'lowlevel'
dependencies = None
def is_silent_threshold(frame, silence_threshold_dB):
p = essentia.instantPower( frame )
silence_threshold = pow(10.0, (silence_threshold_dB / 10.0))
if p < silence_threshold:
return 1.0
else:
return 0.0
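# Editorial note (added comment, not part of the original file): the check above
# converts a threshold given in dB to linear power as 10 ** (dB / 10). For
# example, a silence_threshold_dB of -60 corresponds to a linear power of 1e-6,
# so any frame whose instant power falls below 1e-6 is reported as silent (1.0).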
def compute(audio, pool, options):
# analysis parameters
sampleRate = options['sampleRate']
frameSize = options['frameSize']
hopSize = options['hopSize']
windowType = options['windowType']
# frame algorithms
frames = essentia.FrameGenerator(audio = audio, frameSize = frameSize, hopSize = hopSize)
window = essentia.Windowing(size = frameSize, zeroPadding = 0, type = windowType)
spectrum = essentia.Spectrum(size = frameSize)
# spectral algorithms
energy = essentia.Energy()
mfcc = essentia.MFCC(highFrequencyBound = 8000)
INFO('Computing Low-Level descriptors necessary for segmentation...')
# used for a nice progress display
total_frames = frames.num_frames()
n_frames = 0
start_of_frame = -frameSize*0.5
progress = Progress(total = total_frames)
for frame in frames:
frameScope = [ start_of_frame / sampleRate, (start_of_frame + frameSize) / sampleRate ]
#pool.setCurrentScope(frameScope)
pool.add(namespace + '.' + 'scope', frameScope)
if options['skipSilence'] and essentia.isSilent(frame):
total_frames -= 1
start_of_frame += hopSize
continue
frame_windowed = window(frame)
frame_spectrum = spectrum(frame_windowed)
# need the energy for getting the thumbnail
pool.add(namespace + '.' + 'spectral_energy', energy(frame_spectrum))
# mfcc
(frame_melbands, frame_mfcc) = mfcc(frame_spectrum)
pool.add(namespace + '.' + 'spectral_mfcc', frame_mfcc)
# display of progress report
progress.update(n_frames)
n_frames += 1
start_of_frame += hopSize
progress.finish()
| agpl-3.0 |
shubhdev/edx-platform | common/test/acceptance/tests/studio/test_studio_settings_details.py | 4 | 7591 | """
Acceptance tests for Studio's Settings Details pages
"""
from unittest import skip
from .base_studio_test import StudioCourseTest
from ...fixtures.course import CourseFixture
from ...pages.studio.settings import SettingsPage
from ...pages.studio.overview import CourseOutlinePage
from ...tests.studio.base_studio_test import StudioCourseTest
from ..helpers import (
generate_course_key,
select_option_by_value,
is_option_value_selected,
element_has_text,
)
class SettingsMilestonesTest(StudioCourseTest):
"""
Tests for milestones feature in Studio's settings tab
"""
def setUp(self, is_staff=True):
super(SettingsMilestonesTest, self).setUp(is_staff=is_staff)
self.settings_detail = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Before every test, make sure to visit the page first
self.settings_detail.visit()
self.assertTrue(self.settings_detail.is_browser_on_page())
def test_page_has_prerequisite_field(self):
"""
Test to make sure page has pre-requisite course field if milestones app is enabled.
"""
self.assertTrue(self.settings_detail.pre_requisite_course_options)
def test_prerequisite_course_save_successfully(self):
"""
Scenario: Selecting a course from the Pre-Requisite course drop down saves the selected course as the pre-requisite
course.
Given that I am on the Schedule & Details page on studio
When I select an item in pre-requisite course drop down and click Save Changes button
Then My selected item should be saved as pre-requisite course
And My selected item should be selected after refreshing the page.
"""
course_number = self.unique_id
CourseFixture(
org='test_org',
number=course_number,
run='test_run',
display_name='Test Course' + course_number
).install()
pre_requisite_course_key = generate_course_key(
org='test_org',
number=course_number,
run='test_run'
)
pre_requisite_course_id = unicode(pre_requisite_course_key)
# Refresh the page to load the new course fixture and populate the prerequisite course dropdown
# Then select the prerequisite course and save the changes
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again and confirm the prerequisite course selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
self.assertTrue(is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
))
# Set the prerequisite course back to None and save the changes
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=''
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again to confirm the None selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
self.assertTrue(is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=''
))
# Re-pick the prerequisite course and confirm no errors are thrown (covers a discovered bug)
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again to confirm the prerequisite course selection is properly reflected
self.settings_detail.refresh_page()
dropdown_status = is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.assertTrue(dropdown_status)
def test_page_has_enable_entrance_exam_field(self):
"""
Test to make sure page has 'enable entrance exam' field.
"""
self.assertTrue(self.settings_detail.entrance_exam_field)
@skip('Passes in devstack, passes individually in Jenkins, fails in suite in Jenkins.')
def test_enable_entrance_exam_for_course(self):
"""
Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
And also that the entrance exam is destroyed after deselecting the checkbox.
"""
self.settings_detail.require_entrance_exam(required=True)
self.settings_detail.save_changes()
# getting the course outline page.
course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
course_outline_page.visit()
# title with text 'Entrance Exam' should be present on page.
self.assertTrue(element_has_text(
page=course_outline_page,
css_selector='span.section-title',
text='Entrance Exam'
))
# Delete the currently created entrance exam.
self.settings_detail.visit()
self.settings_detail.require_entrance_exam(required=False)
self.settings_detail.save_changes()
course_outline_page.visit()
self.assertFalse(element_has_text(
page=course_outline_page,
css_selector='span.section-title',
text='Entrance Exam'
))
def test_entrance_exam_has_unit_button(self):
"""
Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
And user has option to add units only instead of any Subsection.
"""
self.settings_detail.require_entrance_exam(required=True)
self.settings_detail.save_changes()
# getting the course outline page.
course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
course_outline_page.visit()
course_outline_page.wait_for_ajax()
# button with text 'New Unit' should be present.
self.assertTrue(element_has_text(
page=course_outline_page,
css_selector='.add-item a.button-new',
text='New Unit'
))
# button with text 'New Subsection' should not be present.
self.assertFalse(element_has_text(
page=course_outline_page,
css_selector='.add-item a.button-new',
text='New Subsection'
))
| agpl-3.0 |
torresj/cafe | contacto.py | 2 | 7363 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 3 13:07:55 2013
@author: jaime
"""
import web
from web.contrib.template import render_mako
from web import form
import pymongo
import feedparser
import time
from keys import *
import tweepy
render = render_mako(
directories=['plantillas'],
input_encoding='utf-8',
output_encoding='utf-8',
)
'''
This function updates the timestamp of the last RSS
access, if necessary. It checks whether more than 10 minutes
have passed since the last time and, if so, downloads
the RSS feed again.
'''
def actualiza_tiempo():
conn=pymongo.MongoClient()
db=conn.mydb
cache=db.cache
tiempo1=time.time()
t=cache.find_one({"rss":"el pais"})
tiempo2=t[u'ult_act']
if((tiempo1-tiempo2)>600):
cache.update({"rss": "el pais"}, {"$set": {"ult_act": time.time()}})
rss=feedparser.parse('http://ep00.epimg.net/rss/tags/ultimas_noticias.xml')
conn.close()
def actualiza_tweet():
conn=pymongo.MongoClient()
db=conn.mydb
cache=db.cache
tiempo1=time.time()
t=cache.find_one({"rss":"tweet"})
tiempo2=t[u'ult_act']
if((tiempo1-tiempo2)>600):
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
result = api.user_timeline("torresjTIC")
tweet=[]
for status in result:
geo=status.geo
if geo!=None:
tweet.append([status.text,[geo[u'coordinates'][0],geo[u'coordinates'][1]]])
cache.update({"rss": "tweet"}, {"$set": {"ult_act": time.time()}})
conn.close()
#RSS variable; we also store the moment the RSS feed was downloaded
rss=feedparser.parse('http://ep00.epimg.net/rss/tags/ultimas_noticias.xml')
actualiza_tiempo()
#Connect to Twitter to fetch the tweets
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
result = api.user_timeline("torresjTIC")
tweet=[]
for status in result:
geo=status.geo
if geo!=None:
print status.text
tweet.append([status.text,[geo[u'coordinates'][0],geo[u'coordinates'][1]]])
actualiza_tweet()
# functions used as lists of days, months and years
def dias():
x=[];
for n in range(1,32):
x.append(n)
return x
def meses():
x=[];
for n in range(1,13):
x.append(n)
return x
def anios():
x=[];
for n in range(1940,2014):
x.append(n)
return x
meses31=['1','3','4','7','8','10','12']
meses30=['5','6','9','11']
#Validators
vpass=form.regexp(r'.{7,20}$',"La contrasenia debe tener mas de 7 caracteres")
vemail=form.regexp(r'\b[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}\b',"Introduzca una direccion de correo valida")
#Login form
formul = form.Form(
form.Textbox("user",form.notnull,description = "Usuario:"),
form.Password("password",form.notnull,vpass,description = "Contraseña:"),
form.Button("Login")
)
class Contacto:
def GET(self):
s=web.ctx.session
try:
if s.usuario!='':
log=True
user=s.usuario
else:
log=False
user=''
except AttributeError:
s.usuario=''
log=False
user=''
#Variables used to fill in the web page
login=formul()
registro=0
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Localizacion"
cuerpo="Cuerpoooooooooooooooooooooo"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
modo="contacto"
error=''
actualiza_tiempo()
actualiza_tweet()
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss,
tweet=tweet)
def POST(self):
login=formul()
registro=0
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Localizacion"
cuerpo="Cuerpo00oooooo"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
modo="contacto"
error=''
actualiza_tiempo()
actualiza_tweet()
if not login.validates():
log=False
user=''
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss,
tweet=tweet)
else:
s=web.ctx.session
#look the user up in the database
conn=pymongo.MongoClient()
db=conn.mydb
usuarios=db.usuarios
us=usuarios.find_one({"user":login['user'].value})
conn.close()
try:
if login['password'].value==us[u'pass']:
log=True
user=login['user'].value
s.usuario=user
else:
log=False
user=''
error='contraseña erronea'
except TypeError:
log=False;
user=''
error='El usuario no existe'
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss,
tweet=tweet) | gpl-2.0 |
hynekcer/django | tests/foreign_object/test_empty_join.py | 232 | 1498 | from django.test import TestCase
from .models import SlugPage
class RestrictedConditionsTests(TestCase):
def setUp(self):
slugs = [
'a',
'a/a',
'a/b',
'a/b/a',
'x',
'x/y/z',
]
SlugPage.objects.bulk_create([SlugPage(slug=slug) for slug in slugs])
def test_restrictions_with_no_joining_columns(self):
"""
Test that it's possible to create a working related field that doesn't
use any joining columns, as long as an extra restriction is supplied.
"""
a = SlugPage.objects.get(slug='a')
self.assertListEqual(
[p.slug for p in SlugPage.objects.filter(ascendants=a)],
['a', 'a/a', 'a/b', 'a/b/a'],
)
self.assertEqual(
[p.slug for p in a.descendants.all()],
['a', 'a/a', 'a/b', 'a/b/a'],
)
aba = SlugPage.objects.get(slug='a/b/a')
self.assertListEqual(
[p.slug for p in SlugPage.objects.filter(descendants__in=[aba])],
['a', 'a/b', 'a/b/a'],
)
self.assertListEqual(
[p.slug for p in aba.ascendants.all()],
['a', 'a/b', 'a/b/a'],
)
def test_empty_join_conditions(self):
x = SlugPage.objects.get(slug='x')
message = "Join generated an empty ON clause."
with self.assertRaisesMessage(ValueError, message):
list(SlugPage.objects.filter(containers=x))
| bsd-3-clause |
ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/fields.py | 1 | 5990 | # urllib3/fields.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
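# Illustrative usage (added comment, not part of the original module):
#   guess_content_type('report.pdf')  -> 'application/pdf'
#   guess_content_type('data.foo')    -> 'application/octet-stream' (the default)
#   guess_content_type(None)          -> 'application/octet-stream'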
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
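# Illustrative usage (added comment, not part of the original module): an ASCII
# value is emitted as an ordinary quoted parameter, while a non-ASCII value falls
# through to the RFC 2231 form, for example:
#   format_header_param('filename', 'photo.jpg')
#       -> 'filename="photo.jpg"'
#   format_header_param('filename', u'r\xe9sum\xe9.txt')
#       -> "filename*=utf-8''r%C3%A9sum%C3%A9.txt"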
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from parameter
of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type)
tuple where the MIME type is optional. For example: ::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
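# Illustrative usage (added comment, not part of the original module):
#   field = RequestField.from_tuples('avatar', ('avatar.png', png_bytes))
# builds a field whose Content-Type is guessed from the filename ('image/png'
# here) and whose Content-Disposition header is filled in by make_multipart()
# below; 'avatar' and png_bytes are made-up example values.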
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format as
`k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None, content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join(
['', self._render_parts((('name', self._name), ('filename', self._filename)))])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
| mit |
malja/cvut-python | cviceni07/frequency.py | 1 | 1393 | import sys
from collections import Counter
def loadFile( filename ):
words = []
with open(filename) as file:
for line in file:
# Because of the last word (the added space flushes it too)
line += " "
word = ""
for char in line:
if not char.isalpha():
if len(word) > 0:
words.append( word.lower() )
word = ""
continue
word += char
return words
def countWordFrequencies( words ):
words_sorted = sorted( words )
unique_words = sorted( set( words_sorted ) )
frequencies = []
for unique in unique_words:
frequencies.append(0)
while True:
if words_sorted[0] == unique:
frequencies[-1] += 1
words_sorted.pop(0)
if len( words_sorted ) == 0:
break
else:
break
return frequencies, unique_words
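# Editorial note (added comment, not part of the original script): Counter is
# imported at the top but never used; the same result could be produced with it
# directly, e.g.:
#   counts = Counter(words)
#   unique_words = sorted(counts)
#   frequencies = [counts[w] for w in unique_words]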
def printOutput( frequencies, words ):
max_frequency = max( frequencies )
for i in range( len( words ) ):
print( "{:>14}:".format( words[i] ), "*" * int( ( 50* frequencies[i] ) / max_frequency ), sep="" )
array = loadFile( sys.argv[1] )
if ( len(array) == 0 ):
exit()
freq, uniq = countWordFrequencies( array )
printOutput( freq, uniq ) | mit |
erikdejonge/newsrivr | daemons/d_checkdoubleusernames.py | 1 | 3053 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from d_utils import *
def clog(s):
s= str(s)
print('\033[96m'+strftime("%Y-%m-%d %H:%M:%S", gmtime())+": "+s+'\033[0m')
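# Editorial note (added comment, not in the original file): '\033[96m' switches the
# terminal to bright cyan and '\033[0m' resets it, so clog() prints its timestamped
# message in color.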
def checkDoubleUsernames(exis_user_screen_name):
users_same_name = []
# find out if there are more users with the same screen name; this can happen after a deny.
crs = getCollUsers().find({"screen_name":exis_user_screen_name}, sort=[("date_created", 1)])
for i in crs:
users_same_name.append(i)
#TODO check this against d_sitenewusers
old_newsrivr_userid_md5 = None
new_newsrivr_userid_md5 = None
if len(users_same_name)>1:
old_newsrivr_userid_md5 = users_same_name[0]["newsrivr_userid_md5"]
new_newsrivr_userid_md5 = users_same_name[len(users_same_name)-1]["newsrivr_userid_md5"]
if "closed_drops" in users_same_name[0]:
users_same_name[len(users_same_name)-1]["closed_drops"] = users_same_name[0]["closed_drops"]
if "share_data" in users_same_name[0]:
users_same_name[len(users_same_name)-1]["share_data"] = users_same_name[0]["share_data"]
getCollUsers().save(users_same_name[len(users_same_name)-1], safe=True)
else:
return
if old_newsrivr_userid_md5 and new_newsrivr_userid_md5:
cnt = 0
for d in getCollDrops().find({"newsrivr_userid_md5":old_newsrivr_userid_md5}):
d["newsrivr_userid_md5"] = list(set(d["newsrivr_userid_md5"]))
d["newsrivr_userid_md5"].remove(old_newsrivr_userid_md5)
d["newsrivr_userid_md5"].append(new_newsrivr_userid_md5)
for i in getCollDrops().find({"id_str":d["id_str"]}):
if i["_id"]!=d["_id"]:
d["newsrivr_userid_md5"].extend(i["newsrivr_userid_md5"])
d["newsrivr_userid_md5"] = list(set(d["newsrivr_userid_md5"]))
getCollDrops().remove({"_id":pymongo.objectid.ObjectId(i["_id"])}, safe=True)
getCollDrops().save(d, safe=True)
cnt += 1
if cnt%100==0:
clog("user changed md5, correcting: "+ str(cnt))
for u in users_same_name:
if u["newsrivr_userid_md5"]!=new_newsrivr_userid_md5:
getCollUsers().remove({"_id":pymongo.objectid.ObjectId(u["_id"])}, safe=True)
drops_to_remove = []
for d in getCollDrops().find({"newsrivr_userid_md5":u["newsrivr_userid_md5"]}):
d["newsrivr_userid_md5"] = list(set(d["newsrivr_userid_md5"]))
d["newsrivr_userid_md5"].remove(u["newsrivr_userid_md5"])
if len(d["newsrivr_userid_md5"])==0:
drops_to_remove.append(d["id_str"])
else:
if getCollDrops().find(d).count()>0:
getCollDrops().remove(d, safe=True)
else:
getCollDrops().save(d, safe=True)
deleteDrops(drops_to_remove)
def main():
while True:
for u in getCollUsers().find():
if "screen_name" in u:
checkDoubleUsernames(u["screen_name"])
time.sleep(20)
if __name__=="__main__":
clog("check if double names exists")
driver(main, inspect.getfile(inspect.currentframe()))
| gpl-2.0 |
nikoonia/gem5v | configs/common/Benchmarks.py | 36 | 6206 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
from SysPaths import script, disk, binary
from os import environ as env
from m5.defines import buildEnv
class SysConfig:
def __init__(self, script=None, mem=None, disk=None):
self.scriptname = script
self.diskname = disk
self.memsize = mem
def script(self):
if self.scriptname:
return script(self.scriptname)
else:
return ''
def mem(self):
if self.memsize:
return self.memsize
else:
return '128MB'
def disk(self):
if self.diskname:
return disk(self.diskname)
elif buildEnv['TARGET_ISA'] == 'alpha':
return env.get('LINUX_IMAGE', disk('linux-latest.img'))
elif buildEnv['TARGET_ISA'] == 'x86':
return env.get('LINUX_IMAGE', disk('x86root.img'))
elif buildEnv['TARGET_ISA'] == 'arm':
return env.get('LINUX_IMAGE', disk('linux-arm-ael.img'))
else:
print "Don't know what default disk image to use for %s ISA" % \
buildEnv['TARGET_ISA']
exit(1)
# Benchmarks are defined as a key in a dict which is a list of SysConfigs
# The first defined machine is the test system, the others are driving systems
Benchmarks = {
'PovrayBench': [SysConfig('povray-bench.rcS', '512MB', 'povray.img')],
'PovrayAutumn': [SysConfig('povray-autumn.rcS', '512MB', 'povray.img')],
'NetperfStream': [SysConfig('netperf-stream-client.rcS'),
SysConfig('netperf-server.rcS')],
'NetperfStreamUdp': [SysConfig('netperf-stream-udp-client.rcS'),
SysConfig('netperf-server.rcS')],
'NetperfUdpLocal': [SysConfig('netperf-stream-udp-local.rcS')],
'NetperfStreamNT': [SysConfig('netperf-stream-nt-client.rcS'),
SysConfig('netperf-server.rcS')],
'NetperfMaerts': [SysConfig('netperf-maerts-client.rcS'),
SysConfig('netperf-server.rcS')],
'SurgeStandard': [SysConfig('surge-server.rcS', '512MB'),
SysConfig('surge-client.rcS', '256MB')],
'SurgeSpecweb': [SysConfig('spec-surge-server.rcS', '512MB'),
SysConfig('spec-surge-client.rcS', '256MB')],
'Nhfsstone': [SysConfig('nfs-server-nhfsstone.rcS', '512MB'),
SysConfig('nfs-client-nhfsstone.rcS')],
'Nfs': [SysConfig('nfs-server.rcS', '900MB'),
SysConfig('nfs-client-dbench.rcS')],
'NfsTcp': [SysConfig('nfs-server.rcS', '900MB'),
SysConfig('nfs-client-tcp.rcS')],
'IScsiInitiator': [SysConfig('iscsi-client.rcS', '512MB'),
SysConfig('iscsi-server.rcS', '512MB')],
'IScsiTarget': [SysConfig('iscsi-server.rcS', '512MB'),
SysConfig('iscsi-client.rcS', '512MB')],
'Validation': [SysConfig('iscsi-server.rcS', '512MB'),
SysConfig('iscsi-client.rcS', '512MB')],
'Ping': [SysConfig('ping-server.rcS',),
SysConfig('ping-client.rcS')],
'ValAccDelay': [SysConfig('devtime.rcS', '512MB')],
'ValAccDelay2': [SysConfig('devtimewmr.rcS', '512MB')],
'ValMemLat': [SysConfig('micro_memlat.rcS', '512MB')],
'ValMemLat2MB': [SysConfig('micro_memlat2mb.rcS', '512MB')],
'ValMemLat8MB': [SysConfig('micro_memlat8mb.rcS', '512MB')],
'ValMemLat': [SysConfig('micro_memlat8.rcS', '512MB')],
'ValTlbLat': [SysConfig('micro_tlblat.rcS', '512MB')],
'ValSysLat': [SysConfig('micro_syscall.rcS', '512MB')],
'ValCtxLat': [SysConfig('micro_ctx.rcS', '512MB')],
'ValStream': [SysConfig('micro_stream.rcS', '512MB')],
'ValStreamScale': [SysConfig('micro_streamscale.rcS', '512MB')],
'ValStreamCopy': [SysConfig('micro_streamcopy.rcS', '512MB')],
'MutexTest': [SysConfig('mutex-test.rcS', '128MB')],
'ArmAndroid-GB': [SysConfig('null.rcS', '256MB',
'ARMv7a-Gingerbread-Android.SMP.mouse.nolock.clean.img')],
'bbench-gb': [SysConfig('bbench-gb.rcS', '256MB',
'ARMv7a-Gingerbread-Android.SMP.mouse.nolock.img')],
'ArmAndroid-ICS': [SysConfig('null.rcS', '256MB',
'ARMv7a-ICS-Android.SMP.nolock.clean.img')],
'bbench-ics': [SysConfig('bbench-ics.rcS', '256MB',
'ARMv7a-ICS-Android.SMP.nolock.img')]
}
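# Illustrative note (added comment, not in the original file): following the
# convention stated above (the first SysConfig is the test system, any further
# ones are driving systems), a hypothetical new two-machine entry would look like
#   Benchmarks['MyNetBench'] = [SysConfig('my-server.rcS', '512MB'),
#                               SysConfig('my-client.rcS', '256MB')]
# where the rcS scripts and memory sizes are made-up example values.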
benchs = Benchmarks.keys()
benchs.sort()
DefinedBenchmarks = ", ".join(benchs)
| bsd-3-clause |
fjbatresv/odoo | addons/website_sale_delivery/controllers/main.py | 124 | 1551 | # -*- coding: utf-8 -*-
import openerp
from openerp import http
from openerp.http import request
import openerp.addons.website_sale.controllers.main
class website_sale(openerp.addons.website_sale.controllers.main.website_sale):
@http.route(['/shop/payment'], type='http', auth="public", website=True)
def payment(self, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.website.sale_get_order(context=context)
carrier_id = post.get('carrier_id')
if carrier_id:
carrier_id = int(carrier_id)
if order:
request.registry['sale.order']._check_carrier_quotation(cr, uid, order, force_carrier_id=carrier_id, context=context)
if carrier_id:
return request.redirect("/shop/payment")
res = super(website_sale, self).payment(**post)
return res
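# Editorial note (added comment, not in the original module): the override above
# only acts when the shopper posts a carrier_id; in that case the delivery
# quotation on the current sale order is refreshed for that carrier (via
# _check_carrier_quotation) and the browser is redirected back to /shop/payment
# so the page is rendered with the updated delivery line.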
def order_lines_2_google_api(self, order_lines):
""" Transforms a list of order lines into a dict for google analytics """
order_lines_not_delivery = [line for line in order_lines if not line.is_delivery]
return super(website_sale, self).order_lines_2_google_api(order_lines_not_delivery)
def order_2_return_dict(self, order):
""" Returns the tracking_cart dict of the order for Google analytics """
ret = super(website_sale, self).order_2_return_dict(order)
for line in order.order_line:
if line.is_delivery:
ret['transaction']['shipping'] = line.price_unit
return ret
| agpl-3.0 |
bcarroll/authmgr | python-3.6.2-Win64/Lib/site-packages/werkzeug/contrib/securecookie.py | 91 | 12174 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash
from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native
class UnquoteError(Exception):
"""Internal exception used to signal failures on quoting."""
class SecureCookie(ModificationTrackingDict):
"""Represents a secure cookie. You can subclass this class and provide
an alternative mac method. The important thing is that the mac method
is a function with a similar interface to the hashlib. Required
methods are update() and digest().
Example usage:
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
>>> x["foo"]
42
>>> x["baz"]
(1, 2, 3)
>>> x["blafasel"] = 23
>>> x.should_save
True
:param data: the initial data. Either a dict, list of tuples or `None`.
:param secret_key: the secret key. If not set `None` or not specified
it has to be set before :meth:`serialize` is called.
:param new: The initial value of the `new` flag.
"""
#: The hash method to use. This has to be a module with a new function
#: or a function that creates a hashlib object. Such as `hashlib.md5`
#: Subclasses can override this attribute. The default hash is sha1.
#: Make sure to wrap this in staticmethod() if you store an arbitrary
#: function there such as hashlib.sha1 which might be implemented
#: as a function.
hash_method = staticmethod(_default_hash)
#: the module used for serialization. Unless overridden by subclasses
#: the standard pickle module is used.
serialization_method = pickle
#: if the contents should be base64 quoted. This can be disabled if the
#: serialization process returns cookie safe strings only.
quote_base64 = True
def __init__(self, data=None, secret_key=None, new=True):
ModificationTrackingDict.__init__(self, data or ())
# explicitly convert it into a bytestring because python 2.6
# no longer performs an implicit string conversion on hmac
if secret_key is not None:
secret_key = bytes(secret_key)
self.secret_key = secret_key
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved. By default this is only true
for :attr:`modified` cookies, not :attr:`new`.
"""
return self.modified
@classmethod
def quote(cls, value):
"""Quote the value for the cookie. This can be any object supported
by :attr:`serialization_method`.
:param value: the value to quote.
"""
if cls.serialization_method is not None:
value = cls.serialization_method.dumps(value)
if cls.quote_base64:
value = b''.join(base64.b64encode(value).splitlines()).strip()
return value
@classmethod
def unquote(cls, value):
"""Unquote the value for the cookie. If unquoting does not work a
:exc:`UnquoteError` is raised.
:param value: the value to unquote.
"""
try:
if cls.quote_base64:
value = base64.b64decode(value)
if cls.serialization_method is not None:
value = cls.serialization_method.loads(value)
return value
except Exception:
# unfortunately pickle and other serialization modules can
# cause pretty much every error here. If we get one we catch it
# and convert it into an UnquoteError
raise UnquoteError()
def serialize(self, expires=None):
"""Serialize the secure cookie into a string.
If expires is provided, the session will be automatically invalidated
after expiration when you unserialize it. This provides better
protection against session cookie theft.
:param expires: an optional expiration date for the cookie (a
:class:`datetime.datetime` object)
"""
if self.secret_key is None:
raise RuntimeError('no secret key defined')
if expires:
self['_expires'] = _date_to_unix(expires)
result = []
mac = hmac(self.secret_key, None, self.hash_method)
for key, value in sorted(self.items()):
result.append(('%s=%s' % (
url_quote_plus(key),
self.quote(value).decode('ascii')
)).encode('ascii'))
mac.update(b'|' + result[-1])
return b'?'.join([
base64.b64encode(mac.digest()).strip(),
b'&'.join(result)
])
@classmethod
def unserialize(cls, string, secret_key):
"""Load the secure cookie from a serialized string.
:param string: the cookie value to unserialize.
:param secret_key: the secret key used to serialize the cookie.
:return: a new :class:`SecureCookie`.
"""
if isinstance(string, text_type):
string = string.encode('utf-8', 'replace')
if isinstance(secret_key, text_type):
secret_key = secret_key.encode('utf-8', 'replace')
try:
base64_hash, data = string.split(b'?', 1)
except (ValueError, IndexError):
items = ()
else:
items = {}
mac = hmac(secret_key, None, cls.hash_method)
for item in data.split(b'&'):
mac.update(b'|' + item)
if b'=' not in item:
items = None
break
key, value = item.split(b'=', 1)
# try to make the key a string
key = url_unquote_plus(key.decode('ascii'))
try:
key = to_native(key)
except UnicodeError:
pass
items[key] = value
# no parsing error and the mac looks okay, we can now
# securely unpickle our cookie.
try:
client_hash = base64.b64decode(base64_hash)
except TypeError:
items = client_hash = None
if items is not None and safe_str_cmp(client_hash, mac.digest()):
try:
for key, value in iteritems(items):
items[key] = cls.unquote(value)
except UnquoteError:
items = ()
else:
if '_expires' in items:
if time() > items['_expires']:
items = ()
else:
del items['_expires']
else:
items = ()
return cls(items, secret_key, False)
@classmethod
def load_cookie(cls, request, key='session', secret_key=None):
"""Loads a :class:`SecureCookie` from a cookie in request. If the
cookie is not set, a new :class:`SecureCookie` instance is
returned.
:param request: a request object that has a `cookies` attribute
which is a dict of all cookie values.
:param key: the name of the cookie.
:param secret_key: the secret key used to unquote the cookie.
Always provide the value even though it has
no default!
"""
data = request.cookies.get(key)
if not data:
return cls(secret_key=secret_key)
return cls.unserialize(data, secret_key)
def save_cookie(self, response, key='session', expires=None,
session_expires=None, max_age=None, path='/', domain=None,
secure=None, httponly=False, force=False):
"""Saves the SecureCookie in a cookie on response object. All
parameters that are not described here are forwarded directly
to :meth:`~BaseResponse.set_cookie`.
:param response: a response object that has a
:meth:`~BaseResponse.set_cookie` method.
:param key: the name of the cookie.
:param session_expires: the expiration date of the secure cookie
stored information. If this is not provided
the cookie `expires` date is used instead.
"""
if force or self.should_save:
data = self.serialize(session_expires or expires)
response.set_cookie(key, data, expires=expires, max_age=max_age,
path=path, domain=domain, secure=secure,
httponly=httponly)
| bsd-3-clause |
jeremiahmarks/Todo.txt-python | tests/base.py | 2 | 3810 | # TODO.TXT-CLI-python test script
# Copyright (C) 2011-2012 Sigmavirus24, Jeff Stein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# TLDR: This is licensed under the GPLv3. See LICENSE for more details.
# Common functions for test_*_todo.py
import datetime
import os
import re
import sys
import unittest
sys.path.insert(0, '..')
import todo
todotxt = todo.CONFIG["TODO_FILE"] = "test_todo.txt"
donetxt = todo.CONFIG["DONE_FILE"] = "test_done.txt"
class BaseTest(unittest.TestCase):
num = 50
def default_config(self):
pass
def setUp(self):
todo.CONFIG["PRE_DATE"] = False
todo.CONFIG["TODO_PY"] = "testing"
todo.default_config = self.default_config
sys.stdout = open(os.devnull, 'w')
open(todotxt, "w+").close()
open(donetxt, "w+").close()
def tearDown(self):
sys.stdout = sys.__stdout__
if os.path.isfile(todotxt):
os.unlink(todotxt)
if os.path.isfile(donetxt):
os.unlink(donetxt)
def count_matches(self, regexp=None):
count = 0
for line in todo.iter_todos():
if regexp == None or re.match(regexp, line):
count += 1
return count
def _test_lines_no_pri(self, num):
return ["Test {0}".format(i) for i in range(0, num)]
def _test_lines_pri(self, num):
n = len(todo.PRIORITIES)
p = todo.PRIORITIES
return ["({0}) Test {1}".format(p[i % n], i) for i in range(0, num)]
def _test_lines_date(self, num):
l = self._test_lines_pri(num)
m = []
start_date = datetime.date.today()
for d, l in zip((start_date + datetime.timedelta(n) for n in range(num)), l):
m.append(todo.concat([l, " #{%s}" % d.isoformat()]))
return m
def _test_lines_project(self, num):
projects = ["+foo", "+bar", "+bogus", "+github", "+school", "+work",
"+inthemorning", "+agenda", "+noagenda"]
n = len(projects)
l = self._test_lines_pri(num)
m = []
for i in range(0, num):
m.append(todo.concat([l[i], projects[i % n]], " "))
return m
def _test_lines_context(self, num):
projects = ["@foo", "@bar", "@bogus", "@github", "@school", "@work",
"@inthemorning", "@agenda", "@noagenda"]
n = len(projects)
l = self._test_lines_pri(num)
m = []
for i in range(0, num):
m.append(todo.concat([l[i], projects[i % n]], " "))
return m
def assertNumLines(self, exp, regexp=None):
c = self.count_matches(regexp)
self.assertEqual(exp, c)
def assertIsInstance(self, obj, cls, msg=None):
if sys.version_info >= (2, 7):
super(BaseTest, self).assertIsInstance(obj, cls, msg)
else:
self.assertTrue(isinstance(obj, cls))
def assertIsNotNone(self, expr, msg=None):
if sys.version_info >= (2, 7):
super(BaseTest, self).assertIsNotNone(expr, msg)
else:
if not expr:
self.fail(msg)
def force_print(self, message):
sys.stderr.write(''.join([message, '\n']))
sys.stderr.flush()
| gpl-3.0 |
Kha/flask-admin | examples/layout_bootstrap3/app.py | 43 | 6109 | import os
import os.path as op
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import flask_admin as admin
from flask_admin.contrib.sqla import ModelView
# Create application
app = Flask(__name__)
# Create dummy secrey key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
app.config['DATABASE_FILE'] = 'sample_db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Models
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(64))
email = db.Column(db.Unicode(64))
def __unicode__(self):
return self.name
class Page(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Unicode(64))
content = db.Column(db.UnicodeText)
def __unicode__(self):
return self.title
# Customized admin interface
class CustomView(ModelView):
list_template = 'list.html'
create_template = 'create.html'
edit_template = 'edit.html'
class UserAdmin(CustomView):
column_searchable_list = ('name',)
column_filters = ('name', 'email')
# Flask views
@app.route('/')
def index():
return '<a href="/admin/">Click me to get to Admin!</a>'
# Create admin with custom base template
admin = admin.Admin(app, 'Example: Layout-BS3', base_template='layout.html', template_mode='bootstrap3')
# Add views
admin.add_view(UserAdmin(User, db.session))
admin.add_view(CustomView(Page, db.session))
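# Editorial note (added comment, not part of the original example): the template
# names used by CustomView above ('list.html', 'create.html', 'edit.html') are
# looked up in the example's templates/ directory; such an override would
# typically extend the stock Flask-Admin template and add to it, for instance:
#   {% extends 'admin/model/list.html' %}
#   {% block body %}
#     <p>Extra content above the model table.</p>
#     {{ super() }}
#   {% endblock %}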
def build_sample_db():
"""
Populate a small db with some example entries.
"""
db.drop_all()
db.create_all()
first_names = [
'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie','Sophie', 'Mia',
'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica',
'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy'
]
last_names = [
'Brown', 'Smith', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas',
'Roberts', 'Khan', 'Lewis', 'Jackson', 'Clarke', 'James', 'Phillips', 'Wilson',
'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander'
]
for i in range(len(first_names)):
user = User()
user.name = first_names[i] + " " + last_names[i]
user.email = first_names[i].lower() + "@example.com"
db.session.add(user)
sample_text = [
{
'title': "de Finibus Bonorum et Malorum - Part I",
'content': "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \
exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \
dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. \
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \
mollit anim id est laborum."
},
{
'title': "de Finibus Bonorum et Malorum - Part II",
'content': "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque \
laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto \
beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur \
aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi \
nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, \
adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam \
aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam \
corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum \
iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum \
qui dolorem eum fugiat quo voluptas nulla pariatur?"
},
{
'title': "de Finibus Bonorum et Malorum - Part III",
'content': "At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium \
voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati \
cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id \
est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam \
libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod \
maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. \
Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet \
ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur \
a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis \
doloribus asperiores repellat."
}
]
for entry in sample_text:
page = Page()
page.title = entry['title']
page.content = entry['content']
db.session.add(page)
db.session.commit()
return
if __name__ == '__main__':
# Build a sample db on the fly, if one does not exist yet.
app_dir = op.realpath(os.path.dirname(__file__))
database_path = op.join(app_dir, app.config['DATABASE_FILE'])
if not os.path.exists(database_path):
build_sample_db()
# Start app
app.run(debug=True)
| bsd-3-clause |
testmana2/test | Helpviewer/HelpSearchWidget.py | 2 | 4654 | # -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a window for showing the QtHelp index.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSignal, Qt, QEvent, QUrl
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QTextBrowser, QApplication, \
QMenu
class HelpSearchWidget(QWidget):
"""
Class implementing a window for showing the QtHelp index.
@signal linkActivated(QUrl) emitted when a search result entry is activated
@signal escapePressed() emitted when the ESC key was pressed
"""
linkActivated = pyqtSignal(QUrl)
escapePressed = pyqtSignal()
def __init__(self, engine, mainWindow, parent=None):
"""
Constructor
@param engine reference to the help search engine (QHelpSearchEngine)
@param mainWindow reference to the main window object (QMainWindow)
@param parent reference to the parent widget (QWidget)
"""
super(HelpSearchWidget, self).__init__(parent)
self.__engine = engine
self.__mw = mainWindow
self.__layout = QVBoxLayout(self)
self.__result = self.__engine.resultWidget()
self.__query = self.__engine.queryWidget()
self.__layout.addWidget(self.__query)
self.__layout.addWidget(self.__result)
self.setFocusProxy(self.__query)
self.__query.search.connect(self.__search)
self.__result.requestShowLink.connect(self.linkActivated)
self.__engine.searchingStarted.connect(self.__searchingStarted)
self.__engine.searchingFinished.connect(self.__searchingFinished)
self.__browser = self.__result.findChildren(QTextBrowser)[0]
if self.__browser:
self.__browser.viewport().installEventFilter(self)
def __search(self):
"""
Private slot to perform a search of the database.
"""
query = self.__query.query()
self.__engine.search(query)
def __searchingStarted(self):
"""
Private slot to handle the start of a search.
"""
QApplication.setOverrideCursor(Qt.WaitCursor)
def __searchingFinished(self, hits):
"""
Private slot to handle the end of the search.
@param hits number of hits (integer) (unused)
"""
QApplication.restoreOverrideCursor()
def eventFilter(self, watched, event):
"""
Public method called to filter the event queue.
@param watched the QObject being watched (QObject)
@param event the event that occurred (QEvent)
@return flag indicating whether the event was handled (boolean)
"""
if self.__browser and watched == self.__browser.viewport() and \
event.type() == QEvent.MouseButtonRelease:
link = self.__result.linkAt(event.pos())
if not link.isEmpty() and link.isValid():
ctrl = event.modifiers() & Qt.ControlModifier
if (event.button() == Qt.LeftButton and ctrl) or \
event.button() == Qt.MidButton:
self.__mw.newTab(link)
return QWidget.eventFilter(self, watched, event)
def keyPressEvent(self, evt):
"""
Protected method handling key press events.
@param evt reference to the key press event (QKeyEvent)
"""
if evt.key() == Qt.Key_Escape:
self.escapePressed.emit()
else:
evt.ignore()
def contextMenuEvent(self, evt):
"""
Protected method handling context menu events.
@param evt reference to the context menu event (QContextMenuEvent)
"""
point = evt.globalPos()
if self.__browser:
point = self.__browser.mapFromGlobal(point)
if not self.__browser.rect().contains(point, True):
return
link = QUrl(self.__browser.anchorAt(point))
else:
point = self.__result.mapFromGlobal(point)
link = self.__result.linkAt(point)
if link.isEmpty() or not link.isValid():
return
menu = QMenu()
curTab = menu.addAction(self.tr("Open Link"))
newTab = menu.addAction(self.tr("Open Link in New Tab"))
menu.move(evt.globalPos())
act = menu.exec_()
if act == curTab:
self.linkActivated.emit(link)
elif act == newTab:
self.__mw.newTab(link)
| gpl-3.0 |
Judystudy/gooderp_addons | sell/report/sell_summary_goods.py | 6 | 6707 | # -*- coding: utf-8 -*-
from odoo import fields, models, api
import odoo.addons.decimal_precision as dp
import datetime
class SellSummaryGoods(models.Model):
_name = 'sell.summary.goods'
_inherit = 'report.base'
_description = u'销售汇总表(按商品)'
id_lists = fields.Text(u'移动明细行id列表')
goods_categ = fields.Char(u'商品类别')
goods_code = fields.Char(u'商品编码')
goods = fields.Char(u'商品名称')
attribute = fields.Char(u'属性')
warehouse = fields.Char(u'仓库')
qty_uos = fields.Float(u'辅助数量', digits=dp.get_precision('Quantity'))
uos = fields.Char(u'辅助单位')
qty = fields.Float(u'基本数量', digits=dp.get_precision('Quantity'))
uom = fields.Char(u'基本单位')
price = fields.Float(u'单价', digits=dp.get_precision('Price'))
amount = fields.Float(u'销售收入', digits=dp.get_precision('Amount'))
tax_amount = fields.Float(u'税额', digits=dp.get_precision('Amount'))
subtotal = fields.Float(u'价税合计', digits=dp.get_precision('Amount'))
margin = fields.Float(u'毛利', digits=dp.get_precision('Amount'))
def select_sql(self, sql_type='out'):
return '''
SELECT MIN(wml.id) as id,
array_agg(wml.id) AS id_lists,
categ.name AS goods_categ,
goods.code AS goods_code,
goods.name AS goods,
attr.name AS attribute,
wh.name AS warehouse,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_uos_qty
ELSE - wml.goods_uos_qty END) AS qty_uos,
uos.name AS uos,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_qty
ELSE - wml.goods_qty END) AS qty,
uom.name AS uom,
(CASE WHEN SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_qty
ELSE - wml.goods_qty END) = 0 THEN 0
ELSE
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.amount
ELSE - wml.amount END)
/ SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_qty
ELSE - wml.goods_qty END)
END) AS price,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.amount
ELSE - wml.amount END) AS amount,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.tax_amount
ELSE - wml.tax_amount END) AS tax_amount,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.subtotal
ELSE - wml.subtotal END) AS subtotal,
(SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.amount
ELSE - wml.amount END) - SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_qty
ELSE - wml.goods_qty END) * wml.cost_unit) AS margin
'''
def from_sql(self, sql_type='out'):
return '''
FROM wh_move_line AS wml
LEFT JOIN wh_move wm ON wml.move_id = wm.id
LEFT JOIN partner ON wm.partner_id = partner.id
LEFT JOIN goods ON wml.goods_id = goods.id
LEFT JOIN core_category AS categ ON goods.category_id = categ.id
LEFT JOIN attribute AS attr ON wml.attribute_id = attr.id
LEFT JOIN warehouse AS wh ON wml.warehouse_id = wh.id
OR wml.warehouse_dest_id = wh.id
LEFT JOIN uom AS uos ON goods.uos_id = uos.id
LEFT JOIN uom ON goods.uom_id = uom.id
'''
def where_sql(self, sql_type='out'):
extra = ''
if self.env.context.get('partner_id'):
extra += 'AND partner.id = {partner_id}'
if self.env.context.get('goods_id'):
extra += 'AND goods.id = {goods_id}'
if self.env.context.get('goods_categ_id'):
extra += 'AND categ.id = {goods_categ_id}'
if self.env.context.get('warehouse_id'):
extra += 'AND wh.id = {warehouse_id}'
return '''
WHERE wml.state = 'done'
AND wml.date >= '{date_start}'
AND wml.date < '{date_end}'
AND wm.origin like 'sell.delivery%%'
AND wh.type = 'stock'
%s
''' % extra
def group_sql(self, sql_type='out'):
return '''
GROUP BY goods_categ,goods_code,goods,attribute,warehouse,uos,uom,wml.cost_unit
'''
def order_sql(self, sql_type='out'):
return '''
ORDER BY goods_code,goods,attribute,warehouse
'''
def get_context(self, sql_type='out', context=None):
date_end = datetime.datetime.strptime(
context.get('date_end'), '%Y-%m-%d') + datetime.timedelta(days=1)
date_end = date_end.strftime('%Y-%m-%d')
return {
'date_start': context.get('date_start') or '',
'date_end': date_end,
'partner_id': context.get('partner_id') and context.get('partner_id')[0] or '',
'goods_id': context.get('goods_id') and context.get('goods_id')[0] or '',
'goods_categ_id': context.get('goods_categ_id') and context.get('goods_categ_id')[0] or '',
'warehouse_id': context.get('warehouse_id') and context.get('warehouse_id')[0] or '',
}
def _compute_order(self, result, order):
order = order or 'goods_code ASC'
return super(SellSummaryGoods, self)._compute_order(result, order)
def collect_data_by_sql(self, sql_type='out'):
collection = self.execute_sql(sql_type='out')
return collection
@api.multi
def view_detail(self):
        '''销售汇总表(按商品)查看明细按钮 -- "view details" button of the sales summary (by goods) report'''
self.ensure_one()
line_ids = []
res = []
move_lines = []
result = self.get_data_from_cache()
for line in result:
if line.get('id') == self.id:
line_ids = line.get('id_lists')
move_lines = self.env['wh.move.line'].search(
[('id', 'in', line_ids)])
for move_line in move_lines:
details = self.env['sell.order.detail'].search(
[('order_name', '=', move_line.move_id.name),
('goods_id', '=', move_line.goods_id.id)])
for detail in details:
res.append(detail.id)
return {
'name': u'销售明细表',
'view_mode': 'tree',
'view_id': False,
'res_model': 'sell.order.detail',
'type': 'ir.actions.act_window',
'domain': [('id', 'in', res)],
}
| agpl-3.0 |
zerkrx/zerkbox | lib/youtube_dl/extractor/disney.py | 21 | 6696 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
compat_str,
determine_ext,
ExtractorError,
)
class DisneyIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?P<domain>(?:[^/]+\.)?(?:disney\.[a-z]{2,3}(?:\.[a-z]{2})?|disney(?:(?:me|latino)\.com|turkiye\.com\.tr|channel\.de)|(?:starwars|marvelkids)\.com))/(?:(?:embed/|(?:[^/]+/)+[\w-]+-)(?P<id>[a-z0-9]{24})|(?:[^/]+/)?(?P<display_id>[^/?#]+))'''
_TESTS = [{
# Disney.EmbedVideo
'url': 'http://video.disney.com/watch/moana-trailer-545ed1857afee5a0ec239977',
'info_dict': {
'id': '545ed1857afee5a0ec239977',
'ext': 'mp4',
'title': 'Moana - Trailer',
'description': 'A fun adventure for the entire Family! Bring home Moana on Digital HD Feb 21 & Blu-ray March 7',
'upload_date': '20170112',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# Grill.burger
'url': 'http://www.starwars.com/video/rogue-one-a-star-wars-story-intro-featurette',
'info_dict': {
'id': '5454e9f4e9804a552e3524c8',
'ext': 'mp4',
'title': '"Intro" Featurette: Rogue One: A Star Wars Story',
'upload_date': '20170104',
'description': 'Go behind-the-scenes of Rogue One: A Star Wars Story in this featurette with Director Gareth Edwards and the cast of the film.',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'http://videos.disneylatino.com/ver/spider-man-de-regreso-a-casa-primer-adelanto-543a33a1850bdcfcca13bae2',
'only_matching': True,
}, {
'url': 'http://video.en.disneyme.com/watch/future-worm/robo-carp-2001-544b66002aa7353cdd3f5114',
'only_matching': True,
}, {
'url': 'http://video.disneyturkiye.com.tr/izle/7c-7-cuceler/kimin-sesi-zaten-5456f3d015f6b36c8afdd0e2',
'only_matching': True,
}, {
'url': 'http://disneyjunior.disney.com/embed/546a4798ddba3d1612e4005d',
'only_matching': True,
}, {
'url': 'http://www.starwars.com/embed/54690d1e6c42e5f09a0fb097',
'only_matching': True,
}, {
'url': 'http://spiderman.marvelkids.com/embed/522900d2ced3c565e4cc0677',
'only_matching': True,
}, {
'url': 'http://spiderman.marvelkids.com/videos/contest-of-champions-part-four-clip-1',
'only_matching': True,
}, {
'url': 'http://disneyjunior.en.disneyme.com/dj/watch-my-friends-tigger-and-pooh-promo',
'only_matching': True,
}, {
'url': 'http://disneychannel.de/sehen/soy-luna-folge-118-5518518987ba27f3cc729268',
'only_matching': True,
}, {
'url': 'http://disneyjunior.disney.com/galactech-the-galactech-grab-galactech-an-admiral-rescue',
'only_matching': True,
}]
def _real_extract(self, url):
domain, video_id, display_id = re.match(self._VALID_URL, url).groups()
if not video_id:
webpage = self._download_webpage(url, display_id)
grill = re.sub(r'"\s*\+\s*"', '', self._search_regex(
r'Grill\.burger\s*=\s*({.+})\s*:',
webpage, 'grill data'))
page_data = next(s for s in self._parse_json(grill, display_id)['stack'] if s.get('type') == 'video')
video_data = page_data['data'][0]
else:
webpage = self._download_webpage(
'http://%s/embed/%s' % (domain, video_id), video_id)
page_data = self._parse_json(self._search_regex(
r'Disney\.EmbedVideo\s*=\s*({.+});',
webpage, 'embed data'), video_id)
video_data = page_data['video']
for external in video_data.get('externals', []):
if external.get('source') == 'vevo':
return self.url_result('vevo:' + external['data_id'], 'Vevo')
video_id = video_data['id']
title = video_data['title']
formats = []
for flavor in video_data.get('flavors', []):
flavor_format = flavor.get('format')
flavor_url = flavor.get('url')
if not flavor_url or not re.match(r'https?://', flavor_url) or flavor_format == 'mp4_access':
continue
tbr = int_or_none(flavor.get('bitrate'))
if tbr == 99999:
formats.extend(self._extract_m3u8_formats(
flavor_url, video_id, 'mp4',
m3u8_id=flavor_format, fatal=False))
continue
format_id = []
if flavor_format:
format_id.append(flavor_format)
if tbr:
format_id.append(compat_str(tbr))
ext = determine_ext(flavor_url)
if flavor_format == 'applehttp' or ext == 'm3u8':
ext = 'mp4'
width = int_or_none(flavor.get('width'))
height = int_or_none(flavor.get('height'))
formats.append({
'format_id': '-'.join(format_id),
'url': flavor_url,
'width': width,
'height': height,
'tbr': tbr,
'ext': ext,
'vcodec': 'none' if (width == 0 and height == 0) else None,
})
if not formats and video_data.get('expired'):
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, page_data['translations']['video_expired']),
expected=True)
self._sort_formats(formats)
subtitles = {}
for caption in video_data.get('captions', []):
caption_url = caption.get('url')
caption_format = caption.get('format')
if not caption_url or caption_format.startswith('unknown'):
continue
subtitles.setdefault(caption.get('language', 'en'), []).append({
'url': caption_url,
'ext': {
'webvtt': 'vtt',
}.get(caption_format, caption_format),
})
return {
'id': video_id,
'title': title,
'description': video_data.get('description') or video_data.get('short_desc'),
'thumbnail': video_data.get('thumb') or video_data.get('thumb_secure'),
'duration': int_or_none(video_data.get('duration_sec')),
'upload_date': unified_strdate(video_data.get('publish_date')),
'formats': formats,
'subtitles': subtitles,
}
| gpl-3.0 |
mastizada/kuma | kuma/wiki/migrations/0023_attachment_m2m.py | 5 | 17911 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DocumentAttachment'
db.create_table('wiki_documentattachment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('file', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['attachments.Attachment'])),
('document', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Document'])),
('attached_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('name', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('attachments', ['DocumentAttachment'])
def backwards(self, orm):
# Deleting model 'DocumentAttachment'
db.delete_table('wiki_documentattachment')
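    # Usage sketch (not part of the migration itself): applying or rolling this
    # back goes through South's migrate command; the "0022" prefix for the
    # previous migration is an assumption.
    #
    #     ./manage.py migrate wiki 0023_attachment_m2m
    #     ./manage.py migrate wiki 0022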
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tidings.watch': {
'Meta': {'object_name': 'Watch'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'attachments.attachment': {
'Meta': {'object_name': 'Attachment', 'db_table': "'wiki_attachment'"},
'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_rev'", 'null': 'True', 'to': "orm['attachments.AttachmentRevision']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mindtouch_attachment_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'attachments.attachmentrevision': {
'Meta': {'object_name': 'AttachmentRevision', 'db_table': "'wiki_attachmentrevision'"},
'attachment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['attachments.Attachment']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_attachment_revisions'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'mindtouch_old_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'})
},
'wiki.document': {
'Meta': {'unique_together': "(('parent', 'locale'), ('slug', 'locale'))", 'object_name': 'Document'},
'category': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_for+'", 'null': 'True', 'to': "orm['wiki.Revision']"}),
'defer_rendering': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'files': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['attachments.Attachment']", 'through': "orm['wiki.DocumentAttachment']", 'symmetrical': 'False'}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_localizable': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'last_rendered_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'locale': ('kuma.core.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}),
'mindtouch_page_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['wiki.Document']"}),
'parent_topic': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.Document']"}),
'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['wiki.Document']", 'through': "orm['wiki.RelatedDocument']", 'symmetrical': 'False'}),
'render_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'render_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'rendered_errors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rendered_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'wiki.documentattachment': {
'Meta': {'object_name': 'DocumentAttachment'},
'attached_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Document']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['attachments.Attachment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
'wiki.documenttag': {
'Meta': {'object_name': 'DocumentTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'wiki.editortoolbar': {
'Meta': {'object_name': 'EditorToolbar'},
'code': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_toolbars'", 'to': "orm['auth.User']"}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'wiki.firefoxversion': {
'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'FirefoxVersion'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'firefox_version_set'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.IntegerField', [], {})
},
'wiki.helpfulvote': {
'Meta': {'object_name': 'HelpfulVote'},
'anonymous_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'null': 'True', 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': "orm['wiki.Document']"}),
'helpful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'wiki.operatingsystem': {
'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'OperatingSystem'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'operating_system_set'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.IntegerField', [], {})
},
'wiki.relateddocument': {
'Meta': {'ordering': "['-in_common']", 'object_name': 'RelatedDocument'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_from'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_common': ('django.db.models.fields.IntegerField', [], {}),
'related': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_to'", 'to': "orm['wiki.Document']"})
},
'wiki.reviewtag': {
'Meta': {'object_name': 'ReviewTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'wiki.reviewtaggedrevision': {
'Meta': {'object_name': 'ReviewTaggedRevision'},
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ReviewTag']"})
},
'wiki.revision': {
'Meta': {'object_name': 'Revision'},
'based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']", 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_revisions'", 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'mindtouch_old_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviewed_revisions'", 'null': 'True', 'to': "orm['auth.User']"}),
'show_toc': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'significance': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'})
},
'wiki.taggeddocument': {
'Meta': {'object_name': 'TaggedDocument'},
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.DocumentTag']"})
}
}
complete_apps = ['wiki']
| mpl-2.0 |
hernandito/SickRage | lib/sqlalchemy/dialects/postgresql/zxjdbc.py | 79 | 1395 | # postgresql/zxjdbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: postgresql+zxjdbc://scott:tiger@localhost/db
:driverurl: http://jdbc.postgresql.org/
"""
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import PGDialect, PGExecutionContext
class PGExecutionContext_zxjdbc(PGExecutionContext):
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
jdbc_db_name = 'postgresql'
jdbc_driver_name = 'org.postgresql.Driver'
execution_ctx_cls = PGExecutionContext_zxjdbc
supports_native_decimal = True
def __init__(self, *args, **kwargs):
super(PGDialect_zxjdbc, self).__init__(*args, **kwargs)
from com.ziclix.python.sql.handler import PostgresqlDataHandler
self.DataHandler = PostgresqlDataHandler
def _get_server_version_info(self, connection):
parts = connection.connection.dbversion.split('.')
return tuple(int(x) for x in parts)
dialect = PGDialect_zxjdbc
| gpl-3.0 |
sserrot/champion_relationships | venv/Lib/site-packages/win32/lib/pywintypes.py | 6 | 7120 | # Magic utility that "redirects" to pywintypesxx.dll
import imp, sys, os
def __import_pywin32_system_module__(modname, globs):
# This has been through a number of iterations. The problem: how to
# locate pywintypesXX.dll when it may be in a number of places, and how
# to avoid ever loading it twice. This problem is compounded by the
# fact that the "right" way to do this requires win32api, but this
# itself requires pywintypesXX.
# And the killer problem is that someone may have done 'import win32api'
# before this code is called. In that case Windows will have already
# loaded pywintypesXX as part of loading win32api - but by the time
# we get here, we may locate a different one. This appears to work, but
# then starts raising bizarre TypeErrors complaining that something
# is not a pywintypes type when it clearly is!
# So in what we hope is the last major iteration of this, we now
# rely on a _win32sysloader module, implemented in C but not relying
# on pywintypesXX.dll. It then can check if the DLL we are looking for
# lib is already loaded.
if not sys.platform.startswith("win32"):
# These extensions can be built on Linux via the 'mainwin' toolkit.
# Look for a native 'lib{modname}.so'
# NOTE: The _win32sysloader module will probably build in this
# environment, so it may be better to use that here too.
for ext, mode, ext_type in imp.get_suffixes():
if ext_type==imp.C_EXTENSION:
for path in sys.path:
look = os.path.join(path, "lib" + modname + ext)
if os.path.isfile(look):
mod = imp.load_module(modname, None, look,
(ext, mode, ext_type))
# and fill our namespace with it.
# XXX - if this ever moves to py3k, this will probably
# need similar adjustments as below...
globs.update(mod.__dict__)
return
raise ImportError("No dynamic module " + modname)
# See if this is a debug build.
for suffix_item in imp.get_suffixes():
if suffix_item[0]=='_d.pyd':
suffix = '_d'
break
else:
suffix = ""
filename = "%s%d%d%s.dll" % \
(modname, sys.version_info[0], sys.version_info[1], suffix)
if hasattr(sys, "frozen"):
# If we are running from a frozen program (py2exe, McMillan, freeze)
# then we try and load the DLL from our sys.path
# XXX - This path may also benefit from _win32sysloader? However,
# MarkH has never seen the DLL load problem with py2exe programs...
for look in sys.path:
# If the sys.path entry is a (presumably) .zip file, use the
# directory
if os.path.isfile(look):
look = os.path.dirname(look)
found = os.path.join(look, filename)
if os.path.isfile(found):
break
else:
raise ImportError("Module '%s' isn't in frozen sys.path %s" % (modname, sys.path))
else:
# First see if it already in our process - if so, we must use that.
import _win32sysloader
found = _win32sysloader.GetModuleFilename(filename)
if found is None:
# We ask Windows to load it next. This is in an attempt to
# get the exact same module loaded should pywintypes be imported
# first (which is how we are here) or if, eg, win32api was imported
# first thereby implicitly loading the DLL.
# Sadly though, it doesn't quite work - if pywintypesxx.dll
# is in system32 *and* the executable's directory, on XP SP2, an
# import of win32api will cause Windows to load pywintypes
# from system32, where LoadLibrary for that name will
# load the one in the exe's dir.
# That shouldn't really matter though, so long as we only ever
# get one loaded.
found = _win32sysloader.LoadModule(filename)
if found is None:
# Windows can't find it - which although isn't relevent here,
# means that we *must* be the first win32 import, as an attempt
# to import win32api etc would fail when Windows attempts to
# locate the DLL.
# This is most likely to happen for "non-admin" installs, where
# we can't put the files anywhere else on the global path.
# If there is a version in our Python directory, use that
if os.path.isfile(os.path.join(sys.prefix, filename)):
found = os.path.join(sys.prefix, filename)
if found is None:
# Not in the Python directory? Maybe we were installed via
# easy_install...
if os.path.isfile(os.path.join(os.path.dirname(__file__), filename)):
found = os.path.join(os.path.dirname(__file__), filename)
if found is None:
# We might have been installed via PIP and without the post-install
# script having been run, so they might be in the
# lib/site-packages/pywin32_system32 directory.
# This isn't ideal as it means, say 'python -c "import win32api"'
# will not work but 'python -c "import pywintypes, win32api"' will,
# but it's better than nothing...
import distutils.sysconfig
maybe = os.path.join(distutils.sysconfig.get_python_lib(plat_specific=1),
"pywin32_system32", filename)
if os.path.isfile(maybe):
found = maybe
if found is None:
# give up in disgust.
raise ImportError("No system module '%s' (%s)" % (modname, filename))
# py2k and py3k differences:
# On py2k, after doing "imp.load_module('pywintypes')", sys.modules
# is unchanged - ie, sys.modules['pywintypes'] still refers to *this*
# .py module - but the module's __dict__ has *already* need updated
# with the new module's contents.
# However, on py3k, sys.modules *is* changed - sys.modules['pywintypes']
# will be changed to the new module object.
# SO: * on py2k don't need to update any globals.
# * on py3k we update our module dict with the new module's dict and
# copy its globals to ours.
old_mod = sys.modules[modname]
# Python can load the module
mod = imp.load_dynamic(modname, found)
# Check the sys.modules[] behaviour we describe above is true...
if sys.version_info < (3,0):
assert sys.modules[modname] is old_mod
assert mod is old_mod
else:
assert sys.modules[modname] is not old_mod
assert sys.modules[modname] is mod
# as above - re-reset to the *old* module object then update globs.
sys.modules[modname] = old_mod
globs.update(mod.__dict__)
__import_pywin32_system_module__("pywintypes", globals())
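# Typical client usage (a sketch, not part of this module): import pywintypes
# before any other win32 extension so the matching pywintypesXX.dll is the one
# loaded into the process, e.g.
#
#     import pywintypes
#     import win32api
#     t = pywintypes.Time(0)   # a PyTime wrapping the Unix epoch
#     print(win32api.GetVersionEx())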
| mit |
yavalvas/yav_com | build/matplotlib/examples/user_interfaces/embedding_in_wx2.py | 9 | 2706 | #!/usr/bin/env python
"""
An example of how to use wx or wxagg in an application with the new
toolbar - comment out the add_toolbar line for no toolbar
"""
# Used to guarantee to use at least Wx2.8
import wxversion
wxversion.ensureMinimal('2.8')
from numpy import arange, sin, pi
import matplotlib
# uncomment the following to use wx rather than wxagg
#matplotlib.use('WX')
#from matplotlib.backends.backend_wx import FigureCanvasWx as FigureCanvas
# comment out the following to use wx rather than wxagg
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import wx
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,-1,
'CanvasFrame',size=(550,350))
self.SetBackgroundColour(wx.NamedColour("WHITE"))
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
self.axes.plot(t,s)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.sizer)
self.Fit()
self.add_toolbar() # comment this out for no toolbar
def add_toolbar(self):
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
# Mac platform (OSX 10.3, MacPython) does not seem to cope with
# having a toolbar in a sizer. This work-around gets the buttons
# back, but at the expense of having the toolbar at the top
self.SetToolBar(self.toolbar)
else:
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
# As noted above, doesn't work for Mac.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
# update the axes menu on the toolbar
self.toolbar.update()
def OnPaint(self, event):
self.canvas.draw()
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
| mit |
cbschaff/NBP | src/models/beacon10.py | 1 | 1853 | """
Copyright (C) 2017 Charles Schaff, David Yunis, Ayan Chakrabarti,
Matthew R. Walter. See LICENSE.txt for details.
"""
# Beacon model 10: fixed beacons of 8 channels in alternating clusters (of different subsets of 16)
import tensorflow as tf
import numpy as np
# Use with 8 channels
wn=1
def beacon(self):
NCHAN=self.NCHAN
v = np.zeros((25,25,9),dtype=np.float32)
v[:,:,0] = 0.5
y = [[ 4, 3, 5, 1],
[ 7, 2, 6, 8,],
[ 3, 6, 5, 4,],
[ 8, 4, 1, 7,],
[ 6, 8, 5, 3,],
[ 2, 4, 3, 7,],
[ 1, 2, 5, 6,],
[ 3, 8, 2, 7,],
[ 8, 5, 6, 4,],
[ 7, 1, 2, 3,],
[ 8, 3, 6, 2,],
[ 5, 4, 1, 7,],
[ 3, 7, 8, 6,],
[ 2, 1, 3, 4,],
[ 8, 2, 7, 6,],
[ 5, 1, 3, 4,],
[ 8, 2, 7, 5,],
[ 3, 4, 6, 1,],
[ 1, 2, 6, 8,],
[ 5, 7, 3, 4,],
[ 7, 2, 8, 1,],
[ 4, 5, 6, 3,],
[ 8, 2, 3, 7,],
[ 5, 1, 6, 4,],
[ 2, 3, 8, 1,],
[ 4, 7, 6, 5,],
[ 8, 3, 1, 7,],
[ 2, 6, 7, 8,],
[ 1, 3, 5, 4,],
[ 2, 7, 8, 6,],
[ 8, 3, 2, 7,],
[ 4, 6, 5, 1,],
[ 8, 4, 3, 7,],
[ 6, 2, 5, 1,],
[ 4, 3, 7, 2,],
[ 8, 5, 6, 1,]]
x= np.array(y)
for i in xrange(6):
for j in xrange(6):
v[2+4*i,2+4*j,x[6*i+j,0]] = 1.0
v[3+4*i,2+4*j,x[6*i+j,1]] = 1.0
v[2+4*i,3+4*j,x[6*i+j,2]] = 1.0
v[3+4*i,3+4*j,x[6*i+j,3]] = 1.0
v = np.reshape(v,(9*625))
lgsen = tf.Variable(v,trainable=False)
self.weights['sensor'] = lgsen
self.entropy = tf.constant(0)
lgsen = tf.reshape(lgsen,[1,self.NTX,NCHAN+1,1])
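    # The max-pool below runs over the channel axis (size NCHAN+1), so each grid
    # cell is compared against its own maximum: tf.equal() then one-hot encodes
    # the winning channel (channel 0 holds the 0.5 background for cells without
    # a beacon), and the final slice drops channel 0 so empty cells emit nothing.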
lgout = tf.to_float(tf.equal(lgsen,tf.nn.max_pool(lgsen,\
[1,1,self.NCHAN+1,1],[1,1,self.NCHAN+1,1],'VALID')))
lgout = tf.reshape(lgout,[1,1,self.NTX,NCHAN+1])
lgout = tf.slice(lgout,begin=[0,0,0,1],size=[-1,-1,-1,-1])
return lgout
| gpl-3.0 |
msreis/SigNetSim | signetsim/views/auth/ActivateAccountView.py | 2 | 2778 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel ([email protected])
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" ActivateAccountView.py
This file ...
"""
from django.views.generic import TemplateView
from signetsim.views.HasUserLoggedIn import HasUserLoggedIn
from signetsim.models import User
from django.core.mail import send_mail
from django.conf import settings
class ActivateAccountView(TemplateView, HasUserLoggedIn):
template_name = 'accounts/activate_account.html'
def __init__(self, **kwargs):
TemplateView.__init__(self, **kwargs)
HasUserLoggedIn.__init__(self, **kwargs)
self.activated = False
def get_context_data(self, **kwargs):
kwargs['activated'] = self.activated
return kwargs
def get(self, request, *args, **kwargs):
if (request.user.is_staff is True
			and request.GET.get('username') is not None
and User.objects.filter(username=request.GET['username']).exists()):
t_user = User.objects.get(username=request.GET['username'])
t_user.is_active = True
t_user.save()
# For test runs
if 'HTTP_HOST' in request.META:
self.sendUserEmail(request, t_user.username, t_user.email)
self.activated = True
return TemplateView.get(self, request, *args, **kwargs)
def sendUserEmail(self, request, username, email):
url = settings.BASE_URL
if "HTTP_X_SCRIPT_NAME" in request.META and request.META['HTTP_X_SCRIPT_NAME'] != "":
url = str(request.META['HTTP_X_SCRIPT_NAME']) + url
if "HTTP_X_SCHEME" in request.META and request.META['HTTP_X_SCHEME'] != "":
url = "%s://%s%s" % (str(request.META['HTTP_X_SCHEME']), request.META['HTTP_HOST'], url)
else:
url = "%s://%s%s" % (request.scheme, request.META['HTTP_HOST'], url)
login_url = "%saccounts/login/" % url
send_mail(
subject='SigNetSim user account activated',
message='',
html_message='Dear %s, <br/><br/>Your SigNetSim account has just been activated ! <br>You can start using it right now, by going to the page <br/>%s<br/>' % (
username, login_url),
from_email=settings.EMAIL_ADDRESS,
recipient_list=[email],
fail_silently=True,
) | agpl-3.0 |
pyzaba/pyzaba | lib/requests/packages/charade/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
def filter_high_bit_only(self, aBuf):
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
| bsd-3-clause |
Jin-W-FS/chinese-words-segmentation-test | WordDict.py | 1 | 1045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
PUNCTUATIONS = (set(u'''`~!@#$%^&*()_+-={}[]|\:";'<>?,./ ''') |
set(u'''~`!@#¥%……&*()——+-=『』【】、‘’“”:;《》?,。/''')) - \
set(u'''_''') # not punctuation
class WordDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.total = sum(self[k][0] for k in self)
self.word_maxlen = max(len(k) for k in self)
self.punctuations = PUNCTUATIONS
    def add(self, word, freq = 1, attr = ''):
        if word not in self:
            # new words start with zero frequency and an empty attribute set
            self[word] = (0, set())
            if len(word) > self.word_maxlen:
                self.word_maxlen = len(word)
        f, a = self[word]
        self[word] = (f + freq, a | set(attr))
        self.total += freq
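    # Usage sketch (assumes a dictionary file whose lines are "word freq attrs",
    # which is the format Load() below expects; the filename is made up):
    #
    #     with open('words.dic', 'rb') as f:
    #         wd = WordDict.Load(f)
    #     wd.add(u'word', freq=2, attr='n')
    #     print(wd[u'word'], wd.total, wd.word_maxlen)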
@staticmethod
def Load(file, encoding = 'utf-8'):
d = {}
for line in file:
s = line.decode(encoding).split()
d[s[0]] = (int(s[1]), set(s[2]))
return WordDict(d)
| gpl-2.0 |
SU-ECE-17-7/ibeis | ibeis/web/routes.py | 1 | 58514 | # -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
"""
from __future__ import absolute_import, division, print_function
import random
import math
from flask import request, current_app, url_for
from ibeis.control import controller_inject
from ibeis import constants as const
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY
from ibeis.web import appfuncs as appf
from ibeis.web import routes_ajax
import utool as ut
import vtool as vt
import numpy as np
register_route = controller_inject.get_ibeis_flask_route(__name__)
@register_route('/', methods=['GET'])
def root():
return appf.template(None)
@register_route('/view/', methods=['GET'])
def view():
def _date_list(gid_list):
unixtime_list = ibs.get_image_unixtime(gid_list)
datetime_list = [
ut.unixtime_to_datetimestr(unixtime)
if unixtime is not None else
'UNKNOWN'
for unixtime in unixtime_list
]
datetime_split_list = [ datetime.split(' ') for datetime in datetime_list ]
date_list = [ datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN' for datetime_split in datetime_split_list ]
return date_list
def filter_annots_imageset(aid_list):
try:
imgsetid = request.args.get('imgsetid', '')
imgsetid = int(imgsetid)
imgsetid_list = ibs.get_valid_imgsetids()
assert imgsetid in imgsetid_list
except:
print('ERROR PARSING IMAGESET ID FOR ANNOTATION FILTERING')
return aid_list
imgsetids_list = ibs.get_annot_imgsetids(aid_list)
aid_list = [
aid
for aid, imgsetid_list_ in zip(aid_list, imgsetids_list)
if imgsetid in imgsetid_list_
]
return aid_list
def filter_images_imageset(gid_list):
try:
imgsetid = request.args.get('imgsetid', '')
imgsetid = int(imgsetid)
imgsetid_list = ibs.get_valid_imgsetids()
assert imgsetid in imgsetid_list
except:
print('ERROR PARSING IMAGESET ID FOR IMAGE FILTERING')
return gid_list
imgsetids_list = ibs.get_image_imgsetids(gid_list)
gid_list = [
gid
for gid, imgsetid_list_ in zip(gid_list, imgsetids_list)
if imgsetid in imgsetid_list_
]
return gid_list
def filter_names_imageset(nid_list):
try:
imgsetid = request.args.get('imgsetid', '')
imgsetid = int(imgsetid)
imgsetid_list = ibs.get_valid_imgsetids()
assert imgsetid in imgsetid_list
except:
            print('ERROR PARSING IMAGESET ID FOR NAME FILTERING')
return nid_list
aids_list = ibs.get_name_aids(nid_list)
imgsetids_list = [
set(ut.flatten(ibs.get_annot_imgsetids(aid_list)))
for aid_list in aids_list
]
nid_list = [
nid
for nid, imgsetid_list_ in zip(nid_list, imgsetids_list)
if imgsetid in imgsetid_list_
]
return nid_list
ibs = current_app.ibs
filter_kw = {
'multiple': None,
'minqual': 'good',
'is_known': True,
'min_pername': 1,
'view': ['right'],
}
aid_list = ibs.get_valid_aids()
aid_list = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
aid_list = filter_annots_imageset(aid_list)
gid_list = ibs.get_annot_gids(aid_list)
unixtime_list = ibs.get_image_unixtime(gid_list)
nid_list = ibs.get_annot_name_rowids(aid_list)
date_list = _date_list(gid_list)
flagged_date_list = ['2016/01/29', '2016/01/30', '2016/01/31', '2016/02/01']
gid_list_unique = list(set(gid_list))
date_list_unique = _date_list(gid_list_unique)
date_taken_dict = {}
for gid, date in zip(gid_list_unique, date_list_unique):
if date not in flagged_date_list:
continue
if date not in date_taken_dict:
date_taken_dict[date] = [0, 0]
date_taken_dict[date][1] += 1
gid_list_all = ibs.get_valid_gids()
gid_list_all = filter_images_imageset(gid_list_all)
date_list_all = _date_list(gid_list_all)
for gid, date in zip(gid_list_all, date_list_all):
if date not in flagged_date_list:
continue
if date in date_taken_dict:
date_taken_dict[date][0] += 1
value = 0
label_list = []
value_list = []
index_list = []
seen_set = set()
current_seen_set = set()
previous_seen_set = set()
last_date = None
date_seen_dict = {}
for index, (unixtime, aid, nid, date) in enumerate(sorted(zip(unixtime_list, aid_list, nid_list, date_list))):
if date not in flagged_date_list:
continue
index_list.append(index + 1)
# Add to counters
if date not in date_seen_dict:
date_seen_dict[date] = [0, 0, 0, 0]
date_seen_dict[date][0] += 1
if nid not in current_seen_set:
current_seen_set.add(nid)
date_seen_dict[date][1] += 1
if nid in previous_seen_set:
date_seen_dict[date][3] += 1
if nid not in seen_set:
seen_set.add(nid)
value += 1
date_seen_dict[date][2] += 1
# Add to register
value_list.append(value)
# Reset step (per day)
if date != last_date and date != 'UNKNOWN':
last_date = date
previous_seen_set = set(current_seen_set)
current_seen_set = set()
label_list.append(date)
else:
label_list.append('')
# def optimization1(x, a, b, c):
# return a * np.log(b * x) + c
# def optimization2(x, a, b, c):
# return a * np.sqrt(x) ** b + c
# def optimization3(x, a, b, c):
# return 1.0 / (a * np.exp(-b * x) + c)
# def process(func, opts, domain, zero_index, zero_value):
# values = func(domain, *opts)
# diff = values[zero_index] - zero_value
# values -= diff
# values[ values < 0.0 ] = 0.0
# values[:zero_index] = 0.0
# values = values.astype(int)
# return list(values)
# optimization_funcs = [
# optimization1,
# optimization2,
# optimization3,
# ]
# # Get data
# x = np.array(index_list)
# y = np.array(value_list)
# # Fit curves
# end = int(len(index_list) * 1.25)
# domain = np.array(range(1, end))
# zero_index = len(value_list) - 1
# zero_value = value_list[zero_index]
# regressed_opts = [ curve_fit(func, x, y)[0] for func in optimization_funcs ]
# prediction_list = [
# process(func, opts, domain, zero_index, zero_value)
# for func, opts in zip(optimization_funcs, regressed_opts)
# ]
# index_list = list(domain)
prediction_list = []
date_seen_dict.pop('UNKNOWN', None)
bar_label_list = sorted(date_seen_dict.keys())
bar_value_list1 = [ date_taken_dict[date][0] for date in bar_label_list ]
bar_value_list2 = [ date_taken_dict[date][1] for date in bar_label_list ]
bar_value_list3 = [ date_seen_dict[date][0] for date in bar_label_list ]
bar_value_list4 = [ date_seen_dict[date][1] for date in bar_label_list ]
bar_value_list5 = [ date_seen_dict[date][2] for date in bar_label_list ]
bar_value_list6 = [ date_seen_dict[date][3] for date in bar_label_list ]
# label_list += ['Models'] + [''] * (len(index_list) - len(label_list) - 1)
# value_list += [0] * (len(index_list) - len(value_list))
# Counts
imgsetid_list = ibs.get_valid_imgsetids()
gid_list = ibs.get_valid_gids()
gid_list = filter_images_imageset(gid_list)
aid_list = ibs.get_valid_aids()
aid_list = filter_annots_imageset(aid_list)
nid_list = ibs.get_valid_nids()
nid_list = filter_names_imageset(nid_list)
# contrib_list = ibs.get_valid_contrib_rowids()
note_list = ibs.get_image_notes(gid_list)
note_list = [
','.join(note.split(',')[:-1])
for note in note_list
]
contrib_list = set(note_list)
# nid_list = ibs.get_valid_nids()
aid_list_count = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
aid_list_count = filter_annots_imageset(aid_list_count)
gid_list_count = list(set(ibs.get_annot_gids(aid_list_count)))
nid_list_count_dup = ibs.get_annot_name_rowids(aid_list_count)
nid_list_count = list(set(nid_list_count_dup))
    # Calculate the Petersen-Lincoln index from the last two days
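    # Lincoln-Petersen recap: with n1 names sighted on day one, n2 sighted on day
    # two, and m sighted on both days, the population estimate is N ~= n1 * n2 / m
    # (e.g. 100 and 80 sightings with 40 resights give N ~= 200);
    # sight_resight_count() below is assumed to implement this estimate and
    # return it together with an error term, unpacked into (pl_index, pl_error).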
from ibeis.other import dbinfo as dbinfo_
try:
try:
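            # NOTE: this raise short-circuits the (currently disabled)
            # dbinfo_.estimate_ggr_count() path so execution falls through to the
            # manual sight-resight computation in the KeyError handler below.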
raise KeyError()
vals = dbinfo_.estimate_ggr_count(ibs)
nsight1, nsight2, resight, pl_index, pl_error = vals
# pl_index = 'Undefined - Zero recaptured (k = 0)'
except KeyError:
index1 = bar_label_list.index('2016/01/30')
index2 = bar_label_list.index('2016/01/31')
c1 = bar_value_list4[index1]
c2 = bar_value_list4[index2]
c3 = bar_value_list6[index2]
pl_index, pl_error = dbinfo_.sight_resight_count(c1, c2, c3)
except (IndexError, ValueError):
pl_index = 0
pl_error = 0
# Get the markers
gid_list_markers = ibs.get_annot_gids(aid_list_count)
gps_list_markers = map(list, ibs.get_image_gps(gid_list_markers))
gps_list_markers_all = map(list, ibs.get_image_gps(gid_list))
REMOVE_DUP_CODE = True
if not REMOVE_DUP_CODE:
# Get the tracks
nid_track_dict = ut.ddict(list)
for nid, gps in zip(nid_list_count_dup, gps_list_markers):
if gps[0] == -1.0 and gps[1] == -1.0:
continue
nid_track_dict[nid].append(gps)
gps_list_tracks = [ nid_track_dict[nid] for nid in sorted(nid_track_dict.keys()) ]
else:
__nid_list, gps_track_list, aid_track_list = ibs.get_name_gps_tracks(aid_list=aid_list_count)
gps_list_tracks = list(map(lambda x: list(map(list, x)), gps_track_list))
gps_list_markers = [ gps for gps in gps_list_markers if tuple(gps) != (-1, -1, ) ]
gps_list_markers_all = [ gps for gps in gps_list_markers_all if tuple(gps) != (-1, -1, ) ]
gps_list_tracks = [
[ gps for gps in gps_list_track if tuple(gps) != (-1, -1, ) ]
for gps_list_track in gps_list_tracks
]
valid_aids = ibs.get_valid_aids()
valid_aids = filter_annots_imageset(valid_aids)
used_gids = list(set( ibs.get_annot_gids(valid_aids) ))
# used_contrib_tags = list(set( ibs.get_image_contributor_tag(used_gids) ))
note_list = ibs.get_image_notes(used_gids)
note_list = [
','.join(note.split(',')[:-1])
for note in note_list
]
used_contrib_tags = set(note_list)
# Get Age and sex (By Annot)
# annot_sex_list = ibs.get_annot_sex(valid_aids_)
# annot_age_months_est_min = ibs.get_annot_age_months_est_min(valid_aids_)
# annot_age_months_est_max = ibs.get_annot_age_months_est_max(valid_aids_)
# age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
# for sex, min_age, max_age in zip(annot_sex_list, annot_age_months_est_min, annot_age_months_est_max):
# if sex not in [0, 1]:
# sex = 2
# # continue
# if (min_age is None or min_age < 12) and max_age < 12:
# age_list[sex][0] += 1
# elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
# age_list[sex][1] += 1
# elif 36 <= min_age and (36 <= max_age or max_age is None):
# age_list[sex][2] += 1
# Get Age and sex (By Name)
name_sex_list = ibs.get_name_sex(nid_list_count)
name_age_months_est_mins_list = ibs.get_name_age_months_est_min(nid_list_count)
name_age_months_est_maxs_list = ibs.get_name_age_months_est_max(nid_list_count)
age_list = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
age_unreviewed = 0
age_ambiguous = 0
for nid, sex, min_ages, max_ages in zip(nid_list_count, name_sex_list, name_age_months_est_mins_list, name_age_months_est_maxs_list):
if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
# print('[web] Invalid name %r: Cannot have more than one age' % (nid, ))
age_ambiguous += 1
continue
min_age = None
max_age = None
if len(min_ages) > 0:
min_age = min_ages[0]
if len(max_ages) > 0:
max_age = max_ages[0]
# Histogram
        if (min_age is None and max_age is None) or (min_age == -1 and max_age == -1):
            # print('[web] Unreviewed name %r: Specify the age for the name' % (nid, ))
age_unreviewed += 1
continue
if sex not in [0, 1]:
sex = 2
# continue
if (min_age is None or min_age < 12) and max_age < 12:
age_list[sex][0] += 1
elif 12 <= min_age and min_age < 24 and 12 <= max_age and max_age < 24:
age_list[sex][1] += 1
elif 24 <= min_age and min_age < 36 and 24 <= max_age and max_age < 36:
age_list[sex][2] += 1
elif 36 <= min_age and (36 <= max_age or max_age is None):
age_list[sex][3] += 1
age_total = sum(map(sum, age_list)) + age_unreviewed + age_ambiguous
age_total = np.nan if age_total == 0 else age_total
age_fmt_str = (lambda x: '% 4d (% 2.02f%%)' % (x, 100 * x / age_total, ))
age_str_list = [
[
age_fmt_str(age)
for age in age_list_
]
for age_list_ in age_list
]
age_str_list.append(age_fmt_str(age_unreviewed))
age_str_list.append(age_fmt_str(age_ambiguous))
# dbinfo_str = dbinfo()
dbinfo_str = 'SKIPPED DBINFO'
path_dict = ibs.compute_ggr_path_dict()
if 'North' in path_dict:
path_dict.pop('North')
if 'Core' in path_dict:
path_dict.pop('Core')
return appf.template('view',
line_index_list=index_list,
line_label_list=label_list,
line_value_list=value_list,
prediction_list=prediction_list,
pl_index=pl_index,
pl_error=pl_error,
gps_list_markers=gps_list_markers,
gps_list_markers_all=gps_list_markers_all,
gps_list_tracks=gps_list_tracks,
path_dict=path_dict,
bar_label_list=bar_label_list,
bar_value_list1=bar_value_list1,
bar_value_list2=bar_value_list2,
bar_value_list3=bar_value_list3,
bar_value_list4=bar_value_list4,
bar_value_list5=bar_value_list5,
bar_value_list6=bar_value_list6,
age_list=age_list,
age_str_list=age_str_list,
age_ambiguous=age_ambiguous,
age_unreviewed=age_unreviewed,
age_total=age_total,
dbinfo_str=dbinfo_str,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
gid_list=gid_list,
gid_list_str=','.join(map(str, gid_list)),
num_gids=len(gid_list),
contrib_list=contrib_list,
contrib_list_str=','.join(map(str, contrib_list)),
num_contribs=len(contrib_list),
gid_list_count=gid_list_count,
gid_list_count_str=','.join(map(str, gid_list_count)),
num_gids_count=len(gid_list_count),
aid_list=aid_list,
aid_list_str=','.join(map(str, aid_list)),
num_aids=len(aid_list),
aid_list_count=aid_list_count,
aid_list_count_str=','.join(map(str, aid_list_count)),
num_aids_count=len(aid_list_count),
nid_list=nid_list,
nid_list_str=','.join(map(str, nid_list)),
num_nids=len(nid_list),
nid_list_count=nid_list_count,
nid_list_count_str=','.join(map(str, nid_list_count)),
num_nids_count=len(nid_list_count),
used_gids=used_gids,
num_used_gids=len(used_gids),
used_contribs=used_contrib_tags,
num_used_contribs=len(used_contrib_tags),
__wrapper_header__=False)
@register_route('/view/imagesets/', methods=['GET'])
def view_imagesets():
ibs = current_app.ibs
filtered = True
imgsetid = request.args.get('imgsetid', '')
if len(imgsetid) > 0:
imgsetid_list = imgsetid.strip().split(',')
imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
else:
imgsetid_list = ibs.get_valid_imgsetids()
filtered = False
start_time_posix_list = ibs.get_imageset_start_time_posix(imgsetid_list)
datetime_list = [
ut.unixtime_to_datetimestr(start_time_posix)
if start_time_posix is not None else
'Unknown'
for start_time_posix in start_time_posix_list
]
gids_list = [ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ]
aids_list = [ ut.flatten(ibs.get_image_aids(gid_list)) for gid_list in gids_list ]
images_reviewed_list = [ appf.imageset_image_processed(ibs, gid_list) for gid_list in gids_list ]
annots_reviewed_viewpoint_list = [ appf.imageset_annot_viewpoint_processed(ibs, aid_list) for aid_list in aids_list ]
annots_reviewed_quality_list = [ appf.imageset_annot_quality_processed(ibs, aid_list) for aid_list in aids_list ]
image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality) for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
imageset_list = zip(
imgsetid_list,
ibs.get_imageset_text(imgsetid_list),
ibs.get_imageset_num_gids(imgsetid_list),
image_processed_list,
ibs.get_imageset_num_aids(imgsetid_list),
annot_processed_viewpoint_list,
annot_processed_quality_list,
start_time_posix_list,
datetime_list,
reviewed_list,
)
imageset_list.sort(key=lambda t: t[7])
return appf.template('view', 'imagesets',
filtered=filtered,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
imageset_list=imageset_list,
num_imagesets=len(imageset_list))
@register_route('/view/image/<gid>/', methods=['GET'])
def image_view_api(gid=None, thumbnail=False, fresh=False, **kwargs):
r"""
Returns the base64 encoded image of image <gid>
RESTful:
Method: GET
        URL: /view/image/<gid>/
"""
encoded = routes_ajax.image_src(gid, thumbnail=thumbnail, fresh=fresh, **kwargs)
return appf.template(None, 'single', encoded=encoded)
@register_route('/view/images/', methods=['GET'])
def view_images():
ibs = current_app.ibs
filtered = True
imgsetid_list = []
gid = request.args.get('gid', '')
imgsetid = request.args.get('imgsetid', '')
page = max(0, int(request.args.get('page', 1)))
if len(gid) > 0:
gid_list = gid.strip().split(',')
gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
elif len(imgsetid) > 0:
imgsetid_list = imgsetid.strip().split(',')
imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
else:
gid_list = ibs.get_valid_gids()
filtered = False
# Page
page_start = min(len(gid_list), (page - 1) * appf.PAGE_SIZE)
page_end = min(len(gid_list), page * appf.PAGE_SIZE)
page_total = int(math.ceil(len(gid_list) / appf.PAGE_SIZE))
page_previous = None if page_start == 0 else page - 1
page_next = None if page_end == len(gid_list) else page + 1
gid_list = gid_list[page_start:page_end]
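    # Worked example of the slicing above: with page size P = appf.PAGE_SIZE and
    # N images, page k keeps indices [(k - 1) * P, min(N, k * P)) and
    # page_total == ceil(N / P); page_previous / page_next are None at the ends.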
print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(gid_list), page_previous, page_next, ))
image_unixtime_list = ibs.get_image_unixtime(gid_list)
datetime_list = [
ut.unixtime_to_datetimestr(image_unixtime)
if image_unixtime is not None
else
'Unknown'
for image_unixtime in image_unixtime_list
]
image_list = zip(
gid_list,
[ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_image_imgsetids(gid_list) ],
ibs.get_image_gnames(gid_list),
image_unixtime_list,
datetime_list,
ibs.get_image_gps(gid_list),
ibs.get_image_party_tag(gid_list),
ibs.get_image_contributor_tag(gid_list),
ibs.get_image_notes(gid_list),
appf.imageset_image_processed(ibs, gid_list),
)
image_list.sort(key=lambda t: t[3])
return appf.template('view', 'images',
filtered=filtered,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
gid_list=gid_list,
gid_list_str=','.join(map(str, gid_list)),
num_gids=len(gid_list),
image_list=image_list,
num_images=len(image_list),
page=page,
page_start=page_start,
page_end=page_end,
page_total=page_total,
page_previous=page_previous,
page_next=page_next)
@register_route('/view/annotations/', methods=['GET'])
def view_annotations():
ibs = current_app.ibs
filtered = True
imgsetid_list = []
gid_list = []
aid = request.args.get('aid', '')
gid = request.args.get('gid', '')
imgsetid = request.args.get('imgsetid', '')
page = max(0, int(request.args.get('page', 1)))
if len(aid) > 0:
aid_list = aid.strip().split(',')
aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
elif len(gid) > 0:
gid_list = gid.strip().split(',')
gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
elif len(imgsetid) > 0:
imgsetid_list = imgsetid.strip().split(',')
imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
else:
aid_list = ibs.get_valid_aids()
filtered = False
# Page
page_start = min(len(aid_list), (page - 1) * appf.PAGE_SIZE)
page_end = min(len(aid_list), page * appf.PAGE_SIZE)
page_total = int(math.ceil(len(aid_list) / appf.PAGE_SIZE))
page_previous = None if page_start == 0 else page - 1
page_next = None if page_end == len(aid_list) else page + 1
aid_list = aid_list[page_start:page_end]
print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(aid_list), page_previous, page_next, ))
annotation_list = zip(
aid_list,
ibs.get_annot_gids(aid_list),
[ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list) ],
ibs.get_annot_image_names(aid_list),
ibs.get_annot_names(aid_list),
ibs.get_annot_exemplar_flags(aid_list),
ibs.get_annot_species_texts(aid_list),
ibs.get_annot_yaw_texts(aid_list),
ibs.get_annot_quality_texts(aid_list),
ibs.get_annot_sex_texts(aid_list),
ibs.get_annot_age_months_est(aid_list),
ibs.get_annot_reviewed(aid_list),
# [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(appf.imageset_annot_viewpoint_processed(ibs, aid_list), appf.imageset_annot_quality_processed(ibs, aid_list)) ],
)
annotation_list.sort(key=lambda t: t[0])
return appf.template('view', 'annotations',
filtered=filtered,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
gid_list=gid_list,
gid_list_str=','.join(map(str, gid_list)),
num_gids=len(gid_list),
aid_list=aid_list,
aid_list_str=','.join(map(str, aid_list)),
num_aids=len(aid_list),
annotation_list=annotation_list,
num_annotations=len(annotation_list),
page=page,
page_start=page_start,
page_end=page_end,
page_total=page_total,
page_previous=page_previous,
page_next=page_next)
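# Paged listing of names (individuals).  Names can be filtered by nid directly
# or derived from aid/gid/imageset parameters; the page size is reduced to
# PAGE_SIZE / 5 because every row expands into the name's full annotation list.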
@register_route('/view/names/', methods=['GET'])
def view_names():
ibs = current_app.ibs
filtered = True
aid_list = []
imgsetid_list = []
gid_list = []
nid = request.args.get('nid', '')
aid = request.args.get('aid', '')
gid = request.args.get('gid', '')
imgsetid = request.args.get('imgsetid', '')
page = max(0, int(request.args.get('page', 1)))
if len(nid) > 0:
nid_list = nid.strip().split(',')
nid_list = [ None if nid_ == 'None' or nid_ == '' else int(nid_) for nid_ in nid_list ]
if len(aid) > 0:
aid_list = aid.strip().split(',')
aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
nid_list = ibs.get_annot_name_rowids(aid_list)
elif len(gid) > 0:
gid_list = gid.strip().split(',')
gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
nid_list = ibs.get_annot_name_rowids(aid_list)
elif len(imgsetid) > 0:
imgsetid_list = imgsetid.strip().split(',')
imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
nid_list = ibs.get_annot_name_rowids(aid_list)
else:
nid_list = ibs.get_valid_nids()
filtered = False
# Page
appf.PAGE_SIZE_ = int(appf.PAGE_SIZE / 5)
page_start = min(len(nid_list), (page - 1) * appf.PAGE_SIZE_)
page_end = min(len(nid_list), page * appf.PAGE_SIZE_)
page_total = int(math.ceil(len(nid_list) / appf.PAGE_SIZE_))
page_previous = None if page_start == 0 else page - 1
page_next = None if page_end == len(nid_list) else page + 1
nid_list = nid_list[page_start:page_end]
print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(nid_list), page_previous, page_next, ))
aids_list = ibs.get_name_aids(nid_list)
annotations_list = [ zip(
aid_list_,
ibs.get_annot_gids(aid_list_),
[ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list_) ],
ibs.get_annot_image_names(aid_list_),
ibs.get_annot_names(aid_list_),
ibs.get_annot_exemplar_flags(aid_list_),
ibs.get_annot_species_texts(aid_list_),
ibs.get_annot_yaw_texts(aid_list_),
ibs.get_annot_quality_texts(aid_list_),
ibs.get_annot_sex_texts(aid_list_),
ibs.get_annot_age_months_est(aid_list_),
[ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(appf.imageset_annot_viewpoint_processed(ibs, aid_list_), appf.imageset_annot_quality_processed(ibs, aid_list_)) ],
) for aid_list_ in aids_list ]
name_list = zip(
nid_list,
annotations_list
)
name_list.sort(key=lambda t: t[0])
return appf.template('view', 'names',
filtered=filtered,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
gid_list=gid_list,
gid_list_str=','.join(map(str, gid_list)),
num_gids=len(gid_list),
aid_list=aid_list,
aid_list_str=','.join(map(str, aid_list)),
num_aids=len(aid_list),
nid_list=nid_list,
nid_list_str=','.join(map(str, nid_list)),
num_nids=len(nid_list),
name_list=name_list,
num_names=len(name_list),
page=page,
page_start=page_start,
page_end=page_end,
page_total=page_total,
page_previous=page_previous,
page_next=page_next)
@register_route('/turk/', methods=['GET'])
def turk():
imgsetid = request.args.get('imgsetid', '')
imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
return appf.template('turk', None, imgsetid=imgsetid)
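# Helper kept as an example of the object-oriented ibs.images()/annots interface;
# it converts each annotation bbox into percentages of the image dimensions.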
def _make_review_image_info(ibs, gid):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.web.apis_detect import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> gid = ibs.get_valid_gids()[0]
"""
# Shows how to use new object-like interface to populate data
import numpy as np
image = ibs.images([gid])[0]
annots = image.annots
width, height = image.sizes
bbox_denom = np.array([width, height, width, height])
annotation_list = []
for aid in annots.aids:
annot_ = ibs.annots(aid)[0]
bbox = np.array(annot_.bboxes)
bbox_percent = bbox / bbox_denom * 100
temp = {
'left' : bbox_percent[0],
'top' : bbox_percent[1],
'width' : bbox_percent[2],
'height' : bbox_percent[3],
'label' : annot_.species,
'id' : annot_.aids,
'theta' : annot_.thetas,
'tags' : annot_.case_tags,
}
        annotation_list.append(temp)
    return annotation_list
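# Detection turking: serves one unreviewed image at a time (chosen at random when
# no gid is given) with its existing annotations expressed as percent-of-image
# bounding boxes so the frontend can overlay them at any display size.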
@register_route('/turk/detection/', methods=['GET'])
def turk_detection():
ibs = current_app.ibs
refer_aid = request.args.get('refer_aid', None)
imgsetid = request.args.get('imgsetid', '')
imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
reviewed_list = appf.imageset_image_processed(ibs, gid_list)
progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
gid = request.args.get('gid', '')
if len(gid) > 0:
gid = int(gid)
else:
gid_list_ = ut.filterfalse_items(gid_list, reviewed_list)
if len(gid_list_) == 0:
gid = None
else:
# gid = gid_list_[0]
gid = random.choice(gid_list_)
previous = request.args.get('previous', None)
finished = gid is None
review = 'review' in request.args.keys()
display_instructions = request.cookies.get('ia-detection_instructions_seen', 1) == 0
display_species_examples = False # request.cookies.get('ia-detection_example_species_seen', 0) == 0
if not finished:
gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
imgdata = ibs.get_image_imgdata(gid)
image_src = appf.embed_image_html(imgdata)
# Get annotations
width, height = ibs.get_image_sizes(gid)
aid_list = ibs.get_image_aids(gid)
annot_bbox_list = ibs.get_annot_bboxes(aid_list)
annot_thetas_list = ibs.get_annot_thetas(aid_list)
species_list = ibs.get_annot_species_texts(aid_list)
# Get annotation bounding boxes
annotation_list = []
for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
temp = {}
temp['left'] = 100.0 * (annot_bbox[0] / width)
temp['top'] = 100.0 * (annot_bbox[1] / height)
temp['width'] = 100.0 * (annot_bbox[2] / width)
temp['height'] = 100.0 * (annot_bbox[3] / height)
temp['label'] = species
temp['id'] = aid
temp['theta'] = float(annot_theta)
annotation_list.append(temp)
if len(species_list) > 0:
species = max(set(species_list), key=species_list.count) # Get most common species
elif appf.default_species(ibs) is not None:
species = appf.default_species(ibs)
else:
species = KEY_DEFAULTS[SPECIES_KEY]
else:
gpath = None
species = None
image_src = None
annotation_list = []
callback_url = '%s?imgsetid=%s' % (url_for('submit_detection'), imgsetid, )
return appf.template('turk', 'detection',
imgsetid=imgsetid,
gid=gid,
refer_aid=refer_aid,
species=species,
image_path=gpath,
image_src=image_src,
previous=previous,
imagesettext=imagesettext,
progress=progress,
finished=finished,
annotation_list=annotation_list,
display_instructions=display_instructions,
display_species_examples=display_species_examples,
callback_url=callback_url,
callback_method='POST',
EMBEDDED_CSS=None,
EMBEDDED_JAVASCRIPT=None,
review=review)
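# Same detection form for a single, explicitly requested gid, rendered without
# the page wrapper (__wrapper__=False) so it can be embedded dynamically.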
@register_route('/turk/detection/dynamic/', methods=['GET'])
def turk_detection_dynamic():
ibs = current_app.ibs
gid = request.args.get('gid', None)
gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
image = ibs.get_image_imgdata(gid)
image_src = appf.embed_image_html(image)
# Get annotations
width, height = ibs.get_image_sizes(gid)
aid_list = ibs.get_image_aids(gid)
annot_bbox_list = ibs.get_annot_bboxes(aid_list)
annot_thetas_list = ibs.get_annot_thetas(aid_list)
species_list = ibs.get_annot_species_texts(aid_list)
# Get annotation bounding boxes
annotation_list = []
for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
temp = {}
temp['left'] = 100.0 * (annot_bbox[0] / width)
temp['top'] = 100.0 * (annot_bbox[1] / height)
temp['width'] = 100.0 * (annot_bbox[2] / width)
temp['height'] = 100.0 * (annot_bbox[3] / height)
temp['label'] = species
temp['id'] = aid
temp['theta'] = float(annot_theta)
annotation_list.append(temp)
if len(species_list) > 0:
species = max(set(species_list), key=species_list.count) # Get most common species
elif appf.default_species(ibs) is not None:
species = appf.default_species(ibs)
else:
species = KEY_DEFAULTS[SPECIES_KEY]
callback_url = '%s?imgsetid=%s' % (url_for('submit_detection'), gid, )
return appf.template('turk', 'detection_dynamic',
gid=gid,
refer_aid=None,
species=species,
image_path=gpath,
image_src=image_src,
annotation_list=annotation_list,
callback_url=callback_url,
callback_method='POST',
EMBEDDED_CSS=None,
EMBEDDED_JAVASCRIPT=None,
__wrapper__=False)
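# Combined annotation turking: species, viewpoint, quality and the "multiple"
# flag are edited in one form.  Stored qualities are collapsed to a two-level
# scale before being shown.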
@register_route('/turk/annotation/', methods=['GET'])
def turk_annotation():
"""
CommandLine:
python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1
Example:
>>> # SCRIPT
>>> from ibeis.other.ibsfuncs import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
>>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
>>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
>>> ibs.start_web_annot_groupreview(aid_list)
"""
ibs = current_app.ibs
tup = appf.get_turk_annot_args(appf.imageset_annot_processed)
(aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
review = 'review' in request.args.keys()
finished = aid is None
display_instructions = request.cookies.get('ia-annotation_instructions_seen', 1) == 0
if not finished:
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
# image_src = routes_ajax.annotation_src(aid)
species = ibs.get_annot_species_texts(aid)
viewpoint_value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
quality_value = ibs.get_annot_qualities(aid)
if quality_value in [-1, None]:
quality_value = None
elif quality_value > 2:
quality_value = 2
elif quality_value <= 2:
quality_value = 1
multiple_value = ibs.get_annot_multiple(aid) == 1
else:
gid = None
gpath = None
image_src = None
species = None
viewpoint_value = None
quality_value = None
multiple_value = None
imagesettext = ibs.get_imageset_text(imgsetid)
species_rowids = ibs._get_all_species_rowids()
species_nice_list = ibs.get_species_nice(species_rowids)
combined_list = sorted(zip(species_nice_list, species_rowids))
species_nice_list = [ combined[0] for combined in combined_list ]
species_rowids = [ combined[1] for combined in combined_list ]
species_text_list = ibs.get_species_texts(species_rowids)
species_selected_list = [ species == species_ for species_ in species_text_list ]
species_list = zip(species_nice_list, species_text_list, species_selected_list)
species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
callback_url = url_for('submit_annotation')
return appf.template('turk', 'annotation',
imgsetid=imgsetid,
src_ag=src_ag,
dst_ag=dst_ag,
gid=gid,
aid=aid,
viewpoint_value=viewpoint_value,
quality_value=quality_value,
multiple_value=multiple_value,
image_path=gpath,
image_src=image_src,
previous=previous,
species_list=species_list,
imagesettext=imagesettext,
progress=progress,
finished=finished,
display_instructions=display_instructions,
callback_url=callback_url,
callback_method='POST',
EMBEDDED_CSS=None,
EMBEDDED_JAVASCRIPT=None,
review=review)
@register_route('/turk/annotation/dynamic/', methods=['GET'])
def turk_annotation_dynamic():
ibs = current_app.ibs
aid = request.args.get('aid', None)
imgsetid = request.args.get('imgsetid', None)
review = 'review' in request.args.keys()
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
species = ibs.get_annot_species_texts(aid)
viewpoint_value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
quality_value = ibs.get_annot_qualities(aid)
if quality_value == -1:
quality_value = None
if quality_value == 0:
quality_value = 1
species_rowids = ibs._get_all_species_rowids()
species_nice_list = ibs.get_species_nice(species_rowids)
combined_list = sorted(zip(species_nice_list, species_rowids))
species_nice_list = [ combined[0] for combined in combined_list ]
species_rowids = [ combined[1] for combined in combined_list ]
species_text_list = ibs.get_species_texts(species_rowids)
species_selected_list = [ species == species_ for species_ in species_text_list ]
species_list = zip(species_nice_list, species_text_list, species_selected_list)
species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
callback_url = url_for('submit_annotation')
return appf.template('turk', 'annotation_dynamic',
imgsetid=imgsetid,
gid=gid,
aid=aid,
viewpoint_value=viewpoint_value,
quality_value=quality_value,
image_path=gpath,
image_src=image_src,
species_list=species_list,
callback_url=callback_url,
callback_method='POST',
EMBEDDED_CSS=None,
EMBEDDED_JAVASCRIPT=None,
review=review,
__wrapper__=False)
@register_route('/turk/viewpoint/', methods=['GET'])
def turk_viewpoint():
"""
CommandLine:
python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1
Example:
>>> # SCRIPT
>>> from ibeis.other.ibsfuncs import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
>>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
>>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
>>> ibs.start_web_annot_groupreview(aid_list)
"""
ibs = current_app.ibs
tup = appf.get_turk_annot_args(appf.imageset_annot_viewpoint_processed)
(aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
review = 'review' in request.args.keys()
finished = aid is None
display_instructions = request.cookies.get('ia-viewpoint_instructions_seen', 1) == 0
if not finished:
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
species = ibs.get_annot_species_texts(aid)
else:
gid = None
gpath = None
image_src = None
species = None
imagesettext = ibs.get_imageset_text(imgsetid)
species_rowids = ibs._get_all_species_rowids()
species_nice_list = ibs.get_species_nice(species_rowids)
combined_list = sorted(zip(species_nice_list, species_rowids))
species_nice_list = [ combined[0] for combined in combined_list ]
species_rowids = [ combined[1] for combined in combined_list ]
species_text_list = ibs.get_species_texts(species_rowids)
species_selected_list = [ species == species_ for species_ in species_text_list ]
species_list = zip(species_nice_list, species_text_list, species_selected_list)
species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
return appf.template('turk', 'viewpoint',
imgsetid=imgsetid,
src_ag=src_ag,
dst_ag=dst_ag,
gid=gid,
aid=aid,
value=value,
image_path=gpath,
image_src=image_src,
previous=previous,
species_list=species_list,
imagesettext=imagesettext,
progress=progress,
finished=finished,
display_instructions=display_instructions,
review=review)
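# Quality-only turking: a stored quality of -1 is treated as unset and 0 is
# bumped to 1 before the form is rendered.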
@register_route('/turk/quality/', methods=['GET'])
def turk_quality():
"""
PZ Needs Tags:
17242
14468
14427
15946
14771
14084
4102
6074
3409
GZ Needs Tags;
1302
CommandLine:
python -m ibeis.web.app --exec-turk_quality --db PZ_Master1
python -m ibeis.web.app --exec-turk_quality --db GZ_Master1
python -m ibeis.web.app --exec-turk_quality --db GIRM_Master1
Example:
>>> # SCRIPT
>>> from ibeis.other.ibsfuncs import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aid_list_ = ibs.find_unlabeled_name_members(qual=True)
>>> valid_views = ['primary', 'primary1', 'primary-1']
>>> aid_list = ibs.filter_aids_to_viewpoint(aid_list_, valid_views, unknown_ok=False)
>>> ibs.start_web_annot_groupreview(aid_list)
"""
ibs = current_app.ibs
tup = appf.get_turk_annot_args(appf.imageset_annot_quality_processed)
(aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
value = ibs.get_annot_qualities(aid)
if value == -1:
value = None
if value == 0:
value = 1
review = 'review' in request.args.keys()
finished = aid is None
display_instructions = request.cookies.get('ia-quality_instructions_seen', 1) == 0
if not finished:
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
else:
gid = None
gpath = None
image_src = None
imagesettext = ibs.get_imageset_text(imgsetid)
return appf.template('turk', 'quality',
imgsetid=imgsetid,
src_ag=src_ag,
dst_ag=dst_ag,
gid=gid,
aid=aid,
value=value,
image_path=gpath,
image_src=image_src,
previous=previous,
imagesettext=imagesettext,
progress=progress,
finished=finished,
display_instructions=display_instructions,
review=review)
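# "Additional" turking collects sex and age metadata; the stored (min, max)
# month estimate is mapped onto the discrete age buckets used by the form, and
# any 'GGR Special Zone' imageset the image belongs to is reported as region_str.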
@register_route('/turk/additional/', methods=['GET'])
def turk_additional():
ibs = current_app.ibs
imgsetid = request.args.get('imgsetid', '')
imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
nid_list = ibs.get_annot_nids(aid_list)
reviewed_list = appf.imageset_annot_additional_processed(ibs, aid_list, nid_list)
try:
progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
except ZeroDivisionError:
progress = '0.00'
imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
aid = request.args.get('aid', '')
if len(aid) > 0:
aid = int(aid)
else:
aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
if len(aid_list_) == 0:
aid = None
else:
# aid = aid_list_[0]
aid = random.choice(aid_list_)
previous = request.args.get('previous', None)
value_sex = ibs.get_annot_sex([aid])[0]
if value_sex >= 0:
value_sex += 2
else:
value_sex = None
value_age_min, value_age_max = ibs.get_annot_age_months_est([aid])[0]
value_age = None
    if (value_age_min == -1 or value_age_min is None) and (value_age_max == -1 or value_age_max is None):
        value_age = 1
    if (value_age_min == 0 or value_age_min is None) and value_age_max == 2:
        value_age = 2
    elif value_age_min == 3 and value_age_max == 5:
        value_age = 3
    elif value_age_min == 6 and value_age_max == 11:
        value_age = 4
    elif value_age_min == 12 and value_age_max == 23:
        value_age = 5
    elif value_age_min == 24 and value_age_max == 35:
        value_age = 6
    elif value_age_min == 36 and (value_age_max > 36 or value_age_max is None):
        value_age = 7
review = 'review' in request.args.keys()
finished = aid is None
display_instructions = request.cookies.get('ia-additional_instructions_seen', 1) == 0
if not finished:
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
else:
gid = None
gpath = None
image_src = None
name_aid_list = None
nid = ibs.get_annot_name_rowids(aid)
if nid is not None:
name_aid_list = ibs.get_name_aids(nid)
quality_list = ibs.get_annot_qualities(name_aid_list)
quality_text_list = ibs.get_annot_quality_texts(name_aid_list)
yaw_text_list = ibs.get_annot_yaw_texts(name_aid_list)
name_aid_combined_list = list(zip(
name_aid_list,
quality_list,
quality_text_list,
yaw_text_list,
))
name_aid_combined_list.sort(key=lambda t: t[1], reverse=True)
else:
name_aid_combined_list = []
region_str = 'UNKNOWN'
if aid is not None and gid is not None:
imgsetid_list = ibs.get_image_imgsetids(gid)
imgset_text_list = ibs.get_imageset_text(imgsetid_list)
imgset_text_list = [
imgset_text
for imgset_text in imgset_text_list
if 'GGR Special Zone' in imgset_text
]
assert len(imgset_text_list) < 2
if len(imgset_text_list) == 1:
region_str = imgset_text_list[0]
return appf.template('turk', 'additional',
imgsetid=imgsetid,
gid=gid,
aid=aid,
region_str=region_str,
value_sex=value_sex,
value_age=value_age,
image_path=gpath,
name_aid_combined_list=name_aid_combined_list,
image_src=image_src,
previous=previous,
imagesettext=imagesettext,
progress=progress,
finished=finished,
display_instructions=display_instructions,
review=review)
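# Group review: candidate aids either come from the species/viewpoint CNN
# validator (?prefill=...) or from an explicit aid_list query parameter.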
@register_route('/group_review/', methods=['GET'])
def group_review():
prefill = request.args.get('prefill', '')
if len(prefill) > 0:
ibs = current_app.ibs
aid_list = ibs.get_valid_aids()
bad_species_list, bad_viewpoint_list = ibs.validate_annot_species_viewpoint_cnn(aid_list)
GROUP_BY_PREDICTION = True
if GROUP_BY_PREDICTION:
grouped_dict = ut.group_items(bad_viewpoint_list, ut.get_list_column(bad_viewpoint_list, 3))
grouped_list = grouped_dict.values()
regrouped_items = ut.flatten(ut.sortedby(grouped_list, map(len, grouped_list)))
candidate_aid_list = ut.get_list_column(regrouped_items, 0)
else:
candidate_aid_list = [ bad_viewpoint[0] for bad_viewpoint in bad_viewpoint_list]
elif request.args.get('aid_list', None) is not None:
aid_list = request.args.get('aid_list', '')
if len(aid_list) > 0:
aid_list = aid_list.replace('[', '')
aid_list = aid_list.replace(']', '')
aid_list = aid_list.strip().split(',')
candidate_aid_list = [ int(aid_.strip()) for aid_ in aid_list ]
else:
candidate_aid_list = ''
else:
candidate_aid_list = ''
return appf.template(None, 'group_review', candidate_aid_list=candidate_aid_list, mode_list=appf.VALID_TURK_MODES)
@register_route('/sightings/', methods=['GET'])
def sightings(html_encode=True):
ibs = current_app.ibs
complete = request.args.get('complete', None) is not None
sightings = ibs.report_sightings_str(complete=complete, include_images=True)
if html_encode:
sightings = sightings.replace('\n', '<br/>')
return sightings
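# Self-documenting API index: enumerates every registered /api/ rule grouped by
# HTTP method and renders it together with the app name/secret and a sample
# authorization value for the /api/core/dbname/ endpoint.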
@register_route('/api/', methods=['GET'], __api_prefix_check__=False)
def api_root():
rules = current_app.url_map.iter_rules()
rule_dict = {}
for rule in rules:
methods = rule.methods
url = str(rule)
if '/api/' in url:
methods -= set(['HEAD', 'OPTIONS'])
if len(methods) == 0:
continue
if len(methods) > 1:
print('methods = %r' % (methods,))
method = list(methods)[0]
if method not in rule_dict.keys():
rule_dict[method] = []
rule_dict[method].append((method, url, ))
for method in rule_dict.keys():
rule_dict[method].sort()
url = '%s/api/core/dbname/' % (current_app.server_url, )
app_auth = controller_inject.get_url_authorization(url)
return appf.template(None, 'api',
app_url=url,
app_name=controller_inject.GLOBAL_APP_NAME,
app_secret=controller_inject.GLOBAL_APP_SECRET,
app_auth=app_auth,
rule_list=rule_dict)
@register_route('/upload/', methods=['GET'])
def upload():
return appf.template(None, 'upload')
@register_route('/dbinfo/', methods=['GET'])
def dbinfo():
try:
ibs = current_app.ibs
dbinfo_str = ibs.get_dbinfo_str()
except:
dbinfo_str = ''
dbinfo_str_formatted = '<pre>%s</pre>' % (dbinfo_str, )
return dbinfo_str_formatted
@register_route('/counts/', methods=['GET'])
def wb_counts():
fmt_str = '''<p># Annotations: <b>%d</b></p>
<p># MediaAssets (images): <b>%d</b></p>
<p># MarkedIndividuals: <b>%d</b></p>
<p># Encounters: <b>%d</b></p>
<p># Occurrences: <b>%d</b></p>'''
try:
ibs = current_app.ibs
aid_list = ibs.get_valid_aids()
nid_list = ibs.get_annot_nids(aid_list)
nid_list = [ nid for nid in nid_list if nid > 0 ]
gid_list = ibs.get_annot_gids(aid_list)
imgset_id_list = ibs.get_valid_imgsetids()
aids_list = ibs.get_imageset_aids(imgset_id_list)
imgset_id_list = [
imgset_id
for imgset_id, aid_list_ in zip(imgset_id_list, aids_list)
if len(aid_list_) > 0
]
valid_nid_list = list(set(nid_list))
valid_aid_list = list(set(aid_list))
valid_gid_list = list(set(gid_list))
        valid_imgset_id_list = list(set(imgset_id_list))
aids_list = ibs.get_imageset_aids(valid_imgset_id_list)
nids_list = map(ibs.get_annot_nids, aids_list)
nids_list = map(set, nids_list)
nids_list = ut.flatten(nids_list)
num_nid = len(valid_nid_list)
num_aid = len(valid_aid_list)
num_gid = len(valid_gid_list)
num_imgset = len(valid_imgset_id_list)
num_encounters = len(nids_list)
args = (num_aid, num_gid, num_nid, num_encounters, num_imgset, )
counts_str = fmt_str % args
except:
counts_str = ''
return counts_str
@register_route('/test/counts.jsp', methods=['GET'], __api_postfix_check__=False)
def wb_counts_alias1():
return wb_counts()
@register_route('/gzgc/counts.jsp', methods=['GET'], __api_postfix_check__=False)
def wb_counts_alias2():
return wb_counts()
@register_route('/404/', methods=['GET'])
def error404(exception=None):
import traceback
exception_str = str(exception)
traceback_str = str(traceback.format_exc())
print('[web] %r' % (exception_str, ))
print('[web] %r' % (traceback_str, ))
return appf.template(None, '404', exception_str=exception_str,
traceback_str=traceback_str)
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.web.app
python -m ibeis.web.app --allexamples
python -m ibeis.web.app --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 |
rlaboiss/pelican-plugins | post_stats/readability.py | 73 | 1362 | # -*- coding: utf-8 -*-
# Adapted from here: http://acdx.net/calculating-the-flesch-kincaid-level-in-python/
# See here for details: http://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_test
from __future__ import division
import re
def mean(seq):
return sum(seq) / len(seq)
def syllables(word):
if len(word) <= 3:
return 1
word = re.sub(r"(es|ed|(?<!l)e)$", "", word)
return len(re.findall(r"[aeiouy]+", word))
def normalize(text):
terminators = ".!?:;"
term = re.escape(terminators)
text = re.sub(r"[^%s\sA-Za-z]+" % term, "", text)
text = re.sub(r"\s*([%s]+\s*)+" % term, ". ", text)
return re.sub(r"\s+", " ", text)
def text_stats(text, wc):
text = normalize(text)
stcs = [s.split(" ") for s in text.split(". ")]
stcs = [s for s in stcs if len(s) >= 2]
if wc:
words = wc
else:
words = sum(len(s) for s in stcs)
sbls = sum(syllables(w) for s in stcs for w in s)
return len(stcs), words, sbls
def flesch_index(stats):
stcs, words, sbls = stats
if stcs == 0 or words == 0:
return 0
return 206.835 - 1.015 * (words / stcs) - 84.6 * (sbls / words)
def flesch_kincaid_level(stats):
stcs, words, sbls = stats
if stcs == 0 or words == 0:
return 0
return 0.39 * (words / stcs) + 11.8 * (sbls / words) - 15.59
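# Minimal usage sketch (hypothetical input text):
#   stats = text_stats("Some body of text. Another sentence follows here.", None)
#   reading_ease = flesch_index(stats)
#   grade_level = flesch_kincaid_level(stats)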
| agpl-3.0 |
shrimo/PyQt4 | examples/webkit/previewer/ui_previewer.py | 6 | 3467 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'previewer.ui'
#
# Created: Mon Nov 29 17:09:55 2010
# by: PyQt4 UI code generator snapshot-4.8.2-241fbaf4620d
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(911, 688)
self.horizontalLayout_4 = QtGui.QHBoxLayout(Form)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.splitter = QtGui.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.editorBox = QtGui.QGroupBox(self.splitter)
self.editorBox.setObjectName(_fromUtf8("editorBox"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.editorBox)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.plainTextEdit = QtGui.QPlainTextEdit(self.editorBox)
self.plainTextEdit.setObjectName(_fromUtf8("plainTextEdit"))
self.verticalLayout_2.addWidget(self.plainTextEdit)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.clearButton = QtGui.QPushButton(self.editorBox)
self.clearButton.setObjectName(_fromUtf8("clearButton"))
self.horizontalLayout.addWidget(self.clearButton)
self.previewButton = QtGui.QPushButton(self.editorBox)
self.previewButton.setObjectName(_fromUtf8("previewButton"))
self.horizontalLayout.addWidget(self.previewButton)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.previewerBox = QtGui.QGroupBox(self.splitter)
self.previewerBox.setObjectName(_fromUtf8("previewerBox"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.previewerBox)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.webView = QtWebKit.QWebView(self.previewerBox)
self.webView.setUrl(QtCore.QUrl(_fromUtf8("about:blank")))
self.webView.setObjectName(_fromUtf8("webView"))
self.horizontalLayout_3.addWidget(self.webView)
self.horizontalLayout_4.addWidget(self.splitter)
self.retranslateUi(Form)
QtCore.QObject.connect(self.clearButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.plainTextEdit.clear)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.editorBox.setTitle(QtGui.QApplication.translate("Form", "HTML Editor", None, QtGui.QApplication.UnicodeUTF8))
self.clearButton.setText(QtGui.QApplication.translate("Form", "Clear", None, QtGui.QApplication.UnicodeUTF8))
self.previewButton.setText(QtGui.QApplication.translate("Form", "Preview", None, QtGui.QApplication.UnicodeUTF8))
self.previewerBox.setTitle(QtGui.QApplication.translate("Form", "HTML Preview", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import QtWebKit
| gpl-2.0 |
zstyblik/infernal-twin | build/reportlab/tests/test_platypus_accum.py | 14 | 3168 | from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os,unittest
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle, LongTable
from reportlab.platypus.doctemplate import PageAccumulator
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch, cm
from reportlab.lib.utils import simpleSplit
from reportlab.lib import colors
styleSheet = getSampleStyleSheet()
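# A PageAccumulator gathers the values emitted through its onDrawStr/onDrawText
# markers while a page is rendered, so pageEndAction can inspect exactly what
# ended up on that page before it is emitted.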
class MyPageAccumulator(PageAccumulator):
def pageEndAction(self,canv,doc):
L42 = [x[0] for x in self.data if not x[0]%42]
L13 = [x[0] for x in self.data if not x[0]%13]
if L42 and L13:
s = 'Saw multiples of 13 and 42'
elif L13:
s = 'Saw multiples of 13'
elif L42:
s = 'Saw multiples of 42'
else:
return
canv.saveState()
canv.setFillColor(colors.purple)
canv.setFont("Helvetica",6)
canv.drawString(1*inch,1*inch,s)
canv.restoreState()
PA = MyPageAccumulator('_42_divides')
class MyDocTemplate(SimpleDocTemplate):
def beforeDocument(self):
for pt in self.pageTemplates:
PA.attachToPageTemplate(pt)
def textAccum2():
doc = MyDocTemplate(outputfile('test_platypus_accum2.pdf'),
pagesize=(8.5*inch, 11*inch), showBoundary=1)
story=[]
story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
('FONTNAME',(0,0),(-1,-1),'Helvetica'),
('FONTSIZE',(0,0),(-1,-1),10),
]
def myCV(s,fontName='Helvetica',fontSize=10,maxWidth=72):
return '\n'.join(simpleSplit(s,fontName,fontSize,maxWidth))
data = [[PA.onDrawStr(str(i+1),i+1),
myCV("xx "* (i%10),maxWidth=100-12),
myCV("blah "*(i%40),maxWidth=200-12)]
for i in range(500)]
t=LongTable(data, style=sty, colWidths = [50,100,200])
story.append(t)
doc.build(story)
def textAccum1():
doc = MyDocTemplate(outputfile('test_platypus_accum1.pdf'),
pagesize=(8.5*inch, 11*inch), showBoundary=1)
story=[]
story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
]
data = [[str(i+1), Paragraph("xx "* (i%10),
styleSheet["BodyText"]),
Paragraph(("blah "*(i%40))+PA.onDrawText(i+1), styleSheet["BodyText"])]
for i in range(500)]
t=LongTable(data, style=sty, colWidths = [50,100,200])
story.append(t)
doc.build(story)
class TablesTestCase(unittest.TestCase):
"Make documents with tables"
def test1(self):
textAccum1()
def test2(self):
textAccum2()
def makeSuite():
return makeSuiteForClasses(TablesTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| gpl-3.0 |
kiall/designate-py3 | tools/install_venv.py | 11 | 2341 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import install_venv_common as install_venv # noqa
def print_help(venv, root):
help = """
Designate development environment setup is complete.
Designate development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the Designate virtualenv for the extent of your current shell
session you can run:
$ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ %s/tools/with_venv.sh <your command>
"""
print(help % (venv, root))
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if os.environ.get('tools_path'):
root = os.environ['tools_path']
venv = os.path.join(root, '.venv')
if os.environ.get('venv'):
venv = os.environ['venv']
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Designate'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
print_help(venv, root)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
mangalaman93/docker-py | docker/utils/types.py | 43 | 2304 | import six
class LogConfigTypesEnum(object):
_values = (
'json-file',
'syslog',
'journald',
'gelf',
'fluentd',
'none'
)
JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
class DictType(dict):
def __init__(self, init):
for k, v in six.iteritems(init):
self[k] = v
class LogConfig(DictType):
types = LogConfigTypesEnum
def __init__(self, **kwargs):
log_driver_type = kwargs.get('type', kwargs.get('Type'))
config = kwargs.get('config', kwargs.get('Config')) or {}
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super(LogConfig, self).__init__({
'Type': log_driver_type,
'Config': config
})
@property
def type(self):
return self['Type']
@type.setter
def type(self, value):
self['Type'] = value
@property
def config(self):
return self['Config']
def set_config_value(self, key, value):
self.config[key] = value
def unset_config(self, key):
if key in self.config:
del self.config[key]
class Ulimit(DictType):
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, six.string_types):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super(Ulimit, self).__init__({
'Name': name,
'Soft': soft,
'Hard': hard
})
@property
def name(self):
return self['Name']
@name.setter
def name(self, value):
self['Name'] = value
@property
def soft(self):
return self.get('Soft')
@soft.setter
def soft(self, value):
self['Soft'] = value
@property
def hard(self):
return self.get('Hard')
@hard.setter
def hard(self, value):
self['Hard'] = value
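# Usage sketch (host_config wiring is assumed, not part of this module):
#   log_config = LogConfig(type=LogConfig.types.JSON, config={'max-size': '1g'})
#   nofile_limit = Ulimit(name='nofile', soft=1024, hard=2048)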
| apache-2.0 |
alander/StarCluster | starcluster/commands/removenode.py | 19 | 5236 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import warnings
from starcluster.logger import log
from starcluster.commands.completers import ClusterCompleter
class CmdRemoveNode(ClusterCompleter):
"""
removenode [options] <cluster_tag>
Terminate one or more nodes in the cluster
Examples:
$ starcluster removenode mycluster
This will automatically fetch a single worker node, detach it from the
cluster, and then terminate it. If you'd rather be specific about which
node(s) to remove then use the -a option:
$ starcluster removenode mycluster -a node003
You can also specify multiple nodes to remove and terminate one after
another, e.g.:
$ starcluster removenode mycluster -n 3
or
$ starcluster removenode mycluster -a node001,node002,node003
If you'd rather not terminate the node(s) after detaching from the cluster,
use the -k option:
$ starcluster removenode -k mycluster -a node001,node002,node003
This will detach the nodes from the cluster but leave the instances
running. These nodes can then later be reattached to the cluster using:
$ starcluster addnode mycluster -x -a node001,node002,node003
This can be useful, for example, when testing on_add_node and
on_remove_node methods in a StarCluster plugin.
"""
names = ['removenode', 'rn']
tag = None
def addopts(self, parser):
parser.add_option("-f", "--force", dest="force", action="store_true",
default=False, help="Terminate node regardless "
"of errors if possible ")
parser.add_option("-k", "--keep-instance", dest="terminate",
action="store_false", default=True,
help="do not terminate nodes "
"after detaching them from the cluster")
parser.add_option("-c", "--confirm", dest="confirm",
action="store_true", default=False,
help="Do not prompt for confirmation, "
"just remove the node(s)")
parser.add_option("-n", "--num-nodes", dest="num_nodes",
action="store", type="int", default=1,
help="number of nodes to remove")
parser.add_option("-a", "--aliases", dest="aliases", action="append",
type="string", default=[],
help="list of nodes to remove (e.g. "
"node001,node002,node003)")
def execute(self, args):
if not len(args) >= 1:
self.parser.error("please specify a cluster <cluster_tag>")
if len(args) >= 2:
warnings.warn(
"Passing node names as arguments is deprecated. Please "
"start using the -a option. Pass --help for more details",
DeprecationWarning)
tag = self.tag = args[0]
aliases = []
for alias in self.opts.aliases:
aliases.extend(alias.split(','))
old_form_aliases = args[1:]
if old_form_aliases:
if aliases:
self.parser.error(
"you must either use a list of nodes as arguments OR "
"use the -a option - not both")
else:
aliases = old_form_aliases
if ('master' in aliases) or ('%s-master' % tag in aliases):
self.parser.error(
"'master' and '%s-master' are reserved aliases" % tag)
num_nodes = self.opts.num_nodes
if num_nodes == 1 and aliases:
num_nodes = len(aliases)
if num_nodes > 1 and aliases and len(aliases) != num_nodes:
self.parser.error("you must specify the same number of aliases "
"(-a) as nodes (-n)")
dupe = self._get_duplicate(aliases)
if dupe:
self.parser.error("cannot have duplicate aliases (duplicate: %s)" %
dupe)
if not self.opts.confirm:
resp = raw_input("Remove %s from %s (y/n)? " %
(', '.join(aliases) or '%s nodes' % num_nodes,
tag))
if resp not in ['y', 'Y', 'yes']:
log.info("Aborting...")
return
self.cm.remove_nodes(tag, aliases=aliases, num_nodes=num_nodes,
terminate=self.opts.terminate,
force=self.opts.force)
| gpl-3.0 |
mbrugg/MC-EWIO64-ORG | board/pxa255_idp/pxa_reg_calcs.py | 65 | 11003 | #!/usr/bin/python
# (C) Copyright 2004
# BEC Systems <http://bec-systems.com>
# Cliff Brake <[email protected]>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
# calculations for PXA255 registers
class gpio:
dir = '0'
set = '0'
clr = '0'
alt = '0'
desc = ''
def __init__(self, dir=0, set=0, clr=0, alt=0, desc=''):
self.dir = dir
self.set = set
self.clr = clr
self.alt = alt
self.desc = desc
# the following is a dictionary of all GPIOs in the system
# the key is the GPIO number
pxa255_alt_func = {
0: ['gpio', 'none', 'none', 'none'],
1: ['gpio', 'gpio reset', 'none', 'none'],
2: ['gpio', 'none', 'none', 'none'],
3: ['gpio', 'none', 'none', 'none'],
4: ['gpio', 'none', 'none', 'none'],
5: ['gpio', 'none', 'none', 'none'],
6: ['gpio', 'MMC clk', 'none', 'none'],
7: ['gpio', '48MHz clock', 'none', 'none'],
8: ['gpio', 'MMC CS0', 'none', 'none'],
9: ['gpio', 'MMC CS1', 'none', 'none'],
10: ['gpio', 'RTC Clock', 'none', 'none'],
11: ['gpio', '3.6MHz', 'none', 'none'],
12: ['gpio', '32KHz', 'none', 'none'],
13: ['gpio', 'none', 'MBGNT', 'none'],
14: ['gpio', 'MBREQ', 'none', 'none'],
15: ['gpio', 'none', 'nCS_1', 'none'],
16: ['gpio', 'none', 'PWM0', 'none'],
17: ['gpio', 'none', 'PWM1', 'none'],
18: ['gpio', 'RDY', 'none', 'none'],
19: ['gpio', 'DREQ[1]', 'none', 'none'],
20: ['gpio', 'DREQ[0]', 'none', 'none'],
21: ['gpio', 'none', 'none', 'none'],
22: ['gpio', 'none', 'none', 'none'],
23: ['gpio', 'none', 'SSP SCLK', 'none'],
24: ['gpio', 'none', 'SSP SFRM', 'none'],
25: ['gpio', 'none', 'SSP TXD', 'none'],
26: ['gpio', 'SSP RXD', 'none', 'none'],
27: ['gpio', 'SSP EXTCLK', 'none', 'none'],
28: ['gpio', 'AC97 bitclk in, I2S bitclock out', 'I2S bitclock in', 'none'],
29: ['gpio', 'AC97 SDATA_IN0', 'I2S SDATA_IN', 'none'],
30: ['gpio', 'I2S SDATA_OUT', 'AC97 SDATA_OUT', 'none'],
31: ['gpio', 'I2S SYNC', 'AC97 SYNC', 'none'],
32: ['gpio', 'AC97 SDATA_IN1', 'I2S SYSCLK', 'none'],
33: ['gpio', 'none', 'nCS_5', 'none'],
34: ['gpio', 'FF RXD', 'MMC CS0', 'none'],
35: ['gpio', 'FF CTS', 'none', 'none'],
36: ['gpio', 'FF DCD', 'none', 'none'],
37: ['gpio', 'FF DSR', 'none', 'none'],
38: ['gpio', 'FF RI', 'none', 'none'],
39: ['gpio', 'MMC CS1', 'FF TXD', 'none'],
40: ['gpio', 'none', 'FF DTR', 'none'],
41: ['gpio', 'none', 'FF RTS', 'none'],
42: ['gpio', 'BT RXD', 'none', 'HW RXD'],
43: ['gpio', 'none', 'BT TXD', 'HW TXD'],
44: ['gpio', 'BT CTS', 'none', 'HW CTS'],
45: ['gpio', 'none', 'BT RTS', 'HW RTS'],
46: ['gpio', 'ICP_RXD', 'STD RXD', 'none'],
47: ['gpio', 'STD TXD', 'ICP_TXD', 'none'],
48: ['gpio', 'HW TXD', 'nPOE', 'none'],
49: ['gpio', 'HW RXD', 'nPWE', 'none'],
50: ['gpio', 'HW CTS', 'nPIOR', 'none'],
51: ['gpio', 'nPIOW', 'HW RTS', 'none'],
52: ['gpio', 'none', 'nPCE[1]', 'none'],
53: ['gpio', 'MMC CLK', 'nPCE[2]', 'none'],
54: ['gpio', 'MMC CLK', 'nPSKSEL', 'none'],
55: ['gpio', 'none', 'nPREG', 'none'],
56: ['gpio', 'nPWAIT', 'none', 'none'],
57: ['gpio', 'nIOIS16', 'none', 'none'],
58: ['gpio', 'none', 'LDD[0]', 'none'],
59: ['gpio', 'none', 'LDD[1]', 'none'],
60: ['gpio', 'none', 'LDD[2]', 'none'],
61: ['gpio', 'none', 'LDD[3]', 'none'],
62: ['gpio', 'none', 'LDD[4]', 'none'],
63: ['gpio', 'none', 'LDD[5]', 'none'],
64: ['gpio', 'none', 'LDD[6]', 'none'],
65: ['gpio', 'none', 'LDD[7]', 'none'],
66: ['gpio', 'MBREQ', 'LDD[8]', 'none'],
67: ['gpio', 'MMC CS0', 'LDD[9]', 'none'],
68: ['gpio', 'MMC CS1', 'LDD[10]', 'none'],
69: ['gpio', 'MMC CLK', 'LDD[11]', 'none'],
70: ['gpio', 'RTC CLK', 'LDD[12]', 'none'],
71: ['gpio', '3.6 MHz', 'LDD[13]', 'none'],
72: ['gpio', '32 KHz', 'LDD[14]', 'none'],
73: ['gpio', 'MBGNT', 'LDD[15]', 'none'],
74: ['gpio', 'none', 'LCD_FCLK', 'none'],
75: ['gpio', 'none', 'LCD_LCLK', 'none'],
76: ['gpio', 'none', 'LCD_PCLK', 'none'],
77: ['gpio', 'none', 'LCD_ACBIAS', 'none'],
78: ['gpio', 'none', 'nCS_2', 'none'],
79: ['gpio', 'none', 'nCS_3', 'none'],
80: ['gpio', 'none', 'nCS_4', 'none'],
81: ['gpio', 'NSSPSCLK', 'none', 'none'],
82: ['gpio', 'NSSPSFRM', 'none', 'none'],
83: ['gpio', 'NSSPTXD', 'NSSPRXD', 'none'],
84: ['gpio', 'NSSPTXD', 'NSSPRXD', 'none'],
}
#def __init__(self, dir=0, set=0, clr=0, alt=0, desc=''):
gpio_list = []
for i in range(0,85):
gpio_list.append(gpio())
#chip select GPIOs
gpio_list[18] = gpio(0, 0, 0, 1, 'RDY')
gpio_list[33] = gpio(1, 1, 0, 2, 'CS5#')
gpio_list[80] = gpio(1, 1, 0, 2, 'CS4#')
gpio_list[79] = gpio(1, 1, 0, 2, 'CS3#')
gpio_list[78] = gpio(1, 1, 0, 2, 'CS2#')
gpio_list[15] = gpio(1, 1, 0, 2, 'CS1#')
gpio_list[22] = gpio(0, 0, 0, 0, 'Consumer IR, PCC_S1_IRQ_O#')
gpio_list[21] = gpio(0, 0, 0, 0, 'IRQ_IDE, PFI')
gpio_list[19] = gpio(0, 0, 0, 0, 'XB_DREQ1, PCC_SO_IRQ_O#')
gpio_list[20] = gpio(0, 0, 0, 0, 'XB_DREQ0')
gpio_list[20] = gpio(0, 0, 0, 0, 'XB_DREQ0')
gpio_list[17] = gpio(0, 0, 0, 0, 'IRQ_AXB')
gpio_list[16] = gpio(1, 0, 0, 2, 'PWM0')
# PCMCIA stuff
gpio_list[57] = gpio(0, 0, 0, 1, 'PCC_IOIS16#')
gpio_list[56] = gpio(0, 0, 0, 1, 'PCC_WAIT#')
gpio_list[55] = gpio(1, 0, 0, 2, 'PCC_REG#')
gpio_list[54] = gpio(1, 0, 0, 2, 'PCC_SCKSEL')
gpio_list[53] = gpio(1, 1, 0, 2, 'PCC_CE2#')
gpio_list[52] = gpio(1, 1, 0, 2, 'PCC_CE1#')
gpio_list[51] = gpio(1, 1, 0, 1, 'PCC_IOW#')
gpio_list[50] = gpio(1, 1, 0, 2, 'PCC_IOR#')
gpio_list[49] = gpio(1, 1, 0, 2, 'PCC_WE#')
gpio_list[48] = gpio(1, 1, 0, 2, 'PCC_OE#')
# SSP port
gpio_list[26] = gpio(0, 0, 0, 1, 'SSP_RXD')
gpio_list[25] = gpio(0, 0, 0, 0, 'SSP_TXD')
gpio_list[24] = gpio(1, 0, 1, 2, 'SSP_SFRM')
gpio_list[23] = gpio(1, 0, 1, 2, 'SSP_SCLK')
gpio_list[27] = gpio(0, 0, 0, 0, 'SSP_EXTCLK')
# audio codec
gpio_list[32] = gpio(0, 0, 0, 0, 'AUD_SDIN1')
gpio_list[31] = gpio(1, 0, 0, 2, 'AC_SYNC')
gpio_list[30] = gpio(1, 0, 0, 2, 'AC_SDOUT')
gpio_list[29] = gpio(0, 0, 0, 1, 'AUD_SDIN0')
gpio_list[28] = gpio(0, 0, 0, 1, 'AC_BITCLK')
# serial ports
gpio_list[39] = gpio(1, 0, 0, 2, 'FF_TXD')
gpio_list[34] = gpio(0, 0, 0, 1, 'FF_RXD')
gpio_list[41] = gpio(1, 0, 0, 2, 'FF_RTS')
gpio_list[35] = gpio(0, 0, 0, 1, 'FF_CTS')
gpio_list[40] = gpio(1, 0, 0, 2, 'FF_DTR')
gpio_list[37] = gpio(0, 0, 0, 1, 'FF_DSR')
gpio_list[38] = gpio(0, 0, 0, 1, 'FF_RI')
gpio_list[36] = gpio(0, 0, 0, 1, 'FF_DCD')
gpio_list[43] = gpio(1, 0, 0, 2, 'BT_TXD')
gpio_list[42] = gpio(0, 0, 0, 1, 'BT_RXD')
gpio_list[45] = gpio(1, 0, 0, 2, 'BT_RTS')
gpio_list[44] = gpio(0, 0, 0, 1, 'BT_CTS')
gpio_list[47] = gpio(1, 0, 0, 1, 'IR_TXD')
gpio_list[46] = gpio(0, 0, 0, 2, 'IR_RXD')
# misc GPIO signals
gpio_list[14] = gpio(0, 0, 0, 0, 'MBREQ')
gpio_list[13] = gpio(0, 0, 0, 0, 'MBGNT')
gpio_list[12] = gpio(0, 0, 0, 0, 'GPIO_12/32K_CLK')
gpio_list[11] = gpio(0, 0, 0, 0, '3M6_CLK')
gpio_list[10] = gpio(1, 0, 1, 0, 'GPIO_10/RTC_CLK/debug LED')
gpio_list[9] = gpio(0, 0, 0, 0, 'MMC_CD#')
gpio_list[8] = gpio(0, 0, 0, 0, 'PCC_S1_CD#')
gpio_list[7] = gpio(0, 0, 0, 0, 'PCC_S0_CD#')
gpio_list[6] = gpio(1, 0, 0, 1, 'MMC_CLK')
gpio_list[5] = gpio(0, 0, 0, 0, 'IRQ_TOUCH#')
gpio_list[4] = gpio(0, 0, 0, 0, 'IRQ_ETH')
gpio_list[3] = gpio(0, 0, 0, 0, 'MQ_IRQ#')
gpio_list[2] = gpio(0, 0, 0, 0, 'BAT_DATA')
gpio_list[1] = gpio(0, 0, 0, 1, 'USER_RESET#')
gpio_list[0] = gpio(0, 0, 0, 1, 'USER_RESET#')
# LCD GPIOs
gpio_list[58] = gpio(1, 0, 0, 2, 'LDD0')
gpio_list[59] = gpio(1, 0, 0, 2, 'LDD1')
gpio_list[60] = gpio(1, 0, 0, 2, 'LDD2')
gpio_list[61] = gpio(1, 0, 0, 2, 'LDD3')
gpio_list[62] = gpio(1, 0, 0, 2, 'LDD4')
gpio_list[63] = gpio(1, 0, 0, 2, 'LDD5')
gpio_list[64] = gpio(1, 0, 0, 2, 'LDD6')
gpio_list[65] = gpio(1, 0, 0, 2, 'LDD7')
gpio_list[66] = gpio(1, 0, 0, 2, 'LDD8')
gpio_list[67] = gpio(1, 0, 0, 2, 'LDD9')
gpio_list[68] = gpio(1, 0, 0, 2, 'LDD10')
gpio_list[69] = gpio(1, 0, 0, 2, 'LDD11')
gpio_list[70] = gpio(1, 0, 0, 2, 'LDD12')
gpio_list[71] = gpio(1, 0, 0, 2, 'LDD13')
gpio_list[72] = gpio(1, 0, 0, 2, 'LDD14')
gpio_list[73] = gpio(1, 0, 0, 2, 'LDD15')
gpio_list[74] = gpio(1, 0, 0, 2, 'FCLK')
gpio_list[75] = gpio(1, 0, 0, 2, 'LCLK')
gpio_list[76] = gpio(1, 0, 0, 2, 'PCLK')
gpio_list[77] = gpio(1, 0, 0, 2, 'ACBIAS')
# calculate registers
pxa_regs = {
'gpdr0':0, 'gpdr1':0, 'gpdr2':0,
'gpsr0':0, 'gpsr1':0, 'gpsr2':0,
'gpcr0':0, 'gpcr1':0, 'gpcr2':0,
'gafr0_l':0, 'gafr0_u':0,
'gafr1_l':0, 'gafr1_u':0,
'gafr2_l':0, 'gafr2_u':0,
}
# U-boot define names
uboot_reg_names = {
'gpdr0':'CFG_GPDR0_VAL', 'gpdr1':'CFG_GPDR1_VAL', 'gpdr2':'CFG_GPDR2_VAL',
'gpsr0':'CFG_GPSR0_VAL', 'gpsr1':'CFG_GPSR1_VAL', 'gpsr2':'CFG_GPSR2_VAL',
'gpcr0':'CFG_GPCR0_VAL', 'gpcr1':'CFG_GPCR1_VAL', 'gpcr2':'CFG_GPCR2_VAL',
'gafr0_l':'CFG_GAFR0_L_VAL', 'gafr0_u':'CFG_GAFR0_U_VAL',
'gafr1_l':'CFG_GAFR1_L_VAL', 'gafr1_u':'CFG_GAFR1_U_VAL',
'gafr2_l':'CFG_GAFR2_L_VAL', 'gafr2_u':'CFG_GAFR2_U_VAL',
}
# bit mappings
bit_mappings = [
{ 'gpio':(0,32), 'shift':1, 'regs':{'dir':'gpdr0', 'set':'gpsr0', 'clr':'gpcr0'} },
{ 'gpio':(32,64), 'shift':1, 'regs':{'dir':'gpdr1', 'set':'gpsr1', 'clr':'gpcr1'} },
{ 'gpio':(64,85), 'shift':1, 'regs':{'dir':'gpdr2', 'set':'gpsr2', 'clr':'gpcr2'} },
{ 'gpio':(0,16), 'shift':2, 'regs':{'alt':'gafr0_l'} },
{ 'gpio':(16,32), 'shift':2, 'regs':{'alt':'gafr0_u'} },
{ 'gpio':(32,48), 'shift':2, 'regs':{'alt':'gafr1_l'} },
{ 'gpio':(48,64), 'shift':2, 'regs':{'alt':'gafr1_u'} },
{ 'gpio':(64,80), 'shift':2, 'regs':{'alt':'gafr2_l'} },
{ 'gpio':(80,85), 'shift':2, 'regs':{'alt':'gafr2_u'} },
]
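# Pack each GPIO's dir/set/clr/alt values into the register words listed above:
# the direction/set/clear registers use one bit per GPIO while the alternate
# function registers use two bits per GPIO (hence the different 'shift' factors).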
def stuff_bits(bit_mapping, gpio_list):
gpios = range( bit_mapping['gpio'][0], bit_mapping['gpio'][1])
for gpio in gpios:
for reg in bit_mapping['regs'].keys():
value = eval( 'gpio_list[gpio].%s' % (reg) )
if ( value ):
# we have a high bit
bit_shift = (gpio - bit_mapping['gpio'][0]) * bit_mapping['shift']
bit = value << (bit_shift)
pxa_regs[bit_mapping['regs'][reg]] |= bit
for i in bit_mappings:
stuff_bits(i, gpio_list)
# now print out all regs
registers = pxa_regs.keys()
registers.sort()
for reg in registers:
print '%s: 0x%x' % (reg, pxa_regs[reg])
# print define to past right into U-Boot source code
print
print
for reg in registers:
print '#define %s 0x%x' % (uboot_reg_names[reg], pxa_regs[reg])
# print all GPIOS
print
print
for i in range(len(gpio_list)):
gpio_i = gpio_list[i]
alt_func_desc = pxa255_alt_func[i][gpio_i.alt]
print 'GPIO: %i, dir=%i, set=%i, clr=%i, alt=%s, desc=%s' % (i, gpio_i.dir, gpio_i.set, gpio_i.clr, alt_func_desc, gpio_i.desc)
| gpl-2.0 |
cgcgbcbc/django-xadmin | xadmin/plugins/themes.py | 25 | 2797 | #coding:utf-8
import urllib
from django.template import loader
from django.core.cache import cache
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.models import UserSettings
from xadmin.views import BaseAdminPlugin, BaseAdminView
from xadmin.util import static, json
THEME_CACHE_KEY = 'xadmin_themes'
class ThemePlugin(BaseAdminPlugin):
enable_themes = False
# {'name': 'Blank Theme', 'description': '...', 'css': 'http://...', 'thumbnail': '...'}
user_themes = None
use_bootswatch = False
default_theme = static('xadmin/css/themes/bootstrap-xadmin.css')
bootstrap2_theme = static('xadmin/css/themes/bootstrap-theme.css')
def init_request(self, *args, **kwargs):
return self.enable_themes
def _get_theme(self):
if self.user:
try:
return UserSettings.objects.get(user=self.user, key="site-theme").value
except Exception:
pass
if '_theme' in self.request.COOKIES:
return urllib.unquote(self.request.COOKIES['_theme'])
return self.default_theme
def get_context(self, context):
context['site_theme'] = self._get_theme()
return context
# Media
def get_media(self, media):
return media + self.vendor('jquery-ui-effect.js', 'xadmin.plugin.themes.js')
# Block Views
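    # Builds the theme-switcher menu: the two bundled themes, any user_themes
    # configured on the plugin and, when use_bootswatch is set, the Bootswatch
    # catalogue fetched over HTTP and cached for 24 hours.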
def block_top_navmenu(self, context, nodes):
themes = [{'name': _(u"Default"), 'description': _(
u"Default bootstrap theme"), 'css': self.default_theme},
{'name': _(u"Bootstrap2"), 'description': _(u"Bootstrap 2.x theme"),
'css': self.bootstrap2_theme}]
select_css = context.get('site_theme', self.default_theme)
if self.user_themes:
themes.extend(self.user_themes)
if self.use_bootswatch:
ex_themes = cache.get(THEME_CACHE_KEY)
if ex_themes:
themes.extend(json.loads(ex_themes))
else:
ex_themes = []
try:
watch_themes = json.loads(urllib.urlopen(
'http://api.bootswatch.com/3/').read())['themes']
ex_themes.extend([
{'name': t['name'], 'description': t['description'],
'css': t['cssMin'], 'thumbnail': t['thumbnail']}
for t in watch_themes])
except Exception:
pass
cache.set(THEME_CACHE_KEY, json.dumps(ex_themes), 24 * 3600)
themes.extend(ex_themes)
nodes.append(loader.render_to_string('xadmin/blocks/comm.top.theme.html', {'themes': themes, 'select_css': select_css}))
site.register_plugin(ThemePlugin, BaseAdminView)
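# Hedged usage sketch (not part of this file): the plugin attributes above are normally
# overridden from a project-level option class registered against BaseAdminView. The class
# name, theme name and CSS path below are illustrative assumptions, not xadmin defaults.
#
# import xadmin
# from xadmin import views
#
# class GlobalSetting(object):
#     enable_themes = True
#     use_bootswatch = True
#     user_themes = [{'name': 'My Theme', 'description': 'Project CSS override',
#                     'css': '/static/css/my-theme.css', 'thumbnail': ''}]
#
# xadmin.site.register(views.BaseAdminView, GlobalSetting)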
| bsd-3-clause |
myerpengine/odoo | openerp/modules/migration.py | 64 | 7546 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules migration handling. """
import imp
import logging
import os
from os.path import join as opj
import openerp
import openerp.release as release
import openerp.tools as tools
from openerp.tools.parse_version import parse_version
_logger = logging.getLogger(__name__)
class MigrationManager(object):
"""
    This class manages the migration of modules.
    Migration files must be Python files containing a "migrate(cr, installed_version)" function.
    These files must respect a directory tree structure: a 'migrations' folder which contains a
    folder per version. The version can be a 'module' version or a 'server.module' version (in
    this case, the files will only be processed by that version of the server). Python file names
    must start with 'pre' or 'post' and will be executed, respectively, before and after the
    module initialisation.
Example:
<moduledir>
`-- migrations
|-- 1.0
| |-- pre-update_table_x.py
| |-- pre-update_table_y.py
| |-- post-clean-data.py
| `-- README.txt # not processed
|-- 5.0.1.1 # files in this folder will be executed only on a 5.0 server
| |-- pre-delete_table_z.py
| `-- post-clean-data.py
`-- foo.py # not processed
    A similar structure is generated by the maintenance module with the migration files obtained
    through the maintenance contract.
"""
def __init__(self, cr, graph):
self.cr = cr
self.graph = graph
self.migrations = {}
self._get_files()
def _get_files(self):
"""
import addons.base.maintenance.utils as maintenance_utils
maintenance_utils.update_migrations_files(self.cr)
#"""
for pkg in self.graph:
self.migrations[pkg.name] = {}
if not (hasattr(pkg, 'update') or pkg.state == 'to upgrade'):
continue
get_module_filetree = openerp.modules.module.get_module_filetree
self.migrations[pkg.name]['module'] = get_module_filetree(pkg.name, 'migrations') or {}
self.migrations[pkg.name]['maintenance'] = get_module_filetree('base', 'maintenance/migrations/' + pkg.name) or {}
def migrate_module(self, pkg, stage):
assert stage in ('pre', 'post')
stageformat = {
'pre': '[>%s]',
'post': '[%s>]',
}
if not (hasattr(pkg, 'update') or pkg.state == 'to upgrade') or pkg.installed_version is None:
return
def convert_version(version):
if version.count('.') >= 2:
                return version  # the version number already contains the server version
return "%s.%s" % (release.major_version, version)
def _get_migration_versions(pkg):
def __get_dir(tree):
return [d for d in tree if tree[d] is not None]
versions = list(set(
__get_dir(self.migrations[pkg.name]['module']) +
__get_dir(self.migrations[pkg.name]['maintenance'])
))
versions.sort(key=lambda k: parse_version(convert_version(k)))
return versions
def _get_migration_files(pkg, version, stage):
""" return a list of tuple (module, file)
"""
m = self.migrations[pkg.name]
lst = []
mapping = {
'module': opj(pkg.name, 'migrations'),
'maintenance': opj('base', 'maintenance', 'migrations', pkg.name),
}
for x in mapping.keys():
if version in m[x]:
for f in m[x][version]:
if m[x][version][f] is not None:
continue
if not f.startswith(stage + '-'):
continue
lst.append(opj(mapping[x], version, f))
lst.sort()
return lst
def mergedict(a, b):
a = a.copy()
a.update(b)
return a
parsed_installed_version = parse_version(pkg.installed_version or '')
current_version = parse_version(convert_version(pkg.data['version']))
versions = _get_migration_versions(pkg)
for version in versions:
if parsed_installed_version < parse_version(convert_version(version)) <= current_version:
strfmt = {'addon': pkg.name,
'stage': stage,
'version': stageformat[stage] % version,
}
for pyfile in _get_migration_files(pkg, version, stage):
name, ext = os.path.splitext(os.path.basename(pyfile))
if ext.lower() != '.py':
continue
mod = fp = fp2 = None
try:
fp = tools.file_open(pyfile)
# imp.load_source need a real file object, so we create
# one from the file-like object we get from file_open
fp2 = os.tmpfile()
fp2.write(fp.read())
fp2.seek(0)
try:
mod = imp.load_source(name, pyfile, fp2)
_logger.info('module %(addon)s: Running migration %(version)s %(name)s' % mergedict({'name': mod.__name__}, strfmt))
migrate = mod.migrate
except ImportError:
_logger.exception('module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % mergedict({'file': pyfile}, strfmt))
raise
except AttributeError:
_logger.error('module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
else:
migrate(self.cr, pkg.installed_version)
finally:
if fp:
fp.close()
if fp2:
fp2.close()
if mod:
del mod
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dstroppa/openstack-smartos-nova-grizzly | nova/tests/test_test.py | 25 | 1731 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the testing base code."""
from oslo.config import cfg
from nova.openstack.common import rpc
from nova import test
CONF = cfg.CONF
CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
class IsolationTestCase(test.TestCase):
"""Ensure that things are cleaned up after failed tests.
These tests don't really do much here, but if isolation fails a bunch
of other tests should fail.
"""
def test_service_isolation(self):
self.flags(use_local=True, group='conductor')
self.useFixture(test.ServiceFixture('compute'))
def test_rpc_consumer_isolation(self):
class NeverCalled(object):
def __getattribute__(*args):
assert False, "I should never get called."
connection = rpc.create_connection(new=True)
proxy = NeverCalled()
connection.create_consumer('compute', proxy, fanout=False)
connection.consume_in_thread()
| apache-2.0 |
andmos/ansible | lib/ansible/modules/cloud/google/gcp_sql_database.py | 9 | 9612 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_sql_database
description:
- Represents a SQL database inside the Cloud SQL instance, hosted in Google's cloud.
short_description: Creates a GCP Database
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
charset:
description:
- The MySQL charset value.
required: false
collation:
description:
- The MySQL collation value.
required: false
name:
description:
- The name of the database in the Cloud SQL instance.
- This does not include the project ID or instance name.
required: false
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
  - 'This field represents a link to an Instance resource in GCP. It can be specified
    in two ways. First, you can place the name of the resource here as a string.
    Alternatively, you can add `register: name-of-resource` to a gcp_sql_instance
    task and then set this instance field to "{{ name-of-resource }}"'
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a instance
gcp_sql_instance:
name: "{{resource_name}}-3"
settings:
ip_configuration:
authorized_networks:
- name: google dns server
value: 8.8.8.8/32
tier: db-n1-standard-1
region: us-central1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instance
- name: create a database
gcp_sql_database:
name: "test_object"
charset: utf8
instance: "{{ instance }}"
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
charset:
description:
- The MySQL charset value.
returned: success
type: str
collation:
description:
- The MySQL collation value.
returned: success
type: str
name:
description:
- The name of the database in the Cloud SQL instance.
- This does not include the project ID or instance name.
returned: success
type: str
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
charset=dict(type='str'),
collation=dict(type='str'),
name=dict(type='str'),
instance=dict(required=True),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin']
state = module.params['state']
kind = 'sql#database'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'sql')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'sql')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'sql')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'sql#database',
u'charset': module.params.get('charset'),
u'collation': module.params.get('collation'),
u'name': module.params.get('name'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'sql')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name'), 'name': module.params['name']}
return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/databases/{name}".format(**res)
def collection(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/databases".format(**res)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
# SQL only: return on 403 if not exist
if allow_not_found and response.status_code == 403:
return None
try:
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {u'charset': response.get(u'charset'), u'collation': response.get(u'collation'), u'name': response.get(u'name')}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/sql/v1beta4/projects/{project}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'sql#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'sql#database')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'sql#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
if __name__ == '__main__':
main()
| gpl-3.0 |
Jannes123/inasafe | safe/impact_functions/inundation/flood_polygon_population/test/test_flood_polygon_population.py | 2 | 3475 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Flood Vector on Population Test Cases.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Rizky Maulana Nugraha'
__date__ = '20/03/2015'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import unittest
import numpy
from safe.storage.core import read_layer
from safe.impact_functions.impact_function_manager \
import ImpactFunctionManager
from safe.test.utilities import get_qgis_app, test_data_path
from safe.impact_functions.inundation.flood_polygon_population\
.impact_function import FloodEvacuationVectorHazardFunction
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class TestFloodEvacuationVectorHazardFunction(unittest.TestCase):
"""Test for Flood Vector Building Impact Function."""
def setUp(self):
registry = ImpactFunctionManager().registry
registry.clear()
registry.register(FloodEvacuationVectorHazardFunction)
def test_run(self):
function = FloodEvacuationVectorHazardFunction.instance()
hazard_path = test_data_path('hazard', 'flood_multipart_polygons.shp')
exposure_path = test_data_path(
'exposure', 'pop_binary_raster_20_20.asc')
hazard_layer = read_layer(hazard_path)
exposure_layer = read_layer(exposure_path)
function.hazard = hazard_layer
function.exposure = exposure_layer
function.parameters['affected_field'] = 'FLOODPRONE'
function.parameters['affected_value'] = 'YES'
function.run()
impact = function.impact
keywords = impact.get_keywords()
# print "keywords", keywords
affected_population = numpy.nansum(impact.get_data())
total_population = keywords['total_population']
self.assertEqual(affected_population, 20)
self.assertEqual(total_population, 200)
def test_filter(self):
"""Test filtering IF from layer keywords"""
hazard_keywords = {
'layer_purpose': 'hazard',
'layer_mode': 'classified',
'layer_geometry': 'polygon',
'hazard': 'flood',
'hazard_category': 'single_event',
'vector_hazard_classification': 'flood_vector_hazard_classes'
}
exposure_keywords = {
'layer_purpose': 'exposure',
'layer_mode': 'continuous',
'layer_geometry': 'raster',
'exposure': 'population',
'exposure_unit': 'count'
}
impact_functions = ImpactFunctionManager().filter_by_keywords(
hazard_keywords, exposure_keywords)
message = 'There should be 1 impact function, but there are: %s' % \
len(impact_functions)
self.assertEqual(1, len(impact_functions), message)
retrieved_if = impact_functions[0].metadata().as_dict()['id']
expected = ImpactFunctionManager().get_function_id(
FloodEvacuationVectorHazardFunction)
message = 'Expecting %s, but getting %s instead' % (
expected, retrieved_if)
self.assertEqual(expected, retrieved_if, message)
| gpl-3.0 |
gautam1858/tensorflow | tensorflow/python/framework/subscribe_test.py | 22 | 13362 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.subscribe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import subscribe
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SubscribeTest(test_util.TensorFlowTestCase):
def _ExpectSubscribedIdentities(self, container):
"""Convenience function to test a container of subscribed identities."""
self.assertTrue(
all(subscribe._is_subscribed_identity(x) for x in container))
@test_util.run_deprecated_v1
def testSideEffect(self):
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
with ops.control_dependencies([c]):
d = constant_op.constant(42)
n = math_ops.negative(c)
shared = []
def sub(t):
shared.append(t)
return t
c0 = c
self.assertTrue(c0.op in d.op.control_inputs)
c = subscribe.subscribe(c,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Verify that control dependencies are correctly moved to the subscription.
self.assertFalse(c0.op in d.op.control_inputs)
self.assertTrue(c.op in d.op.control_inputs)
with self.cached_session() as sess:
c_out = self.evaluate([c])
n_out = self.evaluate([n])
d_out = self.evaluate([d])
self.assertEqual(n_out, [-2])
self.assertEqual(c_out, [2])
self.assertEqual(d_out, [42])
self.assertEqual(shared, [2, 2, 2])
@test_util.run_deprecated_v1
def testSupportedTypes(self):
"""Confirm that supported types are correctly detected and handled."""
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
def sub(t):
return t
# Tuples.
subscribed = subscribe.subscribe(
(a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, tuple)
self._ExpectSubscribedIdentities(subscribed)
# Lists.
subscribed = subscribe.subscribe(
[a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, list)
self._ExpectSubscribedIdentities(subscribed)
# Dictionaries.
subscribed = subscribe.subscribe({
'first': a,
'second': b
}, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, dict)
self._ExpectSubscribedIdentities(subscribed.values())
# Namedtuples.
# pylint: disable=invalid-name
TensorPair = collections.namedtuple('TensorPair', ['first', 'second'])
# pylint: enable=invalid-name
pair = TensorPair(a, b)
subscribed = subscribe.subscribe(
pair, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, TensorPair)
self._ExpectSubscribedIdentities(subscribed)
# Expect an exception to be raised for unsupported types.
with self.assertRaisesRegexp(TypeError, 'has invalid type'):
subscribe.subscribe(c.name,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
@test_util.run_deprecated_v1
def testCaching(self):
"""Confirm caching of control output is recalculated between calls."""
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.control_dependencies([a]):
c = constant_op.constant(42)
shared = {}
def sub(t):
shared[t] = shared.get(t, 0) + 1
return t
a = subscribe.subscribe(a,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with ops.control_dependencies([b]):
d = constant_op.constant(11)
# If it was using outdated cached control_outputs then
# evaling would not trigger the new subscription.
b = subscribe.subscribe(b,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.cached_session() as sess:
c_out = self.evaluate([c])
d_out = self.evaluate([d])
self.assertEqual(c_out, [42])
self.assertEqual(d_out, [11])
self.assertEqual(shared, {2: 1, 1: 1})
@test_util.run_deprecated_v1
def testIsSubscribedIdentity(self):
"""Confirm subscribed identity ops are correctly detected."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
idop = array_ops.identity(c)
c_sub = subscribe.subscribe(c, [])
self.assertFalse(subscribe._is_subscribed_identity(a))
self.assertFalse(subscribe._is_subscribed_identity(c))
self.assertFalse(subscribe._is_subscribed_identity(idop))
self.assertTrue(subscribe._is_subscribed_identity(c_sub))
@test_util.run_deprecated_v1
def testSubscribeExtend(self):
"""Confirm side effect are correctly added for different input types."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
shared = {}
def sub(t, name):
shared[name] = shared.get(name, 0) + 1
return t
# Subscribe with a first side effect graph, passing an unsubscribed tensor.
sub_graph1 = lambda t: sub(t, 'graph1')
c_sub = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype]))
# Add a second side effect graph, passing the tensor returned by the
# previous call to subscribe().
sub_graph2 = lambda t: sub(t, 'graph2')
c_sub2 = subscribe.subscribe(
c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype]))
# Add a third side effect graph, passing the original tensor.
sub_graph3 = lambda t: sub(t, 'graph3')
c_sub3 = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype]))
# Make sure there's only one identity op matching the source tensor's name.
graph_ops = ops.get_default_graph().get_operations()
name_prefix = c.op.name + '/subscription/Identity'
identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)]
self.assertEqual(1, len(identity_ops))
# Expect the objects returned by subscribe() to reference the same tensor.
self.assertIs(c_sub, c_sub2)
self.assertIs(c_sub, c_sub3)
# Expect the three side effect graphs to have been evaluated.
with self.cached_session() as sess:
self.evaluate([c_sub])
self.assertIn('graph1', shared)
self.assertIn('graph2', shared)
self.assertIn('graph3', shared)
@test_util.run_v1_only('b/120545219')
def testSubscribeVariable(self):
"""Confirm that variables can be subscribed."""
v1 = variables.VariableV1(0.0)
v2 = variables.VariableV1(4.0)
add = math_ops.add(v1, v2)
assign_v1 = v1.assign(3.0)
shared = []
def sub(t):
shared.append(t)
return t
v1_sub = subscribe.subscribe(
v1, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertTrue(subscribe._is_subscribed_identity(v1_sub))
with self.cached_session() as sess:
# Initialize the variables first.
self.evaluate([v1.initializer])
self.evaluate([v2.initializer])
# Expect the side effects to be triggered when evaluating the add op as
# it will read the value of the variable.
self.evaluate([add])
self.assertEqual(1, len(shared))
# Expect the side effect not to be triggered when evaluating the assign
# op as it will not access the 'read' output of the variable.
self.evaluate([assign_v1])
self.assertEqual(1, len(shared))
self.evaluate([add])
self.assertEqual(2, len(shared))
# Make sure the values read from the variable match the expected ones.
self.assertEqual([0.0, 3.0], shared)
@test_util.run_v1_only('b/120545219')
def testResourceType(self):
"""Confirm that subscribe correctly handles tensors with 'resource' type."""
tensor_array = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name='test',
size=3,
infer_shape=False)
writer = tensor_array.write(0, [[4.0, 5.0]])
reader = writer.read(0)
shared = []
def sub(t):
shared.append(t)
return t
# TensorArray's handle output tensor has a 'resource' type and cannot be
# subscribed as it's not 'numpy compatible' (see dtypes.py).
# Expect that the original tensor is returned when subscribing to it.
tensor_array_sub = subscribe.subscribe(
tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIs(tensor_array_sub, tensor_array.handle)
self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle))
with self.cached_session() as sess:
self.evaluate([reader])
self.assertEqual(0, len(shared))
@test_util.run_deprecated_v1
def testMultipleOutputs(self):
"""Handle subscriptions to multiple outputs from the same op."""
sparse_tensor_1 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
sparse_tensor_2 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4])
# This op has three outputs.
sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2)
self.assertEqual(3, len(sparse_add.op.outputs))
c1 = constant_op.constant(1)
with ops.control_dependencies(sparse_add.op.outputs):
# This op depends on all the three outputs.
neg = -c1
shared = []
def sub(t):
shared.append(t)
return t
# Subscribe the three outputs at once.
subscribe.subscribe(sparse_add.op.outputs,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.cached_session() as sess:
self.evaluate([neg])
# All three ops have been processed.
self.assertEqual(3, len(shared))
@test_util.run_deprecated_v1
def test_subscribe_tensors_on_different_devices(self):
"""Side effect ops are added with the same device of the subscribed op."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
with ops.device('cpu:0'):
add = math_ops.add(c1, c2)
with ops.device('cpu:1'):
mul = math_ops.multiply(c1, c2)
def sub(t):
return t
add_sub = subscribe.subscribe(
add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
mul_sub = subscribe.subscribe(
mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Expect the identity tensors injected by subscribe to have been created
# on the same device as their original tensors.
self.assertNotEqual(add_sub.device, mul_sub.device)
self.assertEqual(add.device, add_sub.device)
self.assertEqual(mul.device, mul_sub.device)
@test_util.run_v1_only('b/120545219')
def test_subscribe_tensors_within_control_flow_context(self):
"""Side effect ops are added with the same control flow context."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
x1 = math_ops.add(c1, c2)
x2 = math_ops.multiply(c1, c2)
cond = control_flow_ops.cond(
x1 < x2,
lambda: math_ops.add(c1, c2, name='then'),
lambda: math_ops.subtract(c1, c2, name='else'),
name='cond')
branch = ops.get_default_graph().get_tensor_by_name('cond/then:0')
def context(tensor):
return tensor.op._get_control_flow_context()
self.assertIs(context(x1), context(x2))
self.assertIsNot(context(x1), context(branch))
results = []
def sub(tensor):
results.append(tensor)
return tensor
tensors = [x1, branch, x2]
subscriptions = subscribe.subscribe(
tensors, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
for tensor, subscription in zip(tensors, subscriptions):
self.assertIs(context(tensor), context(subscription))
# Verify that sub(x1) and sub(x2) are in the same context.
self.assertIs(context(subscriptions[0]), context(subscriptions[2]))
# Verify that sub(x1) and sub(branch) are not.
self.assertIsNot(context(subscriptions[0]), context(subscriptions[1]))
with self.cached_session() as sess:
self.evaluate(cond)
self.assertEqual(3, len(results))
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/pip/_vendor/requests/packages/urllib3/connection.py | 511 | 11617 | from __future__ import absolute_import
import datetime
import logging
import os
import sys
import socket
from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection
from http.client import HTTPException # noqa: unused in this module
except ImportError:
from httplib import HTTPConnection as _HTTPConnection
from httplib import HTTPException # noqa: unused in this module
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
from ._collections import HTTPHeaderDict
log = logging.getLogger(__name__)
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class DummyConnection(object):
"""Used to detect a failed ConnectionCls import."""
pass
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http']
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
body with chunked encoding and not as one block
"""
headers = HTTPHeaderDict(headers if headers is not None else {})
skip_accept_encoding = 'accept-encoding' in headers
self.putrequest(method, url, skip_accept_encoding=skip_accept_encoding)
for header, value in headers.items():
self.putheader(header, value)
if 'transfer-encoding' not in headers:
self.putheader('Transfer-Encoding', 'chunked')
self.endheaders()
if body is not None:
stringish_types = six.string_types + (six.binary_type,)
if isinstance(body, stringish_types):
body = (body,)
for chunk in body:
if not chunk:
continue
if not isinstance(chunk, six.binary_type):
chunk = chunk.encode('utf8')
len_str = hex(len(chunk))[2:]
self.send(len_str.encode('utf-8'))
self.send(b'\r\n')
self.send(chunk)
self.send(b'\r\n')
# After the if clause, to always have a closed body
self.send(b'0\r\n\r\n')
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https']
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
HTTPConnection.__init__(self, host, port, strict=strict,
timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ca_cert_dir = None
ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None,
ca_cert_dir=None):
if (ca_certs or ca_cert_dir) and cert_reqs is None:
cert_reqs = 'CERT_REQUIRED'
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
def connect(self):
# Add certificate verification
conn = self._new_conn()
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn((
'System time is way off (before {0}). This will probably '
'lead to SSL verification errors').format(RECENT_DATE),
SystemTimeWarning
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
server_hostname=hostname,
ssl_version=resolved_ssl_version)
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif resolved_cert_reqs != ssl.CERT_NONE \
and self.assert_hostname is not False:
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
'Certificate for {0} has no `subjectAltName`, falling back to check for a '
'`commonName` for now. This feature is being removed by major browsers and '
'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
'for details.)'.format(hostname)),
SubjectAltNameWarning
)
_match_hostname(cert, self.assert_hostname or hostname)
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED or
self.assert_fingerprint is not None)
def _match_hostname(cert, asserted_hostname):
try:
match_hostname(cert, asserted_hostname)
except CertificateError as e:
log.error(
'Certificate did not match expected hostname: %s. '
'Certificate: %s', asserted_hostname, cert
)
# Add cert to exception and reraise so client code can inspect
# the cert when catching the exception, if they want to
e._peer_cert = cert
raise
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
else:
HTTPSConnection = DummyConnection
| bsd-3-clause |
j0lly/molecule | test/unit/verifier/test_ansible_lint.py | 3 | 1797 | # Copyright (c) 2015-2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pytest
import sh
from molecule.verifier import ansible_lint
@pytest.fixture()
def ansible_lint_instance(molecule_instance):
return ansible_lint.AnsibleLint(molecule_instance)
def test_execute(monkeypatch, patched_run_command, ansible_lint_instance):
monkeypatch.setenv('HOME', '/foo/bar')
ansible_lint_instance.execute()
parts = pytest.helpers.os_split(ansible_lint_instance._playbook)
assert 'playbook_data.yml' == parts[-1]
x = sh.ansible_lint.bake(ansible_lint_instance._playbook, '--exclude .git',
'--exclude .vagrant', '--exclude .molecule')
patched_run_command.assert_called_once_with(x, debug=None)
| mit |
glove747/liberty-neutron | neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py | 32 | 2545 | # Copyright 2015 Huawei Technologies India Pvt Ltd, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""qos db changes
Revision ID: 48153cb5f051
Revises: 1b4c6e320f79
Create Date: 2015-06-24 17:03:34.965101
"""
# revision identifiers, used by Alembic.
revision = '48153cb5f051'
down_revision = '1b4c6e320f79'
from alembic import op
import sqlalchemy as sa
from neutron.api.v2 import attributes as attrs
def upgrade():
op.create_table(
'qos_policies',
sa.Column('id', sa.String(length=36), primary_key=True),
sa.Column('name', sa.String(length=attrs.NAME_MAX_LEN)),
sa.Column('description', sa.String(length=attrs.DESCRIPTION_MAX_LEN)),
sa.Column('shared', sa.Boolean(), nullable=False),
sa.Column('tenant_id', sa.String(length=attrs.TENANT_ID_MAX_LEN),
index=True))
op.create_table(
'qos_network_policy_bindings',
sa.Column('policy_id', sa.String(length=36),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False),
sa.Column('network_id', sa.String(length=36),
sa.ForeignKey('networks.id', ondelete='CASCADE'),
nullable=False, unique=True))
op.create_table(
'qos_port_policy_bindings',
sa.Column('policy_id', sa.String(length=36),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False),
sa.Column('port_id', sa.String(length=36),
sa.ForeignKey('ports.id', ondelete='CASCADE'),
nullable=False, unique=True))
op.create_table(
'qos_bandwidth_limit_rules',
sa.Column('id', sa.String(length=36), primary_key=True),
sa.Column('qos_policy_id', sa.String(length=36),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False, unique=True),
sa.Column('max_kbps', sa.Integer()),
sa.Column('max_burst_kbps', sa.Integer()))
| apache-2.0 |
Zentyal/samba | source4/scripting/devel/repl_cleartext_pwd.py | 43 | 16362 | #!/usr/bin/env python
#
# Copyright Stefan Metzmacher 2011-2012
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This is useful to sync passwords from an AD domain.
#
# $
# $ source4/scripting/devel/repl_cleartext_pwd.py \
# -Uadministrator%A1b2C3d4 \
# 172.31.9.219 DC=bla,DC=base /tmp/cookie cleartext_utf8 131085 displayName
# # starting at usn[0]
# dn: CN=Test User1,CN=Users,DC=bla,DC=base
# cleartext_utf8: A1b2C3d4
# displayName:: VABlAHMAdAAgAFUAcwBlAHIAMQA=
#
# # up to usn[16449]
# $
# $ source4/scripting/devel/repl_cleartext_pwd.py \
# -Uadministrator%A1b2C3d4
# 172.31.9.219 DC=bla,DC=base cookie_file cleartext_utf8 131085 displayName
# # starting at usn[16449]
# # up to usn[16449]
# $
#
import sys
# Find right direction when running from source tree
sys.path.insert(0, "bin/python")
import samba.getopt as options
from optparse import OptionParser
from samba.dcerpc import drsuapi, drsblobs, misc
from samba.ndr import ndr_pack, ndr_unpack, ndr_print
import binascii
import hashlib
import Crypto.Cipher.ARC4
import struct
import os
from ldif import LDIFWriter
class globals:
def __init__(self):
self.global_objs = {}
self.ldif = LDIFWriter(sys.stdout)
def add_attr(self, dn, attname, vals):
if dn not in self.global_objs:
self.global_objs[dn] = {}
self.global_objs[dn][attname] = vals
def print_all(self):
for dn, obj in self.global_objs.items():
self.ldif.unparse(dn, obj)
continue
self.global_objs = {}
def attid_equal(a1,a2):
return (a1 & 0xffffffff) == (a2 & 0xffffffff)
########### main code ###########
if __name__ == "__main__":
    parser = OptionParser("repl_cleartext_pwd.py [options] server dn cookie_file clear_utf8_name [attid attname attmode] [clear_utf16_name]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
(opts, args) = parser.parse_args()
if len(args) == 4:
pass
elif len(args) == 7:
pass
elif len(args) >= 8:
pass
else:
parser.error("more arguments required - given=%d" % (len(args)))
server = args[0]
dn = args[1]
cookie_file = args[2]
if len(cookie_file) == 0:
cookie_file = None
clear_utf8_name = args[3]
if len(args) >= 7:
try:
attid = int(args[4], 16)
except Exception:
attid = int(args[4])
attname = args[5]
attmode = args[6]
if attmode not in ["raw", "utf8"]:
parser.error("attmode should be 'raw' or 'utf8'")
else:
attid = -1
attname = None
attmode = "raw"
if len(args) >= 8:
clear_utf16_name = args[7]
else:
clear_utf16_name = None
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
if not creds.authentication_requested():
parser.error("You must supply credentials")
gls = globals()
try:
f = open(cookie_file, 'r')
store_blob = f.read()
f.close()
store_hdr = store_blob[0:28]
(store_version, \
store_dn_len, store_dn_ofs, \
store_hwm_len, store_hwm_ofs, \
store_utdv_len, store_utdv_ofs) = \
struct.unpack("<LLLLLLL", store_hdr)
store_dn = store_blob[store_dn_ofs:store_dn_ofs+store_dn_len]
store_hwm_blob = store_blob[store_hwm_ofs:store_hwm_ofs+store_hwm_len]
store_utdv_blob = store_blob[store_utdv_ofs:store_utdv_ofs+store_utdv_len]
store_hwm = ndr_unpack(drsuapi.DsReplicaHighWaterMark, store_hwm_blob)
store_utdv = ndr_unpack(drsblobs.replUpToDateVectorBlob, store_utdv_blob)
assert store_dn == dn
#print "%s" % ndr_print(store_hwm)
#print "%s" % ndr_print(store_utdv)
except Exception:
store_dn = dn
store_hwm = drsuapi.DsReplicaHighWaterMark()
store_hwm.tmp_highest_usn = 0
store_hwm.reserved_usn = 0
store_hwm.highest_usn = 0
store_utdv = None
binding_str = "ncacn_ip_tcp:%s[spnego,seal]" % server
drs_conn = drsuapi.drsuapi(binding_str, lp, creds)
bind_info = drsuapi.DsBindInfoCtr()
bind_info.length = 28
bind_info.info = drsuapi.DsBindInfo28()
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_BASE
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT
(info, drs_handle) = drs_conn.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info)
null_guid = misc.GUID()
naming_context = drsuapi.DsReplicaObjectIdentifier()
naming_context.dn = dn
highwatermark = store_hwm
uptodateness_vector = None
if store_utdv is not None:
uptodateness_vector = drsuapi.DsReplicaCursorCtrEx()
if store_utdv.version == 1:
uptodateness_vector.cursors = store_utdv.cursors
elif store_utdv.version == 2:
cursors = []
for i in range(0, store_utdv.ctr.count):
cursor = drsuapi.DsReplicaCursor()
cursor.source_dsa_invocation_id = store_utdv.ctr.cursors[i].source_dsa_invocation_id
cursor.highest_usn = store_utdv.ctr.cursors[i].highest_usn
cursors.append(cursor)
uptodateness_vector.cursors = cursors
req8 = drsuapi.DsGetNCChangesRequest8()
req8.destination_dsa_guid = null_guid
req8.source_dsa_invocation_id = null_guid
req8.naming_context = naming_context
req8.highwatermark = highwatermark
req8.uptodateness_vector = uptodateness_vector
req8.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
drsuapi.DRSUAPI_DRS_PER_SYNC |
drsuapi.DRSUAPI_DRS_GET_ANC |
drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
drsuapi.DRSUAPI_DRS_WRIT_REP)
req8.max_object_count = 402
req8.max_ndr_size = 402116
req8.extended_op = 0
req8.fsmo_info = 0
req8.partial_attribute_set = None
req8.partial_attribute_set_ex = None
req8.mapping_ctr.num_mappings = 0
req8.mapping_ctr.mappings = None
user_session_key = drs_conn.user_session_key
print "# starting at usn[%d]" % (highwatermark.highest_usn)
while True:
(level, ctr) = drs_conn.DsGetNCChanges(drs_handle, 8, req8)
if ctr.first_object == None and ctr.object_count != 0:
raise RuntimeError("DsGetNCChanges: NULL first_object with object_count=%u" % (ctr.object_count))
obj_item = ctr.first_object
while obj_item is not None:
obj = obj_item.object
if obj.identifier is None:
obj_item = obj_item.next_object
continue
#print '%s' % obj.identifier.dn
is_deleted = False
for i in range(0, obj.attribute_ctr.num_attributes):
attr = obj.attribute_ctr.attributes[i]
if attid_equal(attr.attid, drsuapi.DRSUAPI_ATTID_isDeleted):
is_deleted = True
if is_deleted:
obj_item = obj_item.next_object
continue
spl_crypt = None
attvals = None
for i in range(0, obj.attribute_ctr.num_attributes):
attr = obj.attribute_ctr.attributes[i]
if attid_equal(attr.attid, attid):
attvals = []
for j in range(0, attr.value_ctr.num_values):
assert attr.value_ctr.values[j].blob is not None
val_raw = attr.value_ctr.values[j].blob
val = None
if attmode == "utf8":
val_unicode = unicode(val_raw, 'utf-16-le')
val = val_unicode.encode('utf-8')
elif attmode == "raw":
val = val_raw
else:
assert False, "attmode[%s]" % attmode
attvals.append(val)
if not attid_equal(attr.attid, drsuapi.DRSUAPI_ATTID_supplementalCredentials):
continue
assert attr.value_ctr.num_values <= 1
if attr.value_ctr.num_values == 0:
break
assert attr.value_ctr.values[0].blob is not None
spl_crypt = attr.value_ctr.values[0].blob
if spl_crypt is None:
obj_item = obj_item.next_object
continue
assert len(spl_crypt) >= 20
confounder = spl_crypt[0:16]
enc_buffer = spl_crypt[16:]
m5 = hashlib.md5()
m5.update(user_session_key)
m5.update(confounder)
enc_key = m5.digest()
rc4 = Crypto.Cipher.ARC4.new(enc_key)
plain_buffer = rc4.decrypt(enc_buffer)
(crc32_v) = struct.unpack("<L", plain_buffer[0:4])
attr_val = plain_buffer[4:]
crc32_c = binascii.crc32(attr_val) & 0xffffffff
assert int(crc32_v[0]) == int(crc32_c), "CRC32 0x%08X != 0x%08X" % (crc32_v[0], crc32_c)
spl = ndr_unpack(drsblobs.supplementalCredentialsBlob, attr_val)
#print '%s' % ndr_print(spl)
cleartext_hex = None
for i in range(0, spl.sub.num_packages):
pkg = spl.sub.packages[i]
if pkg.name != "Primary:CLEARTEXT":
continue
cleartext_hex = pkg.data
if cleartext_hex is not None:
cleartext_utf16 = binascii.a2b_hex(cleartext_hex)
if clear_utf16_name is not None:
gls.add_attr(obj.identifier.dn, clear_utf16_name, [cleartext_utf16])
try:
cleartext_unicode = unicode(cleartext_utf16, 'utf-16-le')
cleartext_utf8 = cleartext_unicode.encode('utf-8')
gls.add_attr(obj.identifier.dn, clear_utf8_name, [cleartext_utf8])
except Exception:
pass
if attvals is not None:
gls.add_attr(obj.identifier.dn, attname, attvals)
krb5_old_hex = None
for i in range(0, spl.sub.num_packages):
pkg = spl.sub.packages[i]
if pkg.name != "Primary:Kerberos":
continue
krb5_old_hex = pkg.data
if krb5_old_hex is not None:
krb5_old_raw = binascii.a2b_hex(krb5_old_hex)
krb5_old = ndr_unpack(drsblobs.package_PrimaryKerberosBlob, krb5_old_raw, allow_remaining=True)
#print '%s' % ndr_print(krb5_old)
krb5_new_hex = None
for i in range(0, spl.sub.num_packages):
pkg = spl.sub.packages[i]
if pkg.name != "Primary:Kerberos-Newer-Keys":
continue
krb5_new_hex = pkg.data
if krb5_new_hex is not None:
krb5_new_raw = binascii.a2b_hex(krb5_new_hex)
krb5_new = ndr_unpack(drsblobs.package_PrimaryKerberosBlob, krb5_new_raw, allow_remaining=True)
#print '%s' % ndr_print(krb5_new)
obj_item = obj_item.next_object
gls.print_all()
if ctr.more_data == 0:
store_hwm = ctr.new_highwatermark
store_utdv = drsblobs.replUpToDateVectorBlob()
store_utdv.version = ctr.uptodateness_vector.version
store_utdv_ctr = store_utdv.ctr
store_utdv_ctr.count = ctr.uptodateness_vector.count
store_utdv_ctr.cursors = ctr.uptodateness_vector.cursors
store_utdv.ctr = store_utdv_ctr
#print "%s" % ndr_print(store_hwm)
#print "%s" % ndr_print(store_utdv)
store_hwm_blob = ndr_pack(store_hwm)
store_utdv_blob = ndr_pack(store_utdv)
#
# uint32_t version '1'
# uint32_t dn_str_len
# uint32_t dn_str_ofs
# uint32_t hwm_blob_len
# uint32_t hwm_blob_ofs
# uint32_t utdv_blob_len
# uint32_t utdv_blob_ofs
store_hdr_len = 7 * 4
dn_ofs = store_hdr_len
hwm_ofs = dn_ofs + len(dn)
utdv_ofs = hwm_ofs + len(store_hwm_blob)
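# Resulting cookie-file layout (offsets in bytes): a 28-byte header of seven
# little-endian uint32s, then the DN string at dn_ofs, the packed
# highwatermark blob at hwm_ofs, and the up-to-dateness vector blob at utdv_ofs.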
store_blob = struct.pack("<LLLLLLL", 1, \
len(dn), dn_ofs,
len(store_hwm_blob), hwm_ofs, \
len(store_utdv_blob), utdv_ofs) + \
dn + store_hwm_blob + store_utdv_blob
tmp_file = "%s.tmp" % cookie_file
f = open(tmp_file, 'wb')
f.write(store_blob)
f.close()
os.rename(tmp_file, cookie_file)
print "# up to usn[%d]" % (ctr.new_highwatermark.highest_usn)
break
print "# up to tmp_usn[%d]" % (ctr.new_highwatermark.highest_usn)
req8.highwatermark = ctr.new_highwatermark
| gpl-3.0 |
163gal/Time-Line | libs/wx/py/PySlices.py | 10 | 3159 | #!/usr/bin/env python
"""PySlices is a python block code editor / shell and namespace browser application."""
# The next two lines, and the other code below that makes use of
# ``__main__`` and ``original``, serve the purpose of cleaning up the
# main namespace to look as much as possible like the regular Python
# shell environment.
import __main__
original = __main__.__dict__.keys()
__author__ = "Patrick K. O'Brien <[email protected]> / "
__author__ += "David N. Mashburn <[email protected]>"
__cvsid__ = "$Id: PySlices.py 36607 2005-12-30 23:02:03Z RD $" # Hmmm...
__revision__ = "$Revision: 36607 $"[11:-2] #Hmmm...
import wx
import os
class App(wx.App):
"""PySlices standalone application."""
def __init__(self, filename=None):
self.filename = filename
import wx
wx.App.__init__(self, redirect=False)
def OnInit(self):
import os
import wx
from wx import py
self.SetAppName("pyslices")
confDir = wx.StandardPaths.Get().GetUserDataDir()
if not os.path.exists(confDir):
os.mkdir(confDir)
fileName = os.path.join(confDir, 'config')
self.config = wx.FileConfig(localFilename=fileName)
self.config.SetRecordDefaults(True)
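# Settings are persisted through wx.FileConfig in a file named "config" inside
# the wx user-data directory for "pyslices" (exact path is platform dependent,
# e.g. something like ~/.pyslices/config on older Unix wx builds -- an
# assumption, not verified here).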
self.frame = py.crustslices.CrustSlicesFrame(config=self.config, dataDir=confDir,
filename=self.filename)
## self.frame.startupFileName = os.path.join(confDir,'pycrust_startup')
## self.frame.historyFileName = os.path.join(confDir,'pycrust_history')
self.frame.Show()
self.SetTopWindow(self.frame)
return True
'''
The main() function needs to handle being imported, such as with the
pycrust script that wxPython installs:
#!/usr/bin/env python
from wx.py.PySlices import main
main()
'''
def main(filename=None):
"""The main function for the PySlices program."""
# Cleanup the main namespace, leaving the App class.
import sys
if not filename and len(sys.argv) > 1:
filename = sys.argv[1]
if filename:
filename = os.path.realpath(filename)
import __main__
md = __main__.__dict__
keepers = original
keepers.append('App')
keepers.append('filename')
for key in md.keys():
if key not in keepers:
del md[key]
# Create an application instance.
app = App(filename=filename)
# Mimic the contents of the standard Python shell's sys.path.
import sys
if sys.path[0]:
sys.path[0] = ''
# Add the application object to the sys module's namespace.
# This allows a shell user to do:
# >>> import sys
# >>> sys.app.whatever
sys.app = app
del sys
# Cleanup the main namespace some more.
if md.has_key('App') and md['App'] is App:
del md['App']
if md.has_key('filename') and md['filename'] is filename:
del md['filename']
if md.has_key('__main__') and md['__main__'] is __main__:
del md['__main__']
# Start the wxPython event loop.
app.MainLoop()
if __name__ == '__main__':
main()
| gpl-3.0 |
lenovo-yt2-dev/android_kernel_lenovo_baytrail | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
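# Auto-vivifying nested counters: syscalls[comm][pid][syscall_id][ret] -> number
# of calls that returned that (negative) value, filled in by
# raw_syscalls__sys_exit() below.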
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
rlutz/xorn | tests/cpython/storage/get_obdata.py | 1 | 1953 | # Copyright (C) 2013-2019 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import xorn.storage, Setup
def assert_cannot_get(rev, ob):
try:
rev.get_object_data(ob)
except KeyError:
pass
else:
raise AssertionError
rev0, rev1, rev2, rev3, ob0, ob1a, ob1b = Setup.setup()
assert_cannot_get(rev0, ob0)
assert_cannot_get(rev0, ob1a)
assert_cannot_get(rev0, ob1b)
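# The checks below exercise get_object_data(): revisions that do not contain an
# object must raise KeyError, and ones that do must return data of the same type
# as, but not equal to, the corresponding Setup constant.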
data = rev1.get_object_data(ob0)
assert data is not None
assert data != Setup.line_data
assert type(data) == type(Setup.line_data)
assert_cannot_get(rev1, ob1a)
assert_cannot_get(rev1, ob1b)
data = rev2.get_object_data(ob0)
assert data is not None
assert data != Setup.line_data
assert type(data) == type(Setup.line_data)
data = rev2.get_object_data(ob1a)
assert data is not None
assert data != Setup.box_data
assert type(data) == type(Setup.box_data)
data = rev2.get_object_data(ob1b)
assert data is not None
assert data != Setup.circle_data
assert type(data) == type(Setup.circle_data)
data = rev3.get_object_data(ob0)
assert data is not None
assert data != Setup.net_data
assert type(data) == type(Setup.net_data)
assert_cannot_get(rev3, ob1a)
data = rev3.get_object_data(ob1b)
assert data is not None
assert data != Setup.circle_data
assert type(data) == type(Setup.circle_data)
| gpl-2.0 |
qguv/config | weechat/community/wee-slack/wee_slack.py | 1 | 197049 | # Copyright (c) 2014-2016 Ryan Huber <[email protected]>
# Copyright (c) 2015-2018 Tollef Fog Heen <[email protected]>
# Copyright (c) 2015-2020 Trygve Aaberge <[email protected]>
# Released under the MIT license.
from __future__ import print_function, unicode_literals
from collections import OrderedDict
from datetime import date, datetime, timedelta
from functools import partial, wraps
from io import StringIO
from itertools import chain, count, islice
import errno
import textwrap
import time
import json
import hashlib
import os
import re
import sys
import traceback
import collections
import ssl
import random
import socket
import string
# Prevent websocket from using numpy (it's an optional dependency). We do this
# because numpy causes python (and thus weechat) to crash when it's reloaded.
# See https://github.com/numpy/numpy/issues/11925
sys.modules["numpy"] = None
from websocket import ABNF, create_connection, WebSocketConnectionClosedException
try:
basestring # Python 2
unicode
str = unicode
except NameError: # Python 3
basestring = unicode = str
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from json import JSONDecodeError
except:
JSONDecodeError = ValueError
# hack to make tests possible.. better way?
try:
import weechat
except ImportError:
pass
SCRIPT_NAME = "slack"
SCRIPT_AUTHOR = "Ryan Huber <[email protected]>"
SCRIPT_VERSION = "2.4.0"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Extends weechat for typing notification/search/etc on slack.com"
REPO_URL = "https://github.com/wee-slack/wee-slack"
BACKLOG_SIZE = 200
SCROLLBACK_SIZE = 500
RECORD_DIR = "/tmp/weeslack-debug"
SLACK_API_TRANSLATOR = {
"channel": {
"history": "channels.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "channels.info",
},
"im": {
"history": "im.history",
"join": "conversations.open",
"leave": "conversations.close",
"mark": "im.mark",
},
"mpim": {
"history": "mpim.history",
"join": "mpim.open", # conversations.open lacks unread_count_display
"leave": "conversations.close",
"mark": "mpim.mark",
"info": "groups.info",
},
"group": {
"history": "groups.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "groups.mark",
"info": "groups.info"
},
"private": {
"history": "conversations.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "conversations.mark",
"info": "conversations.info",
},
"shared": {
"history": "conversations.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "conversations.info",
},
"thread": {
"history": None,
"join": None,
"leave": None,
"mark": None,
}
}
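# Lookup table: channel type -> operation ("history"/"join"/"leave"/"mark"/
# "info") -> Slack API method used for it; a None value (as for threads) means
# there is no direct API call for that operation on that channel type.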
###### Decorators have to be up here
def slack_buffer_or_ignore(f):
"""
Only run this function if we're in a slack buffer, else ignore
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_OK
return f(data, current_buffer, *args, **kwargs)
return wrapper
def slack_buffer_required(f):
"""
Only run this function if we're in a slack buffer, else print error
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
command_name = f.__name__.replace('command_', '', 1)
w.prnt('', 'slack: command "{}" must be executed on slack buffer'.format(command_name))
return w.WEECHAT_RC_ERROR
return f(data, current_buffer, *args, **kwargs)
return wrapper
def utf8_decode(f):
"""
Decode all arguments from byte strings to unicode strings. Use this for
functions called from outside of this script, e.g. callbacks from weechat.
"""
@wraps(f)
def wrapper(*args, **kwargs):
return f(*decode_from_utf8(args), **decode_from_utf8(kwargs))
return wrapper
NICK_GROUP_HERE = "0|Here"
NICK_GROUP_AWAY = "1|Away"
NICK_GROUP_EXTERNAL = "2|External"
sslopt_ca_certs = {}
if hasattr(ssl, "get_default_verify_paths") and callable(ssl.get_default_verify_paths):
ssl_defaults = ssl.get_default_verify_paths()
if ssl_defaults.cafile is not None:
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
EMOJI = {}
EMOJI_WITH_SKIN_TONES_REVERSE = {}
###### Unicode handling
def encode_to_utf8(data):
if sys.version_info.major > 2:
return data
elif isinstance(data, unicode):
return data.encode('utf-8')
if isinstance(data, bytes):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(encode_to_utf8, data.items()))
elif isinstance(data, collections.Iterable):
return type(data)(map(encode_to_utf8, data))
else:
return data
def decode_from_utf8(data):
if sys.version_info.major > 2:
return data
elif isinstance(data, bytes):
return data.decode('utf-8')
if isinstance(data, unicode):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(decode_from_utf8, data.items()))
elif isinstance(data, collections.Iterable):
return type(data)(map(decode_from_utf8, data))
else:
return data
class WeechatWrapper(object):
def __init__(self, wrapped_class):
self.wrapped_class = wrapped_class
# Helper method used to encode/decode method calls.
def wrap_for_utf8(self, method):
def hooked(*args, **kwargs):
result = method(*encode_to_utf8(args), **encode_to_utf8(kwargs))
# Prevent wrapped_class from becoming unwrapped
if result == self.wrapped_class:
return self
return decode_from_utf8(result)
return hooked
# Encode and decode everything sent to/received from weechat. We use the
# unicode type internally in wee-slack, but has to send utf8 to weechat.
def __getattr__(self, attr):
orig_attr = self.wrapped_class.__getattribute__(attr)
if callable(orig_attr):
return self.wrap_for_utf8(orig_attr)
else:
return decode_from_utf8(orig_attr)
# Ensure all lines sent to weechat specifies a prefix. For lines after the
# first, we want to disable the prefix, which is done by specifying a space.
def prnt_date_tags(self, buffer, date, tags, message):
message = message.replace("\n", "\n \t")
return self.wrap_for_utf8(self.wrapped_class.prnt_date_tags)(buffer, date, tags, message)
class ProxyWrapper(object):
def __init__(self):
self.proxy_name = w.config_string(w.config_get('weechat.network.proxy_curl'))
self.proxy_string = ""
self.proxy_type = ""
self.proxy_address = ""
self.proxy_port = ""
self.proxy_user = ""
self.proxy_password = ""
self.has_proxy = False
if self.proxy_name:
self.proxy_string = "weechat.proxy.{}".format(self.proxy_name)
self.proxy_type = w.config_string(w.config_get("{}.type".format(self.proxy_string)))
if self.proxy_type == "http":
self.proxy_address = w.config_string(w.config_get("{}.address".format(self.proxy_string)))
self.proxy_port = w.config_integer(w.config_get("{}.port".format(self.proxy_string)))
self.proxy_user = w.config_string(w.config_get("{}.username".format(self.proxy_string)))
self.proxy_password = w.config_string(w.config_get("{}.password".format(self.proxy_string)))
self.has_proxy = True
else:
w.prnt("", "\nWarning: weechat.network.proxy_curl is set to {} type (name : {}, conf string : {}). Only HTTP proxy is supported.\n\n".format(self.proxy_type, self.proxy_name, self.proxy_string))
def curl(self):
if not self.has_proxy:
return ""
if self.proxy_user and self.proxy_password:
user = "{}:{}@".format(self.proxy_user, self.proxy_password)
else:
user = ""
if self.proxy_port:
port = ":{}".format(self.proxy_port)
else:
port = ""
return "-x{}{}{}".format(user, self.proxy_address, port)
##### Helpers
def colorize_string(color, string, reset_color='reset'):
if color:
return w.color(color) + string + w.color(reset_color)
else:
return string
def print_error(message, buffer='', warning=False):
prefix = 'Warning' if warning else 'Error'
w.prnt(buffer, '{}{}: {}'.format(w.prefix('error'), prefix, message))
def token_for_print(token):
return '{}...{}'.format(token[:15], token[-10:])
def format_exc_tb():
return decode_from_utf8(traceback.format_exc())
def format_exc_only():
etype, value, _ = sys.exc_info()
return ''.join(decode_from_utf8(traceback.format_exception_only(etype, value)))
def get_nick_color(nick):
info_name_prefix = "irc_" if int(weechat_version) < 0x1050000 else ""
return w.info_get(info_name_prefix + "nick_color_name", nick)
def get_thread_color(thread_id):
if config.color_thread_suffix == 'multiple':
return get_nick_color(thread_id)
else:
return config.color_thread_suffix
def sha1_hex(s):
return hashlib.sha1(s.encode('utf-8')).hexdigest()
def get_functions_with_prefix(prefix):
return {name[len(prefix):]: ref for name, ref in globals().items()
if name.startswith(prefix)}
def handle_socket_error(exception, team, caller_name):
if not (isinstance(exception, WebSocketConnectionClosedException) or
exception.errno in (errno.EPIPE, errno.ECONNRESET, errno.ETIMEDOUT)):
raise
w.prnt(team.channel_buffer,
'Lost connection to slack team {} (on {}), reconnecting.'.format(
team.domain, caller_name))
dbg('Socket failed on {} with exception:\n{}'.format(
caller_name, format_exc_tb()), level=5)
team.set_disconnected()
EMOJI_NAME_REGEX = re.compile(':([^: ]+):')
EMOJI_REGEX_STRING = '[\U00000080-\U0010ffff]+'
def regex_match_to_emoji(match, include_name=False):
emoji = match.group(1)
full_match = match.group()
char = EMOJI.get(emoji, full_match)
if include_name and char != full_match:
return '{} ({})'.format(char, full_match)
return char
def replace_string_with_emoji(text):
if config.render_emoji_as_string == 'both':
return EMOJI_NAME_REGEX.sub(
partial(regex_match_to_emoji, include_name=True),
text,
)
elif config.render_emoji_as_string:
return text
return EMOJI_NAME_REGEX.sub(regex_match_to_emoji, text)
def replace_emoji_with_string(text):
return EMOJI_WITH_SKIN_TONES_REVERSE.get(text, text)
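# Rough behaviour (assumes ":smile:" exists in the EMOJI table loaded at
# startup): with render_emoji_as_string unset, ":smile:" in a message is
# replaced by the emoji character; with the "both" setting it becomes
# "<char> (:smile:)"; with any other truthy setting the text is returned
# untouched. replace_emoji_with_string() goes the other way, mapping an emoji
# character back to its short name (used when sending reactions).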
###### New central Event router
class EventRouter(object):
def __init__(self):
"""
complete
Eventrouter is the central hub we use to route:
1) incoming websocket data
2) outgoing http requests and incoming replies
3) local requests
It has a recorder that, when enabled, logs most events
to the location specified in RECORD_DIR.
"""
self.queue = []
self.slow_queue = []
self.slow_queue_timer = 0
self.teams = {}
self.subteams = {}
self.context = {}
self.weechat_controller = WeechatController(self)
self.previous_buffer = ""
self.reply_buffer = {}
self.cmds = get_functions_with_prefix("command_")
self.proc = get_functions_with_prefix("process_")
self.handlers = get_functions_with_prefix("handle_")
self.local_proc = get_functions_with_prefix("local_process_")
self.shutting_down = False
self.recording = False
self.recording_path = "/tmp"
self.handle_next_hook = None
self.handle_next_hook_interval = -1
def record(self):
"""
complete
Toggles the event recorder and creates a directory for data if enabled.
"""
self.recording = not self.recording
if self.recording:
if not os.path.exists(RECORD_DIR):
os.makedirs(RECORD_DIR)
def record_event(self, message_json, file_name_field, subdir=None):
"""
complete
Called each time you want to record an event.
message_json is a json in dict form
file_name_field is the json key whose value you want to be part of the file name
"""
now = time.time()
if subdir:
directory = "{}/{}".format(RECORD_DIR, subdir)
else:
directory = RECORD_DIR
if not os.path.exists(directory):
os.makedirs(directory)
mtype = message_json.get(file_name_field, 'unknown')
f = open('{}/{}-{}.json'.format(directory, now, mtype), 'w')
f.write("{}".format(json.dumps(message_json)))
f.close()
def store_context(self, data):
"""
A place to store data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
identifier = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(40))
self.context[identifier] = data
dbg("stored context {} {} ".format(identifier, data.url))
return identifier
def retrieve_context(self, identifier):
"""
A place to retrieve data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
return self.context.get(identifier)
def delete_context(self, identifier):
"""
Requests can span multiple requests, so we may need to delete this as a last step
"""
if identifier in self.context:
del self.context[identifier]
def shutdown(self):
"""
complete
This toggles shutdown mode. Shutdown mode tells us not to
talk to Slack anymore. Without this, typing /quit will trigger
a race with the buffer close callback and may result in you
leaving every slack channel.
"""
self.shutting_down = not self.shutting_down
def register_team(self, team):
"""
complete
Adds a team to the list of known teams for this EventRouter.
"""
if isinstance(team, SlackTeam):
self.teams[team.get_team_hash()] = team
else:
raise InvalidType(type(team))
def reconnect_if_disconnected(self):
for team in self.teams.values():
time_since_last_ping = time.time() - team.last_ping_time
time_since_last_pong = time.time() - team.last_pong_time
if team.connected and time_since_last_ping < 5 and time_since_last_pong > 30:
w.prnt(team.channel_buffer,
'Lost connection to slack team {} (no pong), reconnecting.'.format(
team.domain))
team.set_disconnected()
if not team.connected:
team.connect()
dbg("reconnecting {}".format(team))
@utf8_decode
def receive_ws_callback(self, team_hash, fd):
"""
This is called by the global method of the same name.
It is triggered when we have incoming data on a websocket,
which needs to be read. Once it is read, we will ensure
the data is valid JSON, add metadata, and place it back
on the queue for processing as JSON.
"""
team = self.teams[team_hash]
while True:
try:
# Read the data from the websocket associated with this team.
opcode, data = team.ws.recv_data(control_frame=True)
except ssl.SSLWantReadError:
# No more data to read at this time.
return w.WEECHAT_RC_OK
except (WebSocketConnectionClosedException, socket.error) as e:
handle_socket_error(e, team, 'receive')
return w.WEECHAT_RC_OK
if opcode == ABNF.OPCODE_PONG:
team.last_pong_time = time.time()
return w.WEECHAT_RC_OK
elif opcode != ABNF.OPCODE_TEXT:
return w.WEECHAT_RC_OK
message_json = json.loads(data.decode('utf-8'))
message_json["wee_slack_metadata_team"] = team
if self.recording:
self.record_event(message_json, 'type', 'websocket')
self.receive(message_json)
return w.WEECHAT_RC_OK
@utf8_decode
def receive_httprequest_callback(self, data, command, return_code, out, err):
"""
complete
Receives the result of an http request we previously handed
off to weechat (weechat bundles libcurl). Weechat can fragment
replies, so it buffers them until the reply is complete.
It is then populated with metadata here so we can identify
where the request originated and route properly.
"""
request_metadata = self.retrieve_context(data)
dbg("RECEIVED CALLBACK with request of {} id of {} and code {} of length {}".format(request_metadata.request, request_metadata.response_id, return_code, len(out)))
if return_code == 0:
if len(out) > 0:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
try:
j = json.loads(self.reply_buffer[request_metadata.response_id].getvalue())
except:
pass
# dbg("Incomplete json, awaiting more", True)
try:
j["wee_slack_process_method"] = request_metadata.request_normalized
if self.recording:
self.record_event(j, 'wee_slack_process_method', 'http')
j["wee_slack_request_metadata"] = request_metadata
self.reply_buffer.pop(request_metadata.response_id)
self.receive(j)
self.delete_context(data)
except:
dbg("HTTP REQUEST CALLBACK FAILED", True)
pass
# We got an empty reply and this is weird so just ditch it and retry
else:
dbg("length was zero, probably a bug..")
self.delete_context(data)
self.receive(request_metadata)
elif return_code == -1:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
else:
self.reply_buffer.pop(request_metadata.response_id, None)
self.delete_context(data)
if request_metadata.request.startswith('rtm.'):
retry_text = ('retrying' if request_metadata.should_try() else
'will not retry after too many failed attempts')
w.prnt('', ('Failed connecting to slack team with token {}, {}. ' +
'If this persists, try increasing slack_timeout. Error: {}')
.format(token_for_print(request_metadata.token), retry_text, err))
dbg('rtm.start failed with return_code {}. stack:\n{}'
.format(return_code, ''.join(traceback.format_stack())), level=5)
self.receive(request_metadata)
return w.WEECHAT_RC_OK
def receive(self, dataobj):
"""
complete
Receives a raw object and places it on the queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.queue.append(dataobj)
def receive_slow(self, dataobj):
"""
complete
Receives a raw object and places it on the slow queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.slow_queue.append(dataobj)
def handle_next(self):
"""
complete
Main handler of the EventRouter. This is called repeatedly
via callback to drain events from the queue. It also attaches
useful metadata and context to events as they are processed.
"""
wanted_interval = 100
if len(self.slow_queue) > 0 or len(self.queue) > 0:
wanted_interval = 10
if self.handle_next_hook is None or wanted_interval != self.handle_next_hook_interval:
if self.handle_next_hook:
w.unhook(self.handle_next_hook)
self.handle_next_hook = w.hook_timer(wanted_interval, 0, 0, "handle_next", "")
self.handle_next_hook_interval = wanted_interval
if len(self.slow_queue) > 0 and ((self.slow_queue_timer + 1) < time.time()):
dbg("from slow queue", 0)
self.queue.append(self.slow_queue.pop())
self.slow_queue_timer = time.time()
if len(self.queue) > 0:
j = self.queue.pop(0)
# Reply is a special case of a json reply from websocket.
kwargs = {}
if isinstance(j, SlackRequest):
if j.should_try():
if j.retry_ready():
local_process_async_slack_api_request(j, self)
else:
self.slow_queue.append(j)
else:
dbg("Max retries for Slackrequest")
else:
if "reply_to" in j:
dbg("SET FROM REPLY")
function_name = "reply"
elif "type" in j:
dbg("SET FROM type")
function_name = j["type"]
elif "wee_slack_process_method" in j:
dbg("SET FROM META")
function_name = j["wee_slack_process_method"]
else:
dbg("SET FROM NADA")
function_name = "unknown"
request = j.get("wee_slack_request_metadata")
if request:
team = request.team
channel = request.channel
metadata = request.metadata
else:
team = j.get("wee_slack_metadata_team")
channel = None
metadata = {}
if team:
if "channel" in j:
channel_id = j["channel"]["id"] if type(j["channel"]) == dict else j["channel"]
channel = team.channels.get(channel_id, channel)
if "user" in j:
user_id = j["user"]["id"] if type(j["user"]) == dict else j["user"]
metadata['user'] = team.users.get(user_id)
dbg("running {}".format(function_name))
if function_name.startswith("local_") and function_name in self.local_proc:
self.local_proc[function_name](j, self, team, channel, metadata)
elif function_name in self.proc:
self.proc[function_name](j, self, team, channel, metadata)
elif function_name in self.handlers:
self.handlers[function_name](j, self, team, channel, metadata)
else:
dbg("Callback not implemented for event: {}".format(function_name))
def handle_next(data, remaining_calls):
try:
EVENTROUTER.handle_next()
except:
if config.debug_mode:
traceback.print_exc()
else:
pass
return w.WEECHAT_RC_OK
class WeechatController(object):
"""
Encapsulates our interaction with weechat
"""
def __init__(self, eventrouter):
self.eventrouter = eventrouter
self.buffers = {}
self.previous_buffer = None
self.buffer_list_stale = False
def iter_buffers(self):
for b in self.buffers:
yield (b, self.buffers[b])
def register_buffer(self, buffer_ptr, channel):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
if isinstance(buffer_ptr, basestring):
self.buffers[buffer_ptr] = channel
else:
raise InvalidType(type(buffer_ptr))
def unregister_buffer(self, buffer_ptr, update_remote=False, close_buffer=False):
"""
complete
Removes a weechat buffer from the list of handled buffers for this EventRouter
"""
channel = self.buffers.get(buffer_ptr)
if channel:
channel.destroy_buffer(update_remote)
del self.buffers[buffer_ptr]
if close_buffer:
w.buffer_close(buffer_ptr)
def get_channel_from_buffer_ptr(self, buffer_ptr):
return self.buffers.get(buffer_ptr)
def get_all(self, buffer_ptr):
return self.buffers
def get_previous_buffer_ptr(self):
return self.previous_buffer
def set_previous_buffer(self, data):
self.previous_buffer = data
def check_refresh_buffer_list(self):
return self.buffer_list_stale and self.last_buffer_list_update + 1 < time.time()
def set_refresh_buffer_list(self, setting):
self.buffer_list_stale = setting
###### New Local Processors
def local_process_async_slack_api_request(request, event_router):
"""
complete
Sends an API request to Slack. You'll need to give this a well formed SlackRequest object.
DEBUGGING!!! The context here cannot be very large. Weechat will crash.
"""
if not event_router.shutting_down:
weechat_request = 'url:{}'.format(request.request_string())
weechat_request += '&nonce={}'.format(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4)))
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
request.tried()
context = event_router.store_context(request)
# TODO: let flashcode know about this bug - i have to 'clear' the hashtable or retry requests fail
w.hook_process_hashtable('url:', params, config.slack_timeout, "", context)
w.hook_process_hashtable(weechat_request, params, config.slack_timeout, "receive_httprequest_callback", context)
###### New Callbacks
@utf8_decode
def ws_ping_cb(data, remaining_calls):
for team in EVENTROUTER.teams.values():
if team.ws and team.connected:
try:
team.ws.ping()
team.last_ping_time = time.time()
except (WebSocketConnectionClosedException, socket.error) as e:
handle_socket_error(e, team, 'ping')
return w.WEECHAT_RC_OK
@utf8_decode
def reconnect_callback(*args):
EVENTROUTER.reconnect_if_disconnected()
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_closing_callback(signal, sig_type, data):
"""
Receives a callback from weechat when a buffer is being closed.
"""
EVENTROUTER.weechat_controller.unregister_buffer(data, True, False)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_input_callback(signal, buffer_ptr, data):
"""
incomplete
Handles everything a user types in the input bar. In our case
this includes add/remove reactions, modifying messages, and
sending messages.
"""
eventrouter = eval(signal)
channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(buffer_ptr)
if not channel:
return w.WEECHAT_RC_ERROR
def get_id(message_id):
if not message_id:
return 1
elif message_id[0] == "$":
return message_id[1:]
else:
return int(message_id)
message_id_regex = r"(\d*|\$[0-9a-fA-F]{3,})"
reaction = re.match(r"^{}(\+|-)(:(.+):|{})\s*$".format(message_id_regex, EMOJI_REGEX_STRING), data)
substitute = re.match("^{}s/".format(message_id_regex), data)
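# Input forms recognised by the regexes above (illustrative, not exhaustive):
#   "+:smile:"       add a reaction to the most recent message
#   "3-:smile:"      remove a reaction from the 3rd most recent message
#   "$1a2b3c+:+1:"   react to the message whose hash is $1a2b3c
#   "2s/foo/bar/"    sed-style edit of your 2nd most recent own message
# Anything that matches neither pattern is sent as a regular message.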
if reaction:
emoji_match = reaction.group(4) or reaction.group(3)
emoji = replace_emoji_with_string(emoji_match)
if reaction.group(2) == "+":
channel.send_add_reaction(get_id(reaction.group(1)), emoji)
elif reaction.group(2) == "-":
channel.send_remove_reaction(get_id(reaction.group(1)), emoji)
elif substitute:
msg_id = get_id(substitute.group(1))
try:
old, new, flags = re.split(r'(?<!\\)/', data)[1:]
except ValueError:
pass
else:
# Replacement string in re.sub() is a string, not a regex, so get
# rid of escapes.
new = new.replace(r'\/', '/')
old = old.replace(r'\/', '/')
channel.edit_nth_previous_message(msg_id, old, new, flags)
else:
if data.startswith(('//', ' ')):
data = data[1:]
channel.send_message(data)
# this is probably wrong channel.mark_read(update_remote=True, force=True)
return w.WEECHAT_RC_OK
# Workaround for supporting multiline messages. It intercepts before the input
# callback is called, as this is called with the whole message, while it is
# normally split on newline before being sent to buffer_input_callback
def input_text_for_buffer_cb(data, modifier, current_buffer, string):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return string
message = decode_from_utf8(string)
if not message.startswith("/") and "\n" in message:
buffer_input_callback("EVENTROUTER", current_buffer, message)
return ""
return string
@utf8_decode
def buffer_switch_callback(signal, sig_type, data):
"""
Every time we change channels in weechat, we call this to:
1) set read marker 2) determine if we have already populated
channel history data 3) set presence to active
"""
eventrouter = eval(signal)
prev_buffer_ptr = eventrouter.weechat_controller.get_previous_buffer_ptr()
# this is to see if we need to gray out things in the buffer list
prev = eventrouter.weechat_controller.get_channel_from_buffer_ptr(prev_buffer_ptr)
if prev:
prev.mark_read()
new_channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(data)
if new_channel:
if not new_channel.got_history:
new_channel.get_history()
set_own_presence_active(new_channel.team)
eventrouter.weechat_controller.set_previous_buffer(data)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_list_update_callback(data, somecount):
"""
incomplete
A simple timer-based callback that will update the buffer list
if needed. We only do this max 1x per second, as otherwise it
uses a lot of cpu for minimal changes. We use buffer short names
to indicate typing via "#channel" <-> ">channel" and
user presence via " name" <-> "+name".
"""
eventrouter = eval(data)
for b in eventrouter.weechat_controller.iter_buffers():
b[1].refresh()
# buffer_list_update = True
# if eventrouter.weechat_controller.check_refresh_buffer_list():
# # gray_check = False
# # if len(servers) > 1:
# # gray_check = True
# eventrouter.weechat_controller.set_refresh_buffer_list(False)
return w.WEECHAT_RC_OK
def quit_notification_callback(signal, sig_type, data):
stop_talking_to_slack()
return w.WEECHAT_RC_OK
@utf8_decode
def typing_notification_cb(data, signal, current_buffer):
msg = w.buffer_get_string(current_buffer, "input")
if len(msg) > 8 and msg[0] != "/":
global typing_timer
now = time.time()
if typing_timer + 4 < now:
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if channel and channel.type != "thread":
identifier = channel.identifier
request = {"type": "typing", "channel": identifier}
channel.team.send_to_websocket(request, expect_reply=False)
typing_timer = now
return w.WEECHAT_RC_OK
@utf8_decode
def typing_update_cb(data, remaining_calls):
w.bar_item_update("slack_typing_notice")
return w.WEECHAT_RC_OK
@utf8_decode
def slack_never_away_cb(data, remaining_calls):
if config.never_away:
for team in EVENTROUTER.teams.values():
set_own_presence_active(team)
return w.WEECHAT_RC_OK
@utf8_decode
def typing_bar_item_cb(data, item, current_window, current_buffer, extra_info):
"""
Provides a bar item indicating who is typing in the current channel AND
who is typing a DM to you globally.
"""
typers = []
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
# first look for people typing in this channel
if current_channel:
# this try is mostly because server buffers don't implement is_someone_typing
try:
if current_channel.type != 'im' and current_channel.is_someone_typing():
typers += current_channel.get_typing_list()
except:
pass
# here is where we notify you that someone is typing in DM
# regardless of which buffer you are in currently
for team in EVENTROUTER.teams.values():
for channel in team.channels.values():
if channel.type == "im":
if channel.is_someone_typing():
typers.append("D/" + channel.slack_name)
pass
typing = ", ".join(typers)
if typing != "":
typing = colorize_string(config.color_typing_notice, "typing: " + typing)
return typing
@utf8_decode
def away_bar_item_cb(data, item, current_window, current_buffer, extra_info):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if not channel:
return ''
if channel.team.is_user_present(channel.team.myidentifier):
return ''
else:
away_color = w.config_string(w.config_get('weechat.color.item_away'))
if channel.team.my_manual_presence == 'away':
return colorize_string(away_color, 'manual away')
else:
return colorize_string(away_color, 'auto away')
@utf8_decode
def channel_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all channels on all teams to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
should_include_channel = lambda channel: channel.active and channel.type in ['channel', 'group', 'private', 'shared']
other_teams = [team for team in EVENTROUTER.teams.values() if not current_channel or team != current_channel.team]
for team in other_teams:
for channel in team.channels.values():
if should_include_channel(channel):
w.hook_completion_list_add(completion, channel.name, 0, w.WEECHAT_LIST_POS_SORT)
if current_channel:
for channel in sorted(current_channel.team.channels.values(), key=lambda channel: channel.name, reverse=True):
if should_include_channel(channel):
w.hook_completion_list_add(completion, channel.name, 0, w.WEECHAT_LIST_POS_BEGINNING)
if should_include_channel(current_channel):
w.hook_completion_list_add(completion, current_channel.name, 0, w.WEECHAT_LIST_POS_BEGINNING)
return w.WEECHAT_RC_OK
@utf8_decode
def dm_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all dms/mpdms on all teams to completion list
"""
for team in EVENTROUTER.teams.values():
for channel in team.channels.values():
if channel.active and channel.type in ['im', 'mpim']:
w.hook_completion_list_add(completion, channel.name, 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def nick_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all @-prefixed nicks to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
base_command = w.hook_completion_get_string(completion, "base_command")
if base_command in ['invite', 'msg', 'query', 'whois']:
members = current_channel.team.members
else:
members = current_channel.members
for member in members:
user = current_channel.team.users.get(member)
if user and not user.deleted:
w.hook_completion_list_add(completion, user.name, 1, w.WEECHAT_LIST_POS_SORT)
w.hook_completion_list_add(completion, "@" + user.name, 1, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def emoji_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all :-prefixed emoji to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None:
return w.WEECHAT_RC_OK
base_word = w.hook_completion_get_string(completion, "base_word")
if ":" not in base_word:
return w.WEECHAT_RC_OK
prefix = base_word.split(":")[0] + ":"
for emoji in current_channel.team.emoji_completions:
w.hook_completion_list_add(completion, prefix + emoji + ":", 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def thread_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all $-prefixed thread ids to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None or not hasattr(current_channel, 'hashed_messages'):
return w.WEECHAT_RC_OK
threads = current_channel.hashed_messages.items()
for thread_id, message_ts in sorted(threads, key=lambda item: item[1]):
message = current_channel.messages.get(message_ts)
if message and message.number_of_replies():
w.hook_completion_list_add(completion, "$" + thread_id, 0, w.WEECHAT_LIST_POS_BEGINNING)
return w.WEECHAT_RC_OK
@utf8_decode
def topic_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds topic for current channel to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None:
return w.WEECHAT_RC_OK
topic = current_channel.render_topic()
channel_names = [channel.name for channel in current_channel.team.channels.values()]
if topic.split(' ', 1)[0] in channel_names:
topic = '{} {}'.format(current_channel.name, topic)
w.hook_completion_list_add(completion, topic, 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def usergroups_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all @-prefixed usergroups to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None:
return w.WEECHAT_RC_OK
subteam_handles = [subteam.handle for subteam in current_channel.team.subteams.values()]
for group in subteam_handles + ["@channel", "@everyone", "@here"]:
w.hook_completion_list_add(completion, group, 1, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def complete_next_cb(data, current_buffer, command):
"""Extract current word, if it is equal to a nick, prefix it with @ and
rely on nick_completion_cb adding the @-prefixed versions to the
completion lists, then let Weechat's internal completion do its
thing
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if not hasattr(current_channel, 'members') or current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
line_input = w.buffer_get_string(current_buffer, "input")
current_pos = w.buffer_get_integer(current_buffer, "input_pos") - 1
input_length = w.buffer_get_integer(current_buffer, "input_length")
word_start = 0
word_end = input_length
# If we're on a non-word, look left for something to complete
while current_pos >= 0 and line_input[current_pos] != '@' and not line_input[current_pos].isalnum():
current_pos = current_pos - 1
if current_pos < 0:
current_pos = 0
for l in range(current_pos, 0, -1):
if line_input[l] != '@' and not line_input[l].isalnum():
word_start = l + 1
break
for l in range(current_pos, input_length):
if not line_input[l].isalnum():
word_end = l
break
word = line_input[word_start:word_end]
for member in current_channel.members:
user = current_channel.team.users.get(member)
if user and user.name == word:
# Here, we cheat. Insert a @ in front and rely on the @
# nicks being in the completion list
w.buffer_set(current_buffer, "input", line_input[:word_start] + "@" + line_input[word_start:])
w.buffer_set(current_buffer, "input_pos", str(w.buffer_get_integer(current_buffer, "input_pos") + 1))
return w.WEECHAT_RC_OK_EAT
return w.WEECHAT_RC_OK
def script_unloaded():
stop_talking_to_slack()
return w.WEECHAT_RC_OK
def stop_talking_to_slack():
"""
complete
Prevents a race condition where quitting closes buffers,
which would trigger leaving the channel because of how the
buffer close callback is handled
"""
EVENTROUTER.shutdown()
for team in EVENTROUTER.teams.values():
team.ws.shutdown()
return w.WEECHAT_RC_OK
##### New Classes
class SlackRequest(object):
"""
Encapsulates a Slack api request. Valuable as an object that we can add to the queue and/or retry.
Makes a SHA of the request url and current time so we can re-tag this on the way back through.
"""
def __init__(self, team, request, post_data=None, channel=None, metadata=None, retries=3, token=None):
if team is None and token is None:
raise ValueError("Both team and token can't be None")
self.team = team
self.request = request
self.post_data = post_data if post_data else {}
self.channel = channel
self.metadata = metadata if metadata else {}
self.retries = retries
self.token = token if token else team.token
self.tries = 0
self.start_time = time.time()
self.request_normalized = re.sub(r'\W+', '', request)
self.domain = 'api.slack.com'
self.post_data['token'] = self.token
self.url = 'https://{}/api/{}?{}'.format(self.domain, self.request, urlencode(encode_to_utf8(self.post_data)))
self.params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
self.response_id = sha1_hex('{}{}'.format(self.url, self.start_time))
def __repr__(self):
return ("SlackRequest(team={}, request='{}', post_data={}, retries={}, token='{}', "
"tries={}, start_time={})").format(self.team, self.request, self.post_data,
self.retries, token_for_print(self.token), self.tries, self.start_time)
def request_string(self):
return "{}".format(self.url)
def tried(self):
self.tries += 1
self.response_id = sha1_hex("{}{}".format(self.url, time.time()))
def should_try(self):
return self.tries < self.retries
def retry_ready(self):
return (self.start_time + (self.tries**2)) < time.time()
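# Typical usage elsewhere in this file (the channel id is a placeholder): build
# a request and hand it to the event router, which performs the HTTP call
# asynchronously and retries with an increasing (tries-squared) delay via
# should_try()/retry_ready():
#   s = SlackRequest(team, "conversations.join", {"channel": "C0XXXXXXX"}, channel=channel)
#   eventrouter.receive(s)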
class SlackSubteam(object):
"""
Represents a slack group or subteam
"""
def __init__(self, originating_team_id, is_member, **kwargs):
self.handle = '@{}'.format(kwargs['handle'])
self.identifier = kwargs['id']
self.name = kwargs['name']
self.description = kwargs.get('description')
self.team_id = originating_team_id
self.is_member = is_member
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def __eq__(self, compare_str):
return compare_str == self.identifier
class SlackTeam(object):
"""
incomplete
Team object under which users and channels live. Does lots.
"""
def __init__(self, eventrouter, token, team_hash, websocket_url, team_info, subteams, nick, myidentifier, my_manual_presence, users, bots, channels, **kwargs):
self.identifier = team_info["id"]
self.active = True
self.team_hash = team_hash
self.ws_url = websocket_url
self.connected = False
self.connecting_rtm = False
self.connecting_ws = False
self.ws = None
self.ws_counter = 0
self.ws_replies = {}
self.last_ping_time = 0
self.last_pong_time = time.time()
self.eventrouter = eventrouter
self.token = token
self.team = self
self.subteams = subteams
self.team_info = team_info
self.subdomain = team_info["domain"]
self.domain = self.subdomain + ".slack.com"
self.preferred_name = self.domain
self.nick = nick
self.myidentifier = myidentifier
self.my_manual_presence = my_manual_presence
try:
if self.channels:
for c in channels.keys():
if not self.channels.get(c):
self.channels[c] = channels[c]
except:
self.channels = channels
self.users = users
self.bots = bots
self.name = self.domain
self.channel_buffer = None
self.got_history = True
self.create_buffer()
self.set_muted_channels(kwargs.get('muted_channels', ""))
self.set_highlight_words(kwargs.get('highlight_words', ""))
for c in self.channels.keys():
channels[c].set_related_server(self)
channels[c].check_should_open()
# Last step is to make sure my nickname is the set color
self.users[self.myidentifier].force_color(w.config_string(w.config_get('weechat.color.chat_nick_self')))
# This highlight step must happen after we have set related server
self.load_emoji_completions()
self.type = "team"
def __repr__(self):
return "domain={} nick={}".format(self.subdomain, self.nick)
def __eq__(self, compare_str):
return compare_str == self.token or compare_str == self.domain or compare_str == self.subdomain
@property
def members(self):
return self.users.keys()
def load_emoji_completions(self):
self.emoji_completions = list(EMOJI.keys())
if self.emoji_completions:
s = SlackRequest(self, "emoji.list")
self.eventrouter.receive(s)
def add_channel(self, channel):
self.channels[channel["id"]] = channel
channel.set_related_server(self)
def generate_usergroup_map(self):
return {s.handle: s.identifier for s in self.subteams.values()}
def create_buffer(self):
if not self.channel_buffer:
alias = config.server_aliases.get(self.subdomain)
if alias:
self.preferred_name = alias
elif config.short_buffer_names:
self.preferred_name = self.subdomain
else:
self.preferred_name = self.domain
self.channel_buffer = w.buffer_new(self.preferred_name, "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'server')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.nick)
w.buffer_set(self.channel_buffer, "localvar_set_server", self.preferred_name)
self.buffer_merge()
def buffer_merge(self, config_value=None):
if not config_value:
config_value = w.config_string(w.config_get('irc.look.server_buffer'))
if config_value == 'merge_with_core':
w.buffer_merge(self.channel_buffer, w.buffer_search_main())
else:
w.buffer_unmerge(self.channel_buffer, 0)
def destroy_buffer(self, update_remote):
pass
def set_muted_channels(self, muted_str):
self.muted_channels = {x for x in muted_str.split(',') if x}
for channel in self.channels.values():
channel.set_highlights()
def set_highlight_words(self, highlight_str):
self.highlight_words = {x for x in highlight_str.split(',') if x}
for channel in self.channels.values():
channel.set_highlights()
def formatted_name(self, **kwargs):
return self.domain
def buffer_prnt(self, data, message=False):
tag_name = "team_message" if message else "team_info"
w.prnt_date_tags(self.channel_buffer, SlackTS().major, tag(tag_name), data)
def send_message(self, message, subtype=None, request_dict_ext={}):
w.prnt("", "ERROR: Sending a message in the team buffer is not supported")
def find_channel_by_members(self, members, channel_type=None):
for channel in self.channels.values():
if channel.get_members() == members and (
channel_type is None or channel.type == channel_type):
return channel
def get_channel_map(self):
return {v.name: k for k, v in self.channels.items()}
def get_username_map(self):
return {v.name: k for k, v in self.users.items()}
def get_team_hash(self):
return self.team_hash
@staticmethod
def generate_team_hash(team_id, subdomain):
return str(sha1_hex("{}{}".format(team_id, subdomain)))
def refresh(self):
self.rename()
def rename(self):
pass
def is_user_present(self, user_id):
user = self.users.get(user_id)
if user and user.presence == 'active':
return True
else:
return False
def mark_read(self, ts=None, update_remote=True, force=False):
pass
def connect(self):
if not self.connected and not self.connecting_ws:
if self.ws_url:
self.connecting_ws = True
try:
# only http proxy is currently supported
proxy = ProxyWrapper()
if proxy.has_proxy == True:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs, http_proxy_host=proxy.proxy_address, http_proxy_port=proxy.proxy_port, http_proxy_auth=(proxy.proxy_user, proxy.proxy_password))
else:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs)
self.hook = w.hook_fd(ws.sock.fileno(), 1, 0, 0, "receive_ws_callback", self.get_team_hash())
ws.sock.setblocking(0)
self.ws = ws
self.set_reconnect_url(None)
self.set_connected()
self.connecting_ws = False
except:
w.prnt(self.channel_buffer,
'Failed connecting to slack team {}, retrying.'.format(self.domain))
dbg('connect failed with exception:\n{}'.format(format_exc_tb()), level=5)
self.connecting_ws = False
return False
elif not self.connecting_rtm:
# The fast reconnect failed, so start over-ish
for chan in self.channels:
self.channels[chan].got_history = False
s = initiate_connection(self.token, retries=999, team=self)
self.eventrouter.receive(s)
self.connecting_rtm = True
def set_connected(self):
self.connected = True
self.last_pong_time = time.time()
self.buffer_prnt('Connected to Slack team {} ({}) with username {}'.format(
self.team_info["name"], self.domain, self.nick))
dbg("connected to {}".format(self.domain))
def set_disconnected(self):
w.unhook(self.hook)
self.connected = False
def set_reconnect_url(self, url):
self.ws_url = url
def next_ws_transaction_id(self):
self.ws_counter += 1
return self.ws_counter
def send_to_websocket(self, data, expect_reply=True):
data["id"] = self.next_ws_transaction_id()
message = json.dumps(data)
try:
if expect_reply:
self.ws_replies[data["id"]] = data
self.ws.send(encode_to_utf8(message))
dbg("Sent {}...".format(message[:100]))
except (WebSocketConnectionClosedException, socket.error) as e:
handle_socket_error(e, self, 'send')
def update_member_presence(self, user, presence):
user.presence = presence
for c in self.channels:
c = self.channels[c]
if user.id in c.members:
c.update_nicklist(user.id)
def subscribe_users_presence(self):
# FIXME: There is a limitation in the API to the size of the
# json we can send.
# We should try to be smarter to fetch the users whom we want to
# subscribe to.
users = list(self.users.keys())[:750]
if self.myidentifier not in users:
users.append(self.myidentifier)
self.send_to_websocket({
"type": "presence_sub",
"ids": users,
}, expect_reply=False)
class SlackChannelCommon(object):
def send_add_reaction(self, msg_id, reaction):
self.send_change_reaction("reactions.add", msg_id, reaction)
def send_remove_reaction(self, msg_id, reaction):
self.send_change_reaction("reactions.remove", msg_id, reaction)
def send_change_reaction(self, method, msg_id, reaction):
if type(msg_id) is not int:
if msg_id in self.hashed_messages:
timestamp = str(self.hashed_messages[msg_id])
else:
return
elif 0 < msg_id <= len(self.messages):
keys = self.main_message_keys_reversed()
timestamp = next(islice(keys, msg_id - 1, None))
else:
return
data = {"channel": self.identifier, "timestamp": timestamp, "name": reaction}
s = SlackRequest(self.team, method, data, channel=self, metadata={'reaction': reaction})
self.eventrouter.receive(s)
def edit_nth_previous_message(self, msg_id, old, new, flags):
message = self.my_last_message(msg_id)
if message is None:
return
if new == "" and old == "":
s = SlackRequest(self.team, "chat.delete", {"channel": self.identifier, "ts": message['ts']}, channel=self)
self.eventrouter.receive(s)
else:
num_replace = 0 if 'g' in flags else 1
f = re.UNICODE
f |= re.IGNORECASE if 'i' in flags else 0
f |= re.MULTILINE if 'm' in flags else 0
f |= re.DOTALL if 's' in flags else 0
new_message = re.sub(old, new, message["text"], num_replace, f)
if new_message != message["text"]:
s = SlackRequest(self.team, "chat.update",
{"channel": self.identifier, "ts": message['ts'], "text": new_message}, channel=self)
self.eventrouter.receive(s)
def my_last_message(self, msg_id):
if type(msg_id) is not int:
ts = self.hashed_messages.get(msg_id)
m = self.messages.get(ts)
if m is not None and m.message_json.get("user") == self.team.myidentifier:
return m.message_json
else:
for key in self.main_message_keys_reversed():
m = self.messages[key]
if m.message_json.get("user") == self.team.myidentifier:
msg_id -= 1
if msg_id == 0:
return m.message_json
def change_message(self, ts, message_json=None, text=None):
ts = SlackTS(ts)
m = self.messages.get(ts)
if not m:
return
if message_json:
m.message_json.update(message_json)
if text:
m.change_text(text)
if type(m) == SlackMessage or config.thread_messages_in_channel:
new_text = self.render(m, force=True)
modify_buffer_line(self.channel_buffer, ts, new_text)
if type(m) == SlackThreadMessage:
thread_channel = m.parent_message.thread_channel
if thread_channel and thread_channel.active:
new_text = thread_channel.render(m, force=True)
modify_buffer_line(thread_channel.channel_buffer, ts, new_text)
def hash_message(self, ts):
ts = SlackTS(ts)
def calc_hash(ts):
return sha1_hex(str(ts))
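# Hashes are the first 3+ hex chars of the sha1 of the timestamp, extended
# until they no longer clash with an existing hash; when a new hash makes an
# existing shorter prefix ambiguous, the older message is re-hashed at the
# longer length and its rendered line (and any thread buffer name) is updated.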
if ts in self.messages and not self.messages[ts].hash:
message = self.messages[ts]
tshash = calc_hash(message.ts)
hl = 3
for i in range(hl, len(tshash) + 1):
shorthash = tshash[:i]
if self.hashed_messages.get(shorthash) == ts:
message.hash = shorthash
return shorthash
shorthash = tshash[:hl]
while any(x.startswith(shorthash) for x in self.hashed_messages):
hl += 1
shorthash = tshash[:hl]
if shorthash[:-1] in self.hashed_messages:
col_ts = self.hashed_messages.pop(shorthash[:-1])
col_new_hash = calc_hash(col_ts)[:hl]
self.hashed_messages[col_new_hash] = col_ts
col_msg = self.messages.get(col_ts)
if col_msg:
col_msg.hash = col_new_hash
self.change_message(str(col_msg.ts))
if col_msg.thread_channel:
col_msg.thread_channel.rename()
self.hashed_messages[shorthash] = message.ts
message.hash = shorthash
return shorthash
elif ts in self.messages:
return self.messages[ts].hash
class SlackChannel(SlackChannelCommon):
"""
Represents an individual slack channel.
"""
def __init__(self, eventrouter, **kwargs):
# We require these two things for a valid object,
# the rest we can just learn from slack
self.active = False
for key, value in kwargs.items():
setattr(self, key, value)
self.eventrouter = eventrouter
self.slack_name = kwargs["name"]
self.slack_purpose = kwargs.get("purpose", {"value": ""})
self.topic = kwargs.get("topic", {"value": ""})
self.identifier = kwargs["id"]
self.last_read = SlackTS(kwargs.get("last_read", SlackTS()))
self.channel_buffer = None
self.team = kwargs.get('team')
self.got_history = False
self.messages = OrderedDict()
self.hashed_messages = {}
self.thread_channels = {}
self.new_messages = False
self.typing = {}
self.type = 'channel'
self.set_name(self.slack_name)
# short name relates to the localvar we change for typing indication
self.current_short_name = self.name
self.set_members(kwargs.get('members', []))
self.unread_count_display = 0
self.last_line_from = None
def __eq__(self, compare_str):
if compare_str == self.slack_name or compare_str == self.formatted_name() or compare_str == self.formatted_name(style="long_default"):
return True
else:
return False
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
@property
def muted(self):
return self.identifier in self.team.muted_channels
def set_name(self, slack_name):
self.name = "#" + slack_name
def refresh(self):
return self.rename()
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(typing=self.is_someone_typing(), style="sidebar")
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def set_members(self, members):
self.members = set(members)
self.update_nicklist()
def get_members(self):
return self.members
def set_unread_count_display(self, count):
self.unread_count_display = count
self.new_messages = bool(self.unread_count_display)
if self.muted and config.muted_channels_activity != "all":
return
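        # One hotlist entry per unread message: WeeChat priority 2 (private)
        # for DMs/MPDMs, priority 1 (message) for regular channels.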
for c in range(self.unread_count_display):
if self.type in ["im", "mpim"]:
w.buffer_set(self.channel_buffer, "hotlist", "2")
else:
w.buffer_set(self.channel_buffer, "hotlist", "1")
def formatted_name(self, style="default", typing=False, **kwargs):
if typing and config.channel_name_typing_indicator:
prepend = ">"
elif self.type == "group" or self.type == "private":
prepend = config.group_name_prefix
elif self.type == "shared":
prepend = config.shared_name_prefix
else:
prepend = "#"
sidebar_color = config.color_buflist_muted_channels if self.muted else ""
select = {
"default": prepend + self.slack_name,
"sidebar": colorize_string(sidebar_color, prepend + self.slack_name),
"base": self.slack_name,
"long_default": "{}.{}{}".format(self.team.preferred_name, prepend, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return select[style]
def render_topic(self, fallback_to_purpose=False):
topic = self.topic['value']
if not topic and fallback_to_purpose:
topic = self.slack_purpose['value']
return unhtmlescape(unfurl_refs(topic))
def set_topic(self, value=None):
if value is not None:
self.topic = {"value": value}
if self.channel_buffer:
topic = self.render_topic(fallback_to_purpose=True)
w.buffer_set(self.channel_buffer, "title", topic)
def update_from_message_json(self, message_json):
for key, value in message_json.items():
setattr(self, key, value)
def open(self, update_remote=True):
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["join"],
{"channel": self.identifier}, channel=self)
self.eventrouter.receive(s)
self.create_buffer()
self.active = True
self.get_history()
def check_should_open(self, force=False):
if hasattr(self, "is_archived") and self.is_archived:
return
if force:
self.create_buffer()
return
# Only check is_member if is_open is not set, because in some cases
# (e.g. group DMs), is_member should be ignored in favor of is_open.
is_open = self.is_open if hasattr(self, "is_open") else self.is_member
if is_open or self.unread_count_display:
self.create_buffer()
if config.background_load_all_history:
self.get_history(slow_queue=True)
def set_related_server(self, team):
self.team = team
def highlights(self):
nick_highlights = {'@' + self.team.nick, self.team.myidentifier}
subteam_highlights = {subteam.handle for subteam in self.team.subteams.values()
if subteam.is_member}
highlights = nick_highlights | subteam_highlights | self.team.highlight_words
if self.muted and config.muted_channels_activity == "personal_highlights":
return highlights
else:
return highlights | {"@channel", "@everyone", "@group", "@here"}
def set_highlights(self):
# highlight my own name and any set highlights
if self.channel_buffer:
h_str = ",".join(self.highlights())
w.buffer_set(self.channel_buffer, "highlight_words", h_str)
if self.muted and config.muted_channels_activity != "all":
notify_level = "0" if config.muted_channels_activity == "none" else "1"
w.buffer_set(self.channel_buffer, "notify", notify_level)
else:
w.buffer_set(self.channel_buffer, "notify", "3")
if self.muted and config.muted_channels_activity == "none":
w.buffer_set(self.channel_buffer, "highlight_tags_restrict", "highlight_force")
else:
w.buffer_set(self.channel_buffer, "highlight_tags_restrict", "")
for thread_channel in self.thread_channels.values():
thread_channel.set_highlights(h_str)
def create_buffer(self):
"""
Creates the weechat buffer where the channel magic happens.
"""
if not self.channel_buffer:
self.active = True
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
if self.type == "im":
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
else:
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
self.set_highlights()
self.set_topic()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.preferred_name)
self.update_nicklist()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["info"],
{"channel": self.identifier}, channel=self)
self.eventrouter.receive(s)
if self.type == "im":
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["join"],
{"users": self.user, "return_im": True}, channel=self)
self.eventrouter.receive(s)
def clear_messages(self):
w.buffer_clear(self.channel_buffer)
self.messages = OrderedDict()
self.got_history = False
def destroy_buffer(self, update_remote):
self.clear_messages()
self.channel_buffer = None
self.active = False
if update_remote and not self.eventrouter.shutting_down:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["leave"],
{"channel": self.identifier}, channel=self)
self.eventrouter.receive(s)
def buffer_prnt(self, nick, text, timestamp=str(time.time()), tagset=None, tag_nick=None, history_message=False, extra_tags=None):
data = "{}\t{}".format(format_nick(nick, self.last_line_from), text)
self.last_line_from = nick
ts = SlackTS(timestamp)
last_read = SlackTS(self.last_read)
# without this, DMs won't open automatically
if not self.channel_buffer and ts > last_read:
self.open(update_remote=False)
if self.channel_buffer:
# backlog messages - we will update the read marker as we print these
backlog = ts <= last_read
if not backlog:
self.new_messages = True
if not tagset:
if self.type in ["im", "mpim"]:
tagset = "dm"
else:
tagset = "channel"
no_log = history_message and backlog
self_msg = tag_nick == self.team.nick
tags = tag(tagset, user=tag_nick, self_msg=self_msg, backlog=backlog, no_log=no_log, extra_tags=extra_tags)
try:
if (config.unhide_buffers_with_activity
and not self.is_visible() and not self.muted):
w.buffer_set(self.channel_buffer, "hidden", "0")
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_last_print_time(self.channel_buffer, ts.minor)
if backlog or self_msg:
self.mark_read(ts, update_remote=False, force=True)
except:
dbg("Problem processing buffer_prnt")
def send_message(self, message, subtype=None, request_dict_ext={}):
message = linkify_text(message, self.team)
dbg(message)
if subtype == 'me_message':
s = SlackRequest(self.team, "chat.meMessage", {"channel": self.identifier, "text": message}, channel=self)
self.eventrouter.receive(s)
else:
request = {"type": "message", "channel": self.identifier,
"text": message, "user": self.team.myidentifier}
request.update(request_dict_ext)
self.team.send_to_websocket(request)
def store_message(self, message, team, from_me=False):
if not self.active:
return
if from_me:
message.message_json["user"] = team.myidentifier
self.messages[SlackTS(message.ts)] = message
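        # Keep only the newest SCROLLBACK_SIZE messages and drop the hash
        # registrations of anything trimmed so hashed_messages doesn't grow
        # unbounded.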
sorted_messages = sorted(self.messages.items())
messages_to_delete = sorted_messages[:-SCROLLBACK_SIZE]
messages_to_keep = sorted_messages[-SCROLLBACK_SIZE:]
for message_hash in [m[1].hash for m in messages_to_delete]:
if message_hash in self.hashed_messages:
del self.hashed_messages[message_hash]
self.messages = OrderedDict(messages_to_keep)
def is_visible(self):
return w.buffer_get_integer(self.channel_buffer, "hidden") == 0
def get_history(self, slow_queue=False):
if not self.got_history:
# we have probably reconnected. flush the buffer
if self.team.connected:
self.clear_messages()
w.prnt_date_tags(self.channel_buffer, SlackTS().major,
tag(backlog=True, no_log=True), '\tgetting channel history...')
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["history"],
{"channel": self.identifier, "count": BACKLOG_SIZE}, channel=self, metadata={'clear': True})
if not slow_queue:
self.eventrouter.receive(s)
else:
self.eventrouter.receive_slow(s)
self.got_history = True
def main_message_keys_reversed(self):
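        # Timestamps of top-level messages only (thread replies are skipped),
        # newest first.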
return (key for key in reversed(self.messages)
if type(self.messages[key]) == SlackMessage)
# Typing related
def set_typing(self, user):
if self.channel_buffer and self.is_visible():
self.typing[user] = time.time()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def unset_typing(self, user):
if self.channel_buffer and self.is_visible():
u = self.typing.get(user)
if u:
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def is_someone_typing(self):
"""
        Walks through the dict of users typing in this channel and returns
        True as soon as one of them is still actively typing. If none are,
        clears the dict and returns False.
"""
for user, timestamp in self.typing.items():
if timestamp + 4 > time.time():
return True
if len(self.typing) > 0:
self.typing = {}
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
return False
def get_typing_list(self):
"""
Returns the names of everyone in the channel who is currently typing.
"""
typing = []
        # Iterate over a copy so expired entries can be deleted while looping
        # (deleting during dict iteration raises RuntimeError on Python 3).
        for user, timestamp in list(self.typing.items()):
if timestamp + 4 > time.time():
typing.append(user)
else:
del self.typing[user]
return typing
def mark_read(self, ts=None, update_remote=True, force=False):
if self.new_messages or force:
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
if not ts:
ts = next(reversed(self.messages), SlackTS())
if ts > self.last_read:
self.last_read = ts
if update_remote:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["mark"],
{"channel": self.identifier, "ts": ts}, channel=self)
self.eventrouter.receive(s)
self.new_messages = False
def user_joined(self, user_id):
# ugly hack - for some reason this gets turned into a list
self.members = set(self.members)
self.members.add(user_id)
self.update_nicklist(user_id)
def user_left(self, user_id):
self.members.discard(user_id)
self.update_nicklist(user_id)
def update_nicklist(self, user=None):
if not self.channel_buffer:
return
if self.type not in ["channel", "group", "mpim", "private", "shared"]:
return
w.buffer_set(self.channel_buffer, "nicklist", "1")
# create nicklists for the current channel if they don't exist
# if they do, use the existing pointer
here = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_HERE)
if not here:
here = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_HERE, "weechat.color.nicklist_group", 1)
afk = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_AWAY)
if not afk:
afk = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_AWAY, "weechat.color.nicklist_group", 1)
# Add External nicklist group only for shared channels
if self.type == 'shared':
external = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_EXTERNAL)
if not external:
external = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_EXTERNAL, 'weechat.color.nicklist_group', 2)
if user and len(self.members) < 1000:
user = self.team.users.get(user)
# External users that have left shared channels won't exist
if not user or user.deleted:
return
nick = w.nicklist_search_nick(self.channel_buffer, "", user.name)
# since this is a change just remove it regardless of where it is
w.nicklist_remove_nick(self.channel_buffer, nick)
# now add it back in to whichever..
nick_group = afk
if user.is_external:
nick_group = external
elif self.team.is_user_present(user.identifier):
nick_group = here
if user.identifier in self.members:
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
# if we didn't get a user, build a complete list. this is expensive.
else:
if len(self.members) < 1000:
try:
for user in self.members:
user = self.team.users.get(user)
if user.deleted:
continue
nick_group = afk
if user.is_external:
nick_group = external
elif self.team.is_user_present(user.identifier):
nick_group = here
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
except:
dbg("DEBUG: {} {} {}".format(self.identifier, self.name, format_exc_only()))
else:
w.nicklist_remove_all(self.channel_buffer)
for fn in ["1| too", "2| many", "3| users", "4| to", "5| show"]:
w.nicklist_add_group(self.channel_buffer, '', fn, w.color('white'), 1)
def render(self, message, force=False):
text = message.render(force)
if isinstance(message, SlackThreadMessage):
thread_id = message.parent_message.hash or message.parent_message.ts
return colorize_string(get_thread_color(thread_id), '[{}]'.format(thread_id)) + ' {}'.format(text)
return text
class SlackDMChannel(SlackChannel):
"""
Subclass of a normal channel for person-to-person communication, which
has some important differences.
"""
def __init__(self, eventrouter, users, **kwargs):
dmuser = kwargs["user"]
kwargs["name"] = users[dmuser].name if dmuser in users else dmuser
super(SlackDMChannel, self).__init__(eventrouter, **kwargs)
self.type = 'im'
self.update_color()
self.set_name(self.slack_name)
if dmuser in users:
self.set_topic(create_user_status_string(users[dmuser].profile))
def set_related_server(self, team):
super(SlackDMChannel, self).set_related_server(team)
if self.user not in self.team.users:
s = SlackRequest(self.team, 'users.info', {'user': self.slack_name}, channel=self)
self.eventrouter.receive(s)
def set_name(self, slack_name):
self.name = slack_name
def get_members(self):
return {self.user}
def create_buffer(self):
if not self.channel_buffer:
super(SlackDMChannel, self).create_buffer()
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
def update_color(self):
if config.colorize_private_chats:
self.color_name = get_nick_color(self.name)
else:
self.color_name = ""
def formatted_name(self, style="default", typing=False, present=True, enable_color=False, **kwargs):
prepend = ""
if config.show_buflist_presence:
prepend = "+" if present else " "
select = {
"default": self.slack_name,
"sidebar": prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}".format(self.team.preferred_name, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
if config.colorize_private_chats and enable_color:
return colorize_string(self.color_name, select[style])
else:
return select[style]
def open(self, update_remote=True):
self.create_buffer()
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["info"],
{"name": self.identifier}, channel=self)
self.eventrouter.receive(s)
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["join"],
{"users": self.user, "return_im": True}, channel=self)
self.eventrouter.receive(s)
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(style="sidebar", present=self.team.is_user_present(self.user), enable_color=config.colorize_private_chats)
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def refresh(self):
return self.rename()
class SlackGroupChannel(SlackChannel):
"""
A group channel is a private discussion group.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackGroupChannel, self).__init__(eventrouter, **kwargs)
self.type = "group"
self.set_name(self.slack_name)
def set_name(self, slack_name):
self.name = config.group_name_prefix + slack_name
class SlackPrivateChannel(SlackGroupChannel):
"""
A private channel is a private discussion group. At the time of writing, it
differs from group channels in that group channels are channels initially
created as private, while private channels are public channels which are
later converted to private.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackPrivateChannel, self).__init__(eventrouter, **kwargs)
self.type = "private"
def set_related_server(self, team):
super(SlackPrivateChannel, self).set_related_server(team)
# Fetch members here (after the team is known) since they aren't
# included in rtm.start
s = SlackRequest(team, 'conversations.members', {'channel': self.identifier}, channel=self)
self.eventrouter.receive(s)
class SlackMPDMChannel(SlackChannel):
"""
An MPDM channel is a special instance of a 'group' channel.
We change the name to look less terrible in weechat.
"""
def __init__(self, eventrouter, team_users, myidentifier, **kwargs):
kwargs["name"] = ','.join(sorted(
getattr(team_users.get(user_id), 'name', user_id)
for user_id in kwargs["members"]
if user_id != myidentifier
))
super(SlackMPDMChannel, self).__init__(eventrouter, **kwargs)
self.type = "mpim"
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["info"],
{"channel": self.identifier}, channel=self)
self.eventrouter.receive(s)
if update_remote and 'join' in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]['join'],
{'users': ','.join(self.members)}, channel=self)
self.eventrouter.receive(s)
def set_name(self, slack_name):
self.name = slack_name
def formatted_name(self, style="default", typing=False, **kwargs):
if typing and config.channel_name_typing_indicator:
prepend = ">"
else:
prepend = "@"
select = {
"default": self.name,
"sidebar": prepend + self.name,
"base": self.name,
"long_default": "{}.{}".format(self.team.preferred_name, self.name),
"long_base": "{}.{}".format(self.team.preferred_name, self.name),
}
return select[style]
def rename(self):
pass
class SlackSharedChannel(SlackChannel):
def __init__(self, eventrouter, **kwargs):
super(SlackSharedChannel, self).__init__(eventrouter, **kwargs)
self.type = 'shared'
def set_related_server(self, team):
super(SlackSharedChannel, self).set_related_server(team)
# Fetch members here (after the team is known) since they aren't
# included in rtm.start
s = SlackRequest(team, 'conversations.members', {'channel': self.identifier}, channel=self)
self.eventrouter.receive(s)
def get_history(self, slow_queue=False):
# Get info for external users in the channel
for user in self.members - set(self.team.users.keys()):
s = SlackRequest(self.team, 'users.info', {'user': user}, channel=self)
self.eventrouter.receive(s)
super(SlackSharedChannel, self).get_history(slow_queue)
def set_name(self, slack_name):
self.name = config.shared_name_prefix + slack_name
class SlackThreadChannel(SlackChannelCommon):
"""
A thread channel is a virtual channel. We don't inherit from
SlackChannel, because most of how it operates will be different.
"""
def __init__(self, eventrouter, parent_message):
self.eventrouter = eventrouter
self.parent_message = parent_message
self.hashed_messages = {}
self.channel_buffer = None
self.type = "thread"
self.got_history = False
self.label = None
self.members = self.parent_message.channel.members
self.team = self.parent_message.team
self.last_line_from = None
@property
def identifier(self):
return self.parent_message.channel.identifier
@property
def messages(self):
return self.parent_message.channel.messages
@property
def muted(self):
return self.parent_message.channel.muted
def formatted_name(self, style="default", **kwargs):
hash_or_ts = self.parent_message.hash or self.parent_message.ts
styles = {
"default": " +{}".format(hash_or_ts),
"long_default": "{}.{}".format(self.parent_message.channel.formatted_name(style="long_default"), hash_or_ts),
"sidebar": " +{}".format(hash_or_ts),
}
return styles[style]
def refresh(self):
self.rename()
def mark_read(self, ts=None, update_remote=True, force=False):
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
def buffer_prnt(self, nick, text, timestamp, tag_nick=None):
data = "{}\t{}".format(format_nick(nick, self.last_line_from), text)
self.last_line_from = nick
ts = SlackTS(timestamp)
if self.channel_buffer:
if self.parent_message.channel.type in ["im", "mpim"]:
tagset = "dm"
else:
tagset = "channel"
self_msg = tag_nick == self.team.nick
tags = tag(tagset, user=tag_nick, self_msg=self_msg)
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_last_print_time(self.channel_buffer, ts.minor)
if self_msg:
self.mark_read(ts, update_remote=False, force=True)
def get_history(self):
self.got_history = True
for message in chain([self.parent_message], self.parent_message.submessages):
text = self.render(message)
self.buffer_prnt(message.sender, text, message.ts, tag_nick=message.sender_plain)
if len(self.parent_message.submessages) < self.parent_message.number_of_replies():
s = SlackRequest(self.team, "conversations.replies",
{"channel": self.identifier, "ts": self.parent_message.ts},
channel=self.parent_message.channel)
self.eventrouter.receive(s)
def main_message_keys_reversed(self):
return (message.ts for message in reversed(self.parent_message.submessages))
def send_message(self, message, subtype=None, request_dict_ext={}):
if subtype == 'me_message':
w.prnt("", "ERROR: /me is not supported in threads")
return w.WEECHAT_RC_ERROR
message = linkify_text(message, self.team)
dbg(message)
request = {"type": "message", "text": message,
"channel": self.parent_message.channel.identifier,
"thread_ts": str(self.parent_message.ts),
"user": self.team.myidentifier}
request.update(request_dict_ext)
self.team.send_to_websocket(request)
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
def rename(self):
if self.channel_buffer and not self.label:
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
def set_highlights(self, highlight_string=None):
if self.channel_buffer:
if highlight_string is None:
highlight_string = ",".join(self.parent_message.channel.highlights())
w.buffer_set(self.channel_buffer, "highlight_words", highlight_string)
def create_buffer(self):
"""
Creates the weechat buffer where the thread magic happens.
"""
if not self.channel_buffer:
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.preferred_name)
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
self.set_highlights()
time_format = w.config_string(w.config_get("weechat.look.buffer_time_format"))
parent_time = time.localtime(SlackTS(self.parent_message.ts).major)
            topic = '{} {} | {}'.format(time.strftime(time_format, parent_time), self.parent_message.sender, self.render(self.parent_message))
w.buffer_set(self.channel_buffer, "title", topic)
# self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def destroy_buffer(self, update_remote):
self.channel_buffer = None
self.got_history = False
self.active = False
def render(self, message, force=False):
return message.render(force)
class SlackUser(object):
"""
    Represents an individual slack user. Also where you set their name formatting.
"""
def __init__(self, originating_team_id, **kwargs):
self.identifier = kwargs["id"]
# These attributes may be missing in the response, so we have to make
# sure they're set
self.profile = {}
self.presence = kwargs.get("presence", "unknown")
self.deleted = kwargs.get("deleted", False)
self.is_external = (not kwargs.get("is_bot") and
kwargs.get("team_id") != originating_team_id)
for key, value in kwargs.items():
setattr(self, key, value)
self.name = nick_from_profile(self.profile, kwargs["name"])
self.username = kwargs["name"]
self.update_color()
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def force_color(self, color_name):
self.color_name = color_name
def update_color(self):
# This will automatically be none/"" if the user has disabled nick
# colourization.
self.color_name = get_nick_color(self.name)
def update_status(self, status_emoji, status_text):
self.profile["status_emoji"] = status_emoji
self.profile["status_text"] = status_text
def formatted_name(self, prepend="", enable_color=True):
name = prepend + self.name
if enable_color:
return colorize_string(self.color_name, name)
else:
return name
class SlackBot(SlackUser):
"""
Basically the same as a user, but split out to identify and for future
needs
"""
def __init__(self, originating_team_id, **kwargs):
super(SlackBot, self).__init__(originating_team_id, is_bot=True, **kwargs)
class SlackMessage(object):
"""
Represents a single slack message and associated context/metadata.
These are modifiable and can be rerendered to change a message,
delete a message, add a reaction, add a thread.
Note: these can't be tied to a SlackUser object because users
can be deleted, so we have to store sender in each one.
"""
def __init__(self, message_json, team, channel, override_sender=None):
self.team = team
self.channel = channel
self.message_json = message_json
self.submessages = []
self.hash = None
if override_sender:
self.sender = override_sender
self.sender_plain = override_sender
else:
senders = self.get_sender()
self.sender, self.sender_plain = senders[0], senders[1]
self.ts = SlackTS(message_json['ts'])
def __hash__(self):
return hash(self.ts)
@property
def thread_channel(self):
return self.channel.thread_channels.get(self.ts)
def open_thread(self, switch=False):
if not self.thread_channel or not self.thread_channel.active:
self.channel.thread_channels[self.ts] = SlackThreadChannel(EVENTROUTER, self)
self.thread_channel.open()
if switch:
w.buffer_set(self.thread_channel.channel_buffer, "display", "1")
def render(self, force=False):
# If we already have a rendered version in the object, just return that.
if not force and self.message_json.get("_rendered_text"):
return self.message_json["_rendered_text"]
if "fallback" in self.message_json:
text = self.message_json["fallback"]
elif self.message_json.get("text"):
text = self.message_json["text"]
else:
text = ""
if self.message_json.get('mrkdwn', True):
text = render_formatting(text)
if (self.message_json.get('subtype') in ('channel_join', 'group_join') and
self.message_json.get('inviter')):
inviter_id = self.message_json.get('inviter')
text += " by invitation from <@{}>".format(inviter_id)
if "blocks" in self.message_json:
text += unfurl_blocks(self.message_json)
text = unfurl_refs(text)
if (self.message_json.get('subtype') == 'me_message' and
not self.message_json['text'].startswith(self.sender)):
text = "{} {}".format(self.sender, text)
if "edited" in self.message_json:
text += " " + colorize_string(config.color_edited_suffix, '(edited)')
text += unfurl_refs(unwrap_attachments(self.message_json, text))
text += unfurl_refs(unwrap_files(self.message_json, text))
text = unhtmlescape(text.lstrip().replace("\t", " "))
text += create_reactions_string(
self.message_json.get("reactions", ""), self.team.myidentifier)
if self.number_of_replies():
self.channel.hash_message(self.ts)
text += " " + colorize_string(get_thread_color(self.hash), "[ Thread: {} Replies: {} ]".format(
self.hash, self.number_of_replies()))
text = replace_string_with_emoji(text)
self.message_json["_rendered_text"] = text
return text
def change_text(self, new_text):
self.message_json["text"] = new_text
dbg(self.message_json)
def get_sender(self):
name = ""
name_plain = ""
user = self.team.users.get(self.message_json.get('user'))
if user:
name = "{}".format(user.formatted_name())
name_plain = "{}".format(user.formatted_name(enable_color=False))
if user.is_external:
name += config.external_user_suffix
name_plain += config.external_user_suffix
elif 'username' in self.message_json:
username = self.message_json["username"]
if self.message_json.get("subtype") == "bot_message":
name = "{} :]".format(username)
name_plain = "{}".format(username)
else:
name = "-{}-".format(username)
name_plain = "{}".format(username)
elif 'service_name' in self.message_json:
name = "-{}-".format(self.message_json["service_name"])
name_plain = "{}".format(self.message_json["service_name"])
elif self.message_json.get('bot_id') in self.team.bots:
name = "{} :]".format(self.team.bots[self.message_json["bot_id"]].formatted_name())
name_plain = "{}".format(self.team.bots[self.message_json["bot_id"]].formatted_name(enable_color=False))
return (name, name_plain)
def add_reaction(self, reaction, user):
m = self.message_json.get('reactions')
if m:
found = False
for r in m:
if r["name"] == reaction and user not in r["users"]:
r["users"].append(user)
found = True
if not found:
self.message_json["reactions"].append({"name": reaction, "users": [user]})
else:
self.message_json["reactions"] = [{"name": reaction, "users": [user]}]
def remove_reaction(self, reaction, user):
m = self.message_json.get('reactions')
if m:
for r in m:
if r["name"] == reaction and user in r["users"]:
r["users"].remove(user)
def has_mention(self):
return w.string_has_highlight(unfurl_refs(self.message_json.get('text')),
",".join(self.channel.highlights()))
def number_of_replies(self):
return max(len(self.submessages), len(self.message_json.get("replies", [])))
def notify_thread(self, action=None, sender_id=None):
if config.auto_open_threads:
self.open_thread()
elif sender_id != self.team.myidentifier:
if action == "mention":
template = "You were mentioned in thread {hash}, channel {channel}"
elif action == "participant":
template = "New message in thread {hash}, channel {channel} in which you participated"
elif action == "response":
template = "New message in thread {hash} in response to own message in {channel}"
else:
template = "Notification for message in thread {hash}, channel {channel}"
message = template.format(hash=self.hash, channel=self.channel.formatted_name())
self.team.buffer_prnt(message, message=True)
class SlackThreadMessage(SlackMessage):
def __init__(self, parent_message, *args):
super(SlackThreadMessage, self).__init__(*args)
self.parent_message = parent_message
class Hdata(object):
def __init__(self, w):
self.buffer = w.hdata_get('buffer')
self.line = w.hdata_get('line')
self.line_data = w.hdata_get('line_data')
self.lines = w.hdata_get('lines')
class SlackTS(object):
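    """
    A Slack message timestamp such as "1483051909.018632": the part before
    the dot (major) is epoch seconds, the part after (minor) only
    disambiguates messages within the same second. Comparable against other
    SlackTS instances and against their string form.
    """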
def __init__(self, ts=None):
if ts:
self.major, self.minor = [int(x) for x in ts.split('.', 1)]
else:
self.major = int(time.time())
self.minor = 0
def __cmp__(self, other):
if isinstance(other, SlackTS):
if self.major < other.major:
return -1
elif self.major > other.major:
return 1
elif self.major == other.major:
if self.minor < other.minor:
return -1
elif self.minor > other.minor:
return 1
else:
return 0
elif isinstance(other, str):
s = self.__str__()
if s < other:
return -1
elif s > other:
return 1
elif s == other:
return 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __hash__(self):
return hash("{}.{}".format(self.major, self.minor))
def __repr__(self):
return str("{0}.{1:06d}".format(self.major, self.minor))
def split(self, *args, **kwargs):
return [self.major, self.minor]
def majorstr(self):
return str(self.major)
def minorstr(self):
return str(self.minor)
###### New handlers
def handle_rtmstart(login_data, eventrouter, team, channel, metadata):
"""
This handles the main entry call to slack, rtm.start
"""
metadata = login_data["wee_slack_request_metadata"]
if not login_data["ok"]:
w.prnt("", "ERROR: Failed connecting to Slack with token {}: {}"
.format(token_for_print(metadata.token), login_data["error"]))
if not re.match(r"^xo\w\w(-\d+){3}-[0-9a-f]+$", metadata.token):
w.prnt("", "ERROR: Token does not look like a valid Slack token. "
"Ensure it is a valid token and not just a OAuth code.")
return
self_profile = next(
user["profile"]
for user in login_data["users"]
if user["id"] == login_data["self"]["id"]
)
self_nick = nick_from_profile(self_profile, login_data["self"]["name"])
# Let's reuse a team if we have it already.
th = SlackTeam.generate_team_hash(login_data['team']['id'], login_data['team']['domain'])
if not eventrouter.teams.get(th):
users = {}
for item in login_data["users"]:
users[item["id"]] = SlackUser(login_data['team']['id'], **item)
bots = {}
for item in login_data["bots"]:
bots[item["id"]] = SlackBot(login_data['team']['id'], **item)
subteams = {}
for item in login_data["subteams"]["all"]:
is_member = item['id'] in login_data["subteams"]["self"]
subteams[item['id']] = SlackSubteam(
login_data['team']['id'], is_member=is_member, **item)
channels = {}
for item in login_data["channels"]:
if item["is_shared"]:
channels[item["id"]] = SlackSharedChannel(eventrouter, **item)
elif item["is_private"]:
channels[item["id"]] = SlackPrivateChannel(eventrouter, **item)
else:
channels[item["id"]] = SlackChannel(eventrouter, **item)
for item in login_data["ims"]:
channels[item["id"]] = SlackDMChannel(eventrouter, users, **item)
for item in login_data["groups"]:
if item["is_mpim"]:
channels[item["id"]] = SlackMPDMChannel(eventrouter, users, login_data["self"]["id"], **item)
else:
channels[item["id"]] = SlackGroupChannel(eventrouter, **item)
t = SlackTeam(
eventrouter,
metadata.token,
th,
login_data['url'],
login_data["team"],
subteams,
self_nick,
login_data["self"]["id"],
login_data["self"]["manual_presence"],
users,
bots,
channels,
muted_channels=login_data["self"]["prefs"]["muted_channels"],
highlight_words=login_data["self"]["prefs"]["highlight_words"],
)
eventrouter.register_team(t)
else:
t = eventrouter.teams.get(th)
if t.myidentifier != login_data["self"]["id"]:
print_error(
'The Slack team {} has tokens for two different users, this is not supported. The '
'token {} is for user {}, and the token {} is for user {}. Please remove one of '
'them.'.format(t.team_info["name"], token_for_print(t.token), t.nick,
token_for_print(metadata.token), self_nick)
)
return
elif metadata.metadata.get('initial_connection'):
print_error(
'Ignoring duplicate Slack tokens for the same team ({}) and user ({}). The two '
'tokens are {} and {}.'.format(t.team_info["name"], t.nick,
token_for_print(t.token), token_for_print(metadata.token)),
warning=True
)
return
else:
t.set_reconnect_url(login_data['url'])
t.connecting_rtm = False
t.connect()
def handle_rtmconnect(login_data, eventrouter, team, channel, metadata):
metadata = login_data["wee_slack_request_metadata"]
team = metadata.team
team.connecting_rtm = False
if not login_data["ok"]:
w.prnt("", "ERROR: Failed reconnecting to Slack with token {}: {}"
.format(token_for_print(metadata.token), login_data["error"]))
return
team.set_reconnect_url(login_data['url'])
team.connect()
def handle_emojilist(emoji_json, eventrouter, team, channel, metadata):
if emoji_json["ok"]:
team.emoji_completions.extend(emoji_json["emoji"].keys())
def handle_channelsinfo(channel_json, eventrouter, team, channel, metadata):
channel.set_unread_count_display(channel_json['channel'].get('unread_count_display', 0))
channel.set_members(channel_json['channel']['members'])
def handle_groupsinfo(group_json, eventrouter, team, channel, metadatas):
channel.set_unread_count_display(group_json['group'].get('unread_count_display', 0))
def handle_conversationsopen(conversation_json, eventrouter, team, channel, metadata, object_name='channel'):
# Set unread count if the channel isn't new
if channel:
unread_count_display = conversation_json[object_name].get('unread_count_display', 0)
channel.set_unread_count_display(unread_count_display)
def handle_mpimopen(mpim_json, eventrouter, team, channel, metadata, object_name='group'):
handle_conversationsopen(mpim_json, eventrouter, team, channel, metadata, object_name)
def handle_history(message_json, eventrouter, team, channel, metadata):
if metadata['clear']:
channel.clear_messages()
channel.got_history = True
for message in reversed(message_json["messages"]):
process_message(message, eventrouter, team, channel, metadata, history_message=True)
handle_channelshistory = handle_history
handle_conversationshistory = handle_history
handle_groupshistory = handle_history
handle_imhistory = handle_history
handle_mpimhistory = handle_history
def handle_conversationsreplies(message_json, eventrouter, team, channel, metadata):
for message in message_json['messages']:
process_message(message, eventrouter, team, channel, metadata)
def handle_conversationsmembers(members_json, eventrouter, team, channel, metadata):
if members_json['ok']:
channel.set_members(members_json['members'])
else:
w.prnt(team.channel_buffer, '{}Couldn\'t load members for channel {}. Error: {}'
.format(w.prefix('error'), channel.name, members_json['error']))
def handle_usersinfo(user_json, eventrouter, team, channel, metadata):
user_info = user_json['user']
if not metadata.get('user'):
user = SlackUser(team.identifier, **user_info)
team.users[user_info['id']] = user
if channel.type == 'shared':
channel.update_nicklist(user_info['id'])
elif channel.type == 'im':
channel.slack_name = user.name
channel.set_topic(create_user_status_string(user.profile))
def handle_usergroupsuserslist(users_json, eventrouter, team, channel, metadata):
header = 'Users in {}'.format(metadata['usergroup_handle'])
users = [team.users[key] for key in users_json['users']]
return print_users_info(team, header, users)
def handle_usersprofileset(json, eventrouter, team, channel, metadata):
if not json['ok']:
w.prnt('', 'ERROR: Failed to set profile: {}'.format(json['error']))
def handle_conversationsinvite(json, eventrouter, team, channel, metadata):
nicks = ', '.join(metadata['nicks'])
if json['ok']:
w.prnt(team.channel_buffer, 'Invited {} to {}'.format(nicks, channel.name))
else:
w.prnt(team.channel_buffer, 'ERROR: Couldn\'t invite {} to {}. Error: {}'
.format(nicks, channel.name, json['error']))
def handle_chatcommand(json, eventrouter, team, channel, metadata):
command = '{} {}'.format(metadata['command'], metadata['command_args']).rstrip()
response = unfurl_refs(json['response']) if 'response' in json else ''
if json['ok']:
response_text = 'Response: {}'.format(response) if response else 'No response'
w.prnt(team.channel_buffer, 'Ran command "{}". {}' .format(command, response_text))
else:
response_text = '. Response: {}'.format(response) if response else ''
w.prnt(team.channel_buffer, 'ERROR: Couldn\'t run command "{}". Error: {}{}'
.format(command, json['error'], response_text))
def handle_reactionsadd(json, eventrouter, team, channel, metadata):
if not json['ok']:
print_error("Couldn't add reaction {}: {}".format(metadata['reaction'], json['error']))
def handle_reactionsremove(json, eventrouter, team, channel, metadata):
if not json['ok']:
print_error("Couldn't remove reaction {}: {}".format(metadata['reaction'], json['error']))
###### New/converted process_ and subprocess_ methods
def process_hello(message_json, eventrouter, team, channel, metadata):
team.subscribe_users_presence()
def process_reconnect_url(message_json, eventrouter, team, channel, metadata):
team.set_reconnect_url(message_json['url'])
def process_presence_change(message_json, eventrouter, team, channel, metadata):
users = [team.users[user_id] for user_id in message_json.get("users", [])]
if "user" in metadata:
users.append(metadata["user"])
for user in users:
team.update_member_presence(user, message_json["presence"])
    if any(user.identifier == team.myidentifier for user in users):
w.bar_item_update("away")
w.bar_item_update("slack_away")
def process_manual_presence_change(message_json, eventrouter, team, channel, metadata):
team.my_manual_presence = message_json["presence"]
w.bar_item_update("away")
w.bar_item_update("slack_away")
def process_pref_change(message_json, eventrouter, team, channel, metadata):
if message_json['name'] == 'muted_channels':
team.set_muted_channels(message_json['value'])
elif message_json['name'] == 'highlight_words':
team.set_highlight_words(message_json['value'])
else:
dbg("Preference change not implemented: {}\n".format(message_json['name']))
def process_user_change(message_json, eventrouter, team, channel, metadata):
"""
Currently only used to update status, but lots here we could do.
"""
user = metadata['user']
profile = message_json['user']['profile']
if user:
user.update_status(profile.get('status_emoji'), profile.get('status_text'))
dmchannel = team.find_channel_by_members({user.identifier}, channel_type='im')
if dmchannel:
dmchannel.set_topic(create_user_status_string(profile))
def process_user_typing(message_json, eventrouter, team, channel, metadata):
if channel:
channel.set_typing(metadata["user"].name)
w.bar_item_update("slack_typing_notice")
def process_team_join(message_json, eventrouter, team, channel, metadata):
user = message_json['user']
team.users[user["id"]] = SlackUser(team.identifier, **user)
def process_pong(message_json, eventrouter, team, channel, metadata):
team.last_pong_time = time.time()
def process_message(message_json, eventrouter, team, channel, metadata, history_message=False):
if SlackTS(message_json["ts"]) in channel.messages:
return
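    # Messages that belong to a thread but aren't the parent (they carry
    # thread_ts but no reply_count and no subtype of their own) get a
    # synthetic subtype so they are routed to the thread subprocessors below.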
if "thread_ts" in message_json and "reply_count" not in message_json and "subtype" not in message_json:
if message_json.get("reply_broadcast"):
message_json["subtype"] = "thread_broadcast"
else:
message_json["subtype"] = "thread_message"
subtype = message_json.get("subtype")
subtype_functions = get_functions_with_prefix("subprocess_")
if subtype in subtype_functions:
subtype_functions[subtype](message_json, eventrouter, team, channel, history_message)
else:
message = SlackMessage(message_json, team, channel)
channel.store_message(message, team)
text = channel.render(message)
dbg("Rendered message: %s" % text)
dbg("Sender: %s (%s)" % (message.sender, message.sender_plain))
if subtype == 'me_message':
prefix = w.prefix("action").rstrip()
else:
prefix = message.sender
channel.buffer_prnt(prefix, text, message.ts, tag_nick=message.sender_plain, history_message=history_message)
channel.unread_count_display += 1
dbg("NORMAL REPLY {}".format(message_json))
if not history_message:
download_files(message_json, team)
def download_files(message_json, team):
download_location = config.files_download_location
if not download_location:
return
download_location = w.string_eval_path_home(download_location, {}, {}, {})
if not os.path.exists(download_location):
try:
os.makedirs(download_location)
except:
w.prnt('', 'ERROR: Failed to create directory at files_download_location: {}'
.format(format_exc_only()))
def fileout_iter(path):
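        # Yield the unmodified path first, then "name-1.ext", "name-2.ext",
        # ... so existing downloads are never overwritten.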
yield path
main, ext = os.path.splitext(path)
for i in count(start=1):
yield main + "-{}".format(i) + ext
for f in message_json.get('files', []):
if f.get('mode') == 'tombstone':
continue
filetype = '' if f['title'].endswith(f['filetype']) else '.' + f['filetype']
filename = '{}_{}{}'.format(team.preferred_name, f['title'], filetype)
for fileout in fileout_iter(os.path.join(download_location, filename)):
if os.path.isfile(fileout):
continue
w.hook_process_hashtable(
"url:" + f['url_private'],
{
'file_out': fileout,
'httpheader': 'Authorization: Bearer ' + team.token
},
config.slack_timeout, "", "")
break
def subprocess_thread_message(message_json, eventrouter, team, channel, history_message):
parent_ts = message_json.get('thread_ts')
if parent_ts:
parent_message = channel.messages.get(SlackTS(parent_ts))
if parent_message:
message = SlackThreadMessage(
parent_message, message_json, team, channel)
parent_message.submessages.append(message)
channel.hash_message(parent_ts)
channel.store_message(message, team)
channel.change_message(parent_ts)
if parent_message.thread_channel and parent_message.thread_channel.active:
parent_message.thread_channel.buffer_prnt(message.sender, parent_message.thread_channel.render(message), message.ts, tag_nick=message.sender_plain)
elif message.ts > channel.last_read and message.has_mention():
parent_message.notify_thread(action="mention", sender_id=message_json["user"])
if config.thread_messages_in_channel or message_json["subtype"] == "thread_broadcast":
thread_tag = "thread_broadcast" if message_json["subtype"] == "thread_broadcast" else "thread_message"
channel.buffer_prnt(
message.sender,
channel.render(message),
message.ts,
tag_nick=message.sender_plain,
history_message=history_message,
extra_tags=[thread_tag],
)
subprocess_thread_broadcast = subprocess_thread_message
def subprocess_channel_join(message_json, eventrouter, team, channel, history_message):
prefix_join = w.prefix("join").strip()
message = SlackMessage(message_json, team, channel, override_sender=prefix_join)
channel.buffer_prnt(prefix_join, channel.render(message), message_json["ts"], tagset='join', tag_nick=message.get_sender()[1], history_message=history_message)
channel.user_joined(message_json['user'])
channel.store_message(message, team)
def subprocess_channel_leave(message_json, eventrouter, team, channel, history_message):
prefix_leave = w.prefix("quit").strip()
message = SlackMessage(message_json, team, channel, override_sender=prefix_leave)
channel.buffer_prnt(prefix_leave, channel.render(message), message_json["ts"], tagset='leave', tag_nick=message.get_sender()[1], history_message=history_message)
channel.user_left(message_json['user'])
channel.store_message(message, team)
def subprocess_channel_topic(message_json, eventrouter, team, channel, history_message):
prefix_topic = w.prefix("network").strip()
message = SlackMessage(message_json, team, channel, override_sender=prefix_topic)
channel.buffer_prnt(prefix_topic, channel.render(message), message_json["ts"], tagset="topic", tag_nick=message.get_sender()[1], history_message=history_message)
channel.set_topic(message_json["topic"])
channel.store_message(message, team)
subprocess_group_join = subprocess_channel_join
subprocess_group_leave = subprocess_channel_leave
subprocess_group_topic = subprocess_channel_topic
def subprocess_message_replied(message_json, eventrouter, team, channel, history_message):
parent_ts = message_json["message"].get("thread_ts")
parent_message = channel.messages.get(SlackTS(parent_ts))
# Thread exists but is not open yet
if parent_message is not None \
and not (parent_message.thread_channel and parent_message.thread_channel.active):
channel.hash_message(parent_ts)
last_message = max(message_json["message"]["replies"], key=lambda x: x["ts"])
if message_json["message"].get("user") == team.myidentifier:
parent_message.notify_thread(action="response", sender_id=last_message["user"])
elif any(team.myidentifier == r["user"] for r in message_json["message"]["replies"]):
parent_message.notify_thread(action="participant", sender_id=last_message["user"])
def subprocess_message_changed(message_json, eventrouter, team, channel, history_message):
new_message = message_json.get("message")
channel.change_message(new_message["ts"], message_json=new_message)
def subprocess_message_deleted(message_json, eventrouter, team, channel, history_message):
message = colorize_string(config.color_deleted, '(deleted)')
channel.change_message(message_json["deleted_ts"], text=message)
def process_reply(message_json, eventrouter, team, channel, metadata):
reply_to = int(message_json["reply_to"])
original_message_json = team.ws_replies.pop(reply_to, None)
if original_message_json:
original_message_json.update(message_json)
channel = team.channels[original_message_json.get('channel')]
process_message(original_message_json, eventrouter, team=team, channel=channel, metadata={})
dbg("REPLY {}".format(message_json))
else:
dbg("Unexpected reply {}".format(message_json))
def process_channel_marked(message_json, eventrouter, team, channel, metadata):
ts = message_json.get("ts")
if ts:
channel.mark_read(ts=ts, force=True, update_remote=False)
else:
dbg("tried to mark something weird {}".format(message_json))
process_group_marked = process_channel_marked
process_im_marked = process_channel_marked
process_mpim_marked = process_channel_marked
def process_channel_joined(message_json, eventrouter, team, channel, metadata):
channel.update_from_message_json(message_json["channel"])
channel.open()
def process_channel_created(message_json, eventrouter, team, channel, metadata):
item = message_json["channel"]
item['is_member'] = False
channel = SlackChannel(eventrouter, team=team, **item)
team.channels[item["id"]] = channel
team.buffer_prnt('Channel created: {}'.format(channel.slack_name))
def process_channel_rename(message_json, eventrouter, team, channel, metadata):
channel.slack_name = message_json['channel']['name']
def process_im_created(message_json, eventrouter, team, channel, metadata):
item = message_json["channel"]
channel = SlackDMChannel(eventrouter, team=team, users=team.users, **item)
team.channels[item["id"]] = channel
team.buffer_prnt('IM channel created: {}'.format(channel.name))
def process_im_open(message_json, eventrouter, team, channel, metadata):
channel.check_should_open(True)
w.buffer_set(channel.channel_buffer, "hotlist", "2")
def process_im_close(message_json, eventrouter, team, channel, metadata):
if channel.channel_buffer:
w.prnt(team.channel_buffer,
'IM {} closed by another client or the server'.format(channel.name))
eventrouter.weechat_controller.unregister_buffer(channel.channel_buffer, False, True)
def process_group_joined(message_json, eventrouter, team, channel, metadata):
item = message_json["channel"]
if item["name"].startswith("mpdm-"):
channel = SlackMPDMChannel(eventrouter, team.users, team.myidentifier, team=team, **item)
else:
channel = SlackGroupChannel(eventrouter, team=team, **item)
team.channels[item["id"]] = channel
channel.open()
def process_reaction_added(message_json, eventrouter, team, channel, metadata):
channel = team.channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts)
if message:
message.add_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("reaction to item type not supported: " + str(message_json))
def process_reaction_removed(message_json, eventrouter, team, channel, metadata):
channel = team.channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts)
if message:
message.remove_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("Reaction to item type not supported: " + str(message_json))
def process_subteam_created(subteam_json, eventrouter, team, channel, metadata):
subteam_json_info = subteam_json['subteam']
is_member = team.myidentifier in subteam_json_info.get('users', [])
subteam = SlackSubteam(team.identifier, is_member=is_member, **subteam_json_info)
team.subteams[subteam_json_info['id']] = subteam
def process_subteam_updated(subteam_json, eventrouter, team, channel, metadata):
current_subteam_info = team.subteams[subteam_json['subteam']['id']]
is_member = team.myidentifier in subteam_json['subteam'].get('users', [])
new_subteam_info = SlackSubteam(team.identifier, is_member=is_member, **subteam_json['subteam'])
team.subteams[subteam_json['subteam']['id']] = new_subteam_info
if current_subteam_info.is_member != new_subteam_info.is_member:
for channel in team.channels.values():
channel.set_highlights()
if config.notify_usergroup_handle_updated and current_subteam_info.handle != new_subteam_info.handle:
message = 'User group {old_handle} has updated its handle to {new_handle} in team {team}.'.format(
            old_handle=current_subteam_info.handle, new_handle=new_subteam_info.handle, team=team.preferred_name)
team.buffer_prnt(message, message=True)
def process_emoji_changed(message_json, eventrouter, team, channel, metadata):
team.load_emoji_completions()
###### New module/global methods
def render_formatting(text):
text = re.sub(r'(^| )\*([^*\n`]+)\*(?=[^\w]|$)',
r'\1{}*\2*{}'.format(w.color(config.render_bold_as),
w.color('-' + config.render_bold_as)),
text,
flags=re.UNICODE)
text = re.sub(r'(^| )_([^_\n`]+)_(?=[^\w]|$)',
r'\1{}_\2_{}'.format(w.color(config.render_italic_as),
w.color('-' + config.render_italic_as)),
text,
flags=re.UNICODE)
return text
def linkify_text(message, team, only_users=False):
# The get_username_map function is a bit heavy, but this whole
# function is only called on message send..
usernames = team.get_username_map()
channels = team.get_channel_map()
usergroups = team.generate_usergroup_map()
message_escaped = (message
# Replace IRC formatting chars with Slack formatting chars.
.replace('\x02', '*')
.replace('\x1D', '_')
.replace('\x1F', config.map_underline_to)
# Escape chars that have special meaning to Slack. Note that we do not
# (and should not) perform full HTML entity-encoding here.
# See https://api.slack.com/docs/message-formatting for details.
.replace('&', '&')
.replace('<', '<')
.replace('>', '>'))
def linkify_word(match):
word = match.group(0)
prefix, name = match.groups()
if prefix == "@":
if name in ["channel", "everyone", "group", "here"]:
return "<!{}>".format(name)
elif name in usernames:
return "<@{}>".format(usernames[name])
elif word in usergroups.keys():
return "<!subteam^{}|{}>".format(usergroups[word], word)
elif prefix == "#" and not only_users:
if word in channels:
return "<#{}|{}>".format(channels[word], name)
return word
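    # Match @name / #channel tokens at the start of a word; names may contain
    # word characters, parentheses, quotes, dots and dashes.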
linkify_regex = r'(?:^|(?<=\s))([@#])([\w\(\)\'.-]+)'
return re.sub(linkify_regex, linkify_word, message_escaped, flags=re.UNICODE)
def unfurl_blocks(message_json):
block_text = [""]
for block in message_json["blocks"]:
try:
if block["type"] == "section":
fields = block.get("fields", [])
if "text" in block:
fields.insert(0, block["text"])
block_text.extend(unfurl_block_element(field) for field in fields)
elif block["type"] == "actions":
elements = []
for element in block["elements"]:
if element["type"] == "button":
elements.append(unfurl_block_element(element["text"]))
else:
elements.append(colorize_string(config.color_deleted,
'<<Unsupported block action type "{}">>'.format(element["type"])))
block_text.append(" | ".join(elements))
elif block["type"] == "call":
block_text.append("Join via " + block["call"]["v1"]["join_url"])
elif block["type"] == "divider":
block_text.append("---")
elif block["type"] == "context":
block_text.append(" | ".join(unfurl_block_element(el) for el in block["elements"]))
elif block["type"] == "image":
if "title" in block:
block_text.append(unfurl_block_element(block["title"]))
block_text.append(unfurl_block_element(block))
elif block["type"] == "rich_text":
continue
else:
block_text.append(colorize_string(config.color_deleted,
'<<Unsupported block type "{}">>'.format(block["type"])))
dbg('Unsupported block: "{}"'.format(json.dumps(block)), level=4)
except Exception as e:
dbg("Failed to unfurl block ({}): {}".format(repr(e), json.dumps(block)), level=4)
return "\n".join(block_text)
def unfurl_block_element(text):
if text["type"] == "mrkdwn":
return render_formatting(text["text"])
elif text["type"] == "plain_text":
return text["text"]
elif text["type"] == "image":
return "{} ({})".format(text["image_url"], text["alt_text"])
def unfurl_refs(text):
"""
input : <@U096Q7CQM|someuser> has joined the channel
    output : someuser has joined the channel
"""
# Find all strings enclosed by <>
# - <https://example.com|example with spaces>
# - <#C2147483705|#otherchannel>
# - <@U2147483697|@othernick>
# - <!subteam^U2147483697|@group>
# Test patterns lives in ./_pytest/test_unfurl.py
def unfurl_ref(match):
ref, fallback = match.groups()
resolved_ref = resolve_ref(ref)
if resolved_ref != ref:
return resolved_ref
if fallback and not config.unfurl_ignore_alt_text:
if ref.startswith("#"):
return "#{}".format(fallback)
elif ref.startswith("@"):
return fallback
elif ref.startswith("!subteam"):
prefix = "@" if not fallback.startswith("@") else ""
return prefix + fallback
elif ref.startswith("!date"):
return fallback
else:
match_url = r"^\w+:(//)?{}$".format(re.escape(fallback))
url_matches_desc = re.match(match_url, ref)
if url_matches_desc and config.unfurl_auto_link_display == "text":
return fallback
elif url_matches_desc and config.unfurl_auto_link_display == "url":
return ref
else:
return "{} ({})".format(ref, fallback)
return ref
return re.sub(r"<([^|>]*)(?:\|([^>]*))?>", unfurl_ref, text)
def unhtmlescape(text):
    return text.replace("&lt;", "<") \
               .replace("&gt;", ">") \
               .replace("&amp;", "&")
def unwrap_attachments(message_json, text_before):
text_before_unescaped = unhtmlescape(text_before)
attachment_texts = []
a = message_json.get("attachments")
if a:
if text_before:
attachment_texts.append('')
for attachment in a:
# Attachments should be rendered roughly like:
#
# $pretext
# $author: (if rest of line is non-empty) $title ($title_link) OR $from_url
# $author: (if no $author on previous line) $text
# $fields
t = []
prepend_title_text = ''
if 'author_name' in attachment:
prepend_title_text = attachment['author_name'] + ": "
if 'pretext' in attachment:
t.append(attachment['pretext'])
title = attachment.get('title')
title_link = attachment.get('title_link', '')
if title_link in text_before_unescaped:
title_link = ''
if title and title_link:
t.append('%s%s (%s)' % (prepend_title_text, title, title_link,))
prepend_title_text = ''
elif title and not title_link:
t.append('%s%s' % (prepend_title_text, title,))
prepend_title_text = ''
from_url = attachment.get('from_url', '')
if from_url not in text_before_unescaped and from_url != title_link:
t.append(from_url)
atext = attachment.get("text")
if atext:
tx = re.sub(r' *\n[\n ]+', '\n', atext)
t.append(prepend_title_text + tx)
prepend_title_text = ''
image_url = attachment.get('image_url', '')
if image_url not in text_before_unescaped and image_url != title_link:
t.append(image_url)
fields = attachment.get("fields")
if fields:
for f in fields:
if f.get('title'):
t.append('%s %s' % (f['title'], f['value'],))
else:
t.append(f['value'])
fallback = attachment.get("fallback")
if t == [] and fallback:
t.append(fallback)
attachment_texts.append("\n".join([x.strip() for x in t if x]))
return "\n".join(attachment_texts)
def unwrap_files(message_json, text_before):
files_texts = []
for f in message_json.get('files', []):
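        # Deleted files show up as 'tombstone' entries rather than real files.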
if f.get('mode', '') != 'tombstone':
text = '{} ({})'.format(f['url_private'], f['title'])
else:
text = colorize_string(config.color_deleted, '(This file was deleted.)')
files_texts.append(text)
if text_before:
files_texts.insert(0, '')
return "\n".join(files_texts)
def resolve_ref(ref):
if ref in ['!channel', '!everyone', '!group', '!here']:
return ref.replace('!', '@')
for team in EVENTROUTER.teams.values():
if ref.startswith('@'):
user = team.users.get(ref[1:])
if user:
suffix = config.external_user_suffix if user.is_external else ''
return '@{}{}'.format(user.name, suffix)
elif ref.startswith('#'):
channel = team.channels.get(ref[1:])
if channel:
return channel.name
elif ref.startswith('!subteam'):
_, subteam_id = ref.split('^')
subteam = team.subteams.get(subteam_id)
if subteam:
return subteam.handle
elif ref.startswith("!date"):
parts = ref.split('^')
ref_datetime = datetime.fromtimestamp(int(parts[1]))
link_suffix = ' ({})'.format(parts[3]) if len(parts) > 3 else ''
token_to_format = {
'date_num': '%Y-%m-%d',
'date': '%B %d, %Y',
'date_short': '%b %d, %Y',
'date_long': '%A, %B %d, %Y',
'time': '%H:%M',
'time_secs': '%H:%M:%S'
}
def replace_token(match):
token = match.group(1)
if token.startswith('date_') and token.endswith('_pretty'):
if ref_datetime.date() == date.today():
return 'today'
elif ref_datetime.date() == date.today() - timedelta(days=1):
return 'yesterday'
elif ref_datetime.date() == date.today() + timedelta(days=1):
return 'tomorrow'
else:
token = token.replace('_pretty', '')
if token in token_to_format:
return ref_datetime.strftime(token_to_format[token])
else:
return match.group(0)
return re.sub(r"{([^}]+)}", replace_token, parts[2]) + link_suffix
# Something else, just return as-is
return ref
def create_user_status_string(profile):
real_name = profile.get("real_name")
status_emoji = replace_string_with_emoji(profile.get("status_emoji", ""))
status_text = profile.get("status_text")
if status_emoji or status_text:
return "{} | {} {}".format(real_name, status_emoji, status_text)
else:
return real_name
def create_reaction_string(reaction, myidentifier):
if config.show_reaction_nicks:
nicks = [resolve_ref('@{}'.format(user)) for user in reaction['users']]
users = '({})'.format(','.join(nicks))
else:
users = len(reaction['users'])
reaction_string = ':{}:{}'.format(reaction['name'], users)
if myidentifier in reaction['users']:
return colorize_string(config.color_reaction_suffix_added_by_you, reaction_string,
reset_color=config.color_reaction_suffix)
else:
return reaction_string
def create_reactions_string(reactions, myidentifier):
reactions_with_users = [r for r in reactions if len(r['users']) > 0]
reactions_string = ' '.join(create_reaction_string(r, myidentifier) for r in reactions_with_users)
if reactions_string:
return ' ' + colorize_string(config.color_reaction_suffix, '[{}]'.format(reactions_string))
else:
return ''
def hdata_line_ts(line_pointer):
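    # wee-slack spreads a Slack ts over two line fields: the whole seconds live
    # in 'date' and the per-message sequence id in 'date_printed'
    # (see modify_last_print_time).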
data = w.hdata_pointer(hdata.line, line_pointer, 'data')
ts_major = w.hdata_time(hdata.line_data, data, 'date')
ts_minor = w.hdata_time(hdata.line_data, data, 'date_printed')
return (ts_major, ts_minor)
def modify_buffer_line(buffer_pointer, ts, new_text):
own_lines = w.hdata_pointer(hdata.buffer, buffer_pointer, 'own_lines')
line_pointer = w.hdata_pointer(hdata.lines, own_lines, 'last_line')
# Find the last line with this ts
while line_pointer and hdata_line_ts(line_pointer) != (ts.major, ts.minor):
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
# Find all lines for the message
pointers = []
while line_pointer and hdata_line_ts(line_pointer) == (ts.major, ts.minor):
pointers.append(line_pointer)
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
pointers.reverse()
# Split the message into at most the number of existing lines as we can't insert new lines
lines = new_text.split('\n', len(pointers) - 1)
# Replace newlines to prevent garbled lines in bare display mode
lines = [line.replace('\n', ' | ') for line in lines]
# Extend lines in case the new message is shorter than the old as we can't delete lines
lines += [''] * (len(pointers) - len(lines))
for pointer, line in zip(pointers, lines):
data = w.hdata_pointer(hdata.line, pointer, 'data')
w.hdata_update(hdata.line_data, data, {"message": line})
return w.WEECHAT_RC_OK
def modify_last_print_time(buffer_pointer, ts_minor):
"""
This overloads the time printed field to let us store the slack
per message unique id that comes after the "." in a slack ts
"""
own_lines = w.hdata_pointer(hdata.buffer, buffer_pointer, 'own_lines')
line_pointer = w.hdata_pointer(hdata.lines, own_lines, 'last_line')
while line_pointer:
data = w.hdata_pointer(hdata.line, line_pointer, 'data')
w.hdata_update(hdata.line_data, data, {"date_printed": str(ts_minor)})
if w.hdata_string(hdata.line_data, data, 'prefix'):
# Reached the first line of the message, so stop here
break
# Move one line backwards so all lines of the message are set
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
return w.WEECHAT_RC_OK
def nick_from_profile(profile, username):
full_name = profile.get('real_name') or username
if config.use_full_names:
nick = full_name
else:
nick = profile.get('display_name') or full_name
return nick.replace(' ', '')
def format_nick(nick, previous_nick=None):
if nick == previous_nick:
nick = w.config_string(w.config_get('weechat.look.prefix_same_nick')) or nick
nick_prefix = w.config_string(w.config_get('weechat.look.nick_prefix'))
nick_prefix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_prefix'))
nick_suffix = w.config_string(w.config_get('weechat.look.nick_suffix'))
nick_suffix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_prefix'))
return colorize_string(nick_prefix_color_name, nick_prefix) + nick + colorize_string(nick_suffix_color_name, nick_suffix)
def tag(tagset=None, user=None, self_msg=False, backlog=False, no_log=False, extra_tags=None):
tagsets = {
"team_info": {"no_highlight", "log3"},
"team_message": {"irc_privmsg", "notify_message", "log1"},
"dm": {"irc_privmsg", "notify_private", "log1"},
"join": {"irc_join", "no_highlight", "log4"},
"leave": {"irc_part", "no_highlight", "log4"},
"topic": {"irc_topic", "no_highlight", "log3"},
"channel": {"irc_privmsg", "notify_message", "log1"},
}
nick_tag = {"nick_{}".format(user).replace(" ", "_")} if user else set()
slack_tag = {"slack_{}".format(tagset or "default")}
tags = nick_tag | slack_tag | tagsets.get(tagset, set())
if self_msg or backlog:
tags -= {"notify_highlight", "notify_message", "notify_private"}
tags |= {"notify_none", "no_highlight"}
if self_msg:
tags |= {"self_msg"}
if backlog:
tags |= {"logger_backlog"}
if no_log:
tags |= {"no_log"}
tags = {tag for tag in tags if not tag.startswith("log") or tag == "logger_backlog"}
if extra_tags:
tags |= set(extra_tags)
return ",".join(tags)
def set_own_presence_active(team):
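    # Nudge Slack into showing us as active again by sending a typing event to
    # the Slackbot DM, which the code assumes every team has.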
slackbot = team.get_channel_map()['Slackbot']
channel = team.channels[slackbot]
request = {"type": "typing", "channel": channel.identifier}
channel.team.send_to_websocket(request, expect_reply=False)
###### New/converted command_ commands
@slack_buffer_or_ignore
@utf8_decode
def invite_command_cb(data, current_buffer, args):
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split()[1:]
if not split_args:
w.prnt('', 'Too few arguments for command "/invite" (help on command: /help invite)')
return w.WEECHAT_RC_OK_EAT
if split_args[-1].startswith("#") or split_args[-1].startswith(config.group_name_prefix):
nicks = split_args[:-1]
channel = team.channels.get(team.get_channel_map().get(split_args[-1]))
if not nicks or not channel:
w.prnt('', '{}: No such nick/channel'.format(split_args[-1]))
return w.WEECHAT_RC_OK_EAT
else:
nicks = split_args
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
all_users = team.get_username_map()
users = set()
for nick in nicks:
user = all_users.get(nick.lstrip('@'))
if not user:
w.prnt('', 'ERROR: Unknown user: {}'.format(nick))
return w.WEECHAT_RC_OK_EAT
users.add(user)
s = SlackRequest(team, "conversations.invite", {"channel": channel.identifier, "users": ",".join(users)},
channel=channel, metadata={"nicks": nicks})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def part_command_cb(data, current_buffer, args):
e = EVENTROUTER
args = args.split()
if len(args) > 1:
team = e.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
channel = "".join(args[1:])
if channel in cmap:
buffer_ptr = team.channels[cmap[channel]].channel_buffer
e.weechat_controller.unregister_buffer(buffer_ptr, update_remote=True, close_buffer=True)
else:
w.prnt(team.channel_buffer, "{}: No such channel".format(channel))
else:
e.weechat_controller.unregister_buffer(current_buffer, update_remote=True, close_buffer=True)
return w.WEECHAT_RC_OK_EAT
def parse_topic_command(command):
args = command.split()[1:]
channel_name = None
topic = None
if args:
if args[0].startswith('#'):
channel_name = args[0]
topic = args[1:]
else:
topic = args
if topic == []:
topic = None
if topic:
topic = ' '.join(topic)
if topic == '-delete':
topic = ''
return channel_name, topic
@slack_buffer_or_ignore
@utf8_decode
def topic_command_cb(data, current_buffer, command):
"""
Change the topic of a channel
/topic [<channel>] [<topic>|-delete]
"""
channel_name, topic = parse_topic_command(command)
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
if channel_name:
channel = team.channels.get(team.get_channel_map().get(channel_name))
else:
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if not channel:
w.prnt(team.channel_buffer, "{}: No such channel".format(channel_name))
return w.WEECHAT_RC_OK_EAT
if topic is None:
w.prnt(channel.channel_buffer,
'Topic for {} is "{}"'.format(channel.name, channel.render_topic()))
else:
s = SlackRequest(team, "conversations.setTopic",
{"channel": channel.identifier, "topic": linkify_text(topic, team)}, channel=channel)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def whois_command_cb(data, current_buffer, command):
"""
Get real name of user
/whois <nick>
"""
args = command.split()
if len(args) < 2:
w.prnt(current_buffer, "Not enough arguments")
return w.WEECHAT_RC_OK_EAT
user = args[1]
if (user.startswith('@')):
user = user[1:]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
u = team.users.get(team.get_username_map().get(user))
if u:
def print_profile(field):
value = u.profile.get(field)
if value:
team.buffer_prnt("[{}]: {}: {}".format(user, field, value))
team.buffer_prnt("[{}]: {}".format(user, u.real_name))
status_emoji = replace_string_with_emoji(u.profile.get("status_emoji", ""))
status_text = u.profile.get("status_text", "")
if status_emoji or status_text:
team.buffer_prnt("[{}]: {} {}".format(user, status_emoji, status_text))
team.buffer_prnt("[{}]: username: {}".format(user, u.username))
team.buffer_prnt("[{}]: id: {}".format(user, u.identifier))
print_profile('title')
print_profile('email')
print_profile('phone')
print_profile('skype')
else:
team.buffer_prnt("[{}]: No such user".format(user))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def me_command_cb(data, current_buffer, args):
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
message = args.split(' ', 1)[1]
channel.send_message(message, subtype='me_message')
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def command_register(data, current_buffer, args):
"""
/slack register [code/token]
Register a Slack team in wee-slack. Call this without any arguments and
follow the instructions to register a new team. If you already have a token
for a team, you can call this with that token to add it.
"""
CLIENT_ID = "2468770254.51917335286"
CLIENT_SECRET = "dcb7fe380a000cba0cca3169a5fe8d70" # Not really a secret.
REDIRECT_URI = "https%3A%2F%2Fwee-slack.github.io%2Fwee-slack%2Foauth%23"
if not args:
message = textwrap.dedent("""
### Connecting to a Slack team with OAuth ###
1) Paste this link into a browser: https://slack.com/oauth/authorize?client_id={}&scope=client&redirect_uri={}
2) Select the team you wish to access from wee-slack in your browser. If you want to add multiple teams, you will have to repeat this whole process for each team.
3) Click "Authorize" in the browser.
If you get a message saying you are not authorized to install wee-slack, the team has restricted Slack app installation and you will have to request it from an admin. To do that, go to https://my.slack.com/apps/A1HSZ9V8E-wee-slack and click "Request to Install".
4) The web page will show a command in the form `/slack register <code>`. Run this command in weechat.
""").strip().format(CLIENT_ID, REDIRECT_URI)
w.prnt("", message)
return w.WEECHAT_RC_OK_EAT
elif args.startswith('xox'):
add_token(args)
return w.WEECHAT_RC_OK_EAT
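    # Otherwise args is the temporary OAuth code from the browser; exchange it
    # for a token via Slack's oauth.access endpoint.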
uri = (
"https://slack.com/api/oauth.access?"
"client_id={}&client_secret={}&redirect_uri={}&code={}"
).format(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI, args)
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
w.hook_process_hashtable('url:', params, config.slack_timeout, "", "")
w.hook_process_hashtable("url:{}".format(uri), params, config.slack_timeout, "register_callback", "")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def register_callback(data, command, return_code, out, err):
if return_code != 0:
w.prnt("", "ERROR: problem when trying to get Slack OAuth token. Got return code {}. Err: {}".format(return_code, err))
w.prnt("", "Check the network or proxy settings")
return w.WEECHAT_RC_OK_EAT
if len(out) <= 0:
w.prnt("", "ERROR: problem when trying to get Slack OAuth token. Got 0 length answer. Err: {}".format(err))
w.prnt("", "Check the network or proxy settings")
return w.WEECHAT_RC_OK_EAT
d = json.loads(out)
if not d["ok"]:
w.prnt("",
"ERROR: Couldn't get Slack OAuth token: {}".format(d['error']))
return w.WEECHAT_RC_OK_EAT
add_token(d['access_token'], d['team_name'])
return w.WEECHAT_RC_OK_EAT
def add_token(token, team_name=None):
if config.is_default('slack_api_token'):
w.config_set_plugin('slack_api_token', token)
else:
# Add new token to existing set, joined by comma.
existing_tokens = config.get_string('slack_api_token')
if token in existing_tokens:
print_error('This token is already registered')
return
w.config_set_plugin('slack_api_token', ','.join([existing_tokens, token]))
if team_name:
w.prnt("", "Success! Added team \"{}\"".format(team_name))
else:
w.prnt("", "Success! Added token")
w.prnt("", "Please reload wee-slack with: /python reload slack")
w.prnt("", "If you want to add another team you can repeat this process from step 1 before reloading wee-slack.")
@slack_buffer_or_ignore
@utf8_decode
def msg_command_cb(data, current_buffer, args):
aargs = args.split(None, 2)
who = aargs[1].lstrip('@')
if who == "*":
who = EVENTROUTER.weechat_controller.buffers[current_buffer].name
else:
join_query_command_cb(data, current_buffer, '/query ' + who)
if len(aargs) > 2:
message = aargs[2]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
if who in cmap:
channel = team.channels[cmap[who]]
channel.send_message(message)
return w.WEECHAT_RC_OK_EAT
def print_team_items_info(team, header, items, extra_info_function):
team.buffer_prnt("{}:".format(header))
if items:
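        # Pad each name to the longest one so the extra info lines up in a column.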
max_name_length = max(len(item.name) for item in items)
for item in sorted(items, key=lambda item: item.name.lower()):
extra_info = extra_info_function(item)
team.buffer_prnt(" {:<{}}({})".format(item.name, max_name_length + 2, extra_info))
return w.WEECHAT_RC_OK_EAT
def print_users_info(team, header, users):
def extra_info_function(user):
external_text = ", external" if user.is_external else ""
return user.presence + external_text
return print_team_items_info(team, header, users, extra_info_function)
@slack_buffer_required
@utf8_decode
def command_teams(data, current_buffer, args):
"""
/slack teams
List the connected Slack teams.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
teams = EVENTROUTER.teams.values()
extra_info_function = lambda team: "token: {}".format(token_for_print(team.token))
return print_team_items_info(team, "Slack teams", teams, extra_info_function)
@slack_buffer_required
@utf8_decode
def command_channels(data, current_buffer, args):
"""
/slack channels
List the channels in the current team.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
channels = [channel for channel in team.channels.values() if channel.type not in ['im', 'mpim']]
def extra_info_function(channel):
if channel.active:
return "member"
elif getattr(channel, "is_archived", None):
return "archived"
else:
return "not a member"
return print_team_items_info(team, "Channels", channels, extra_info_function)
@slack_buffer_required
@utf8_decode
def command_users(data, current_buffer, args):
"""
/slack users
List the users in the current team.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
return print_users_info(team, "Users", team.users.values())
@slack_buffer_required
@utf8_decode
def command_usergroups(data, current_buffer, args):
"""
/slack usergroups [handle]
    List the usergroups in the current team.
    If a handle is given, show the members in the usergroup.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
usergroups = team.generate_usergroup_map()
usergroup_key = usergroups.get(args)
if usergroup_key:
s = SlackRequest(team, "usergroups.users.list", {"usergroup": usergroup_key},
metadata={'usergroup_handle': args})
EVENTROUTER.receive(s)
elif args:
w.prnt('', 'ERROR: Unknown usergroup handle: {}'.format(args))
return w.WEECHAT_RC_ERROR
else:
def extra_info_function(subteam):
is_member = 'member' if subteam.is_member else 'not a member'
return '{}, {}'.format(subteam.handle, is_member)
return print_team_items_info(team, "Usergroups", team.subteams.values(), extra_info_function)
return w.WEECHAT_RC_OK_EAT
command_usergroups.completion = '%(usergroups)'
@slack_buffer_required
@utf8_decode
def command_talk(data, current_buffer, args):
"""
/slack talk <user>[,<user2>[,<user3>...]]
Open a chat with the specified user(s).
"""
if not args:
w.prnt('', 'Usage: /slack talk <user>[,<user2>[,<user3>...]]')
return w.WEECHAT_RC_ERROR
return join_query_command_cb(data, current_buffer, '/query ' + args)
command_talk.completion = '%(nicks)'
@slack_buffer_or_ignore
@utf8_decode
def join_query_command_cb(data, current_buffer, args):
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split(' ', 1)
if len(split_args) < 2 or not split_args[1]:
w.prnt('', 'Too few arguments for command "{}" (help on command: /help {})'
.format(split_args[0], split_args[0].lstrip('/')))
return w.WEECHAT_RC_OK_EAT
query = split_args[1]
# Try finding the channel by name
channel = team.channels.get(team.get_channel_map().get(query))
# If the channel doesn't exist, try finding a DM or MPDM instead
if not channel:
if query.startswith('#'):
w.prnt('', 'ERROR: Unknown channel: {}'.format(query))
return w.WEECHAT_RC_OK_EAT
# Get the IDs of the users
all_users = team.get_username_map()
users = set()
for username in query.split(','):
user = all_users.get(username.lstrip('@'))
if not user:
w.prnt('', 'ERROR: Unknown user: {}'.format(username))
return w.WEECHAT_RC_OK_EAT
users.add(user)
if users:
if len(users) > 1:
channel_type = 'mpim'
# Add the current user since MPDMs include them as a member
users.add(team.myidentifier)
else:
channel_type = 'im'
channel = team.find_channel_by_members(users, channel_type=channel_type)
# If the DM or MPDM doesn't exist, create it
if not channel:
s = SlackRequest(team, SLACK_API_TRANSLATOR[channel_type]['join'],
{'users': ','.join(users)})
EVENTROUTER.receive(s)
if channel:
channel.open()
if config.switch_buffer_on_join:
w.buffer_set(channel.channel_buffer, "display", "1")
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_showmuted(data, current_buffer, args):
"""
/slack showmuted
List the muted channels in the current team.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
muted_channels = [team.channels[key].name
for key in team.muted_channels if key in team.channels]
team.buffer_prnt("Muted channels: {}".format(', '.join(muted_channels)))
return w.WEECHAT_RC_OK_EAT
def get_msg_from_id(channel, msg_id):
if msg_id[0] == '$':
msg_id = msg_id[1:]
ts = channel.hashed_messages.get(msg_id)
return channel.messages.get(ts)
@slack_buffer_required
@utf8_decode
def command_thread(data, current_buffer, args):
"""
/thread [message_id]
Open the thread for the message.
    If no message id is specified, the last thread in the channel will be opened.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if not isinstance(channel, SlackChannelCommon):
print_error('/thread can not be used in the team buffer, only in a channel')
return w.WEECHAT_RC_ERROR
if args:
msg = get_msg_from_id(channel, args)
if not msg:
w.prnt('', 'ERROR: Invalid id given, must be an existing id')
return w.WEECHAT_RC_OK_EAT
else:
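        # No id given: walk from the newest message backwards and pick the most
        # recent one that has replies; the for-else fires only if none is found.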
for message in reversed(channel.messages.values()):
if type(message) == SlackMessage and message.number_of_replies():
msg = message
break
else:
w.prnt('', 'ERROR: No threads found in channel')
return w.WEECHAT_RC_OK_EAT
msg.open_thread(switch=config.switch_buffer_on_join)
return w.WEECHAT_RC_OK_EAT
command_thread.completion = '%(threads)'
@slack_buffer_required
@utf8_decode
def command_reply(data, current_buffer, args):
"""
/reply [-alsochannel] [<count/message_id>] <message>
When in a channel buffer:
/reply [-alsochannel] <count/message_id> <message>
    Reply in a thread on the message. Specify either the message id or a count
    upwards from the most recent message.
When in a thread buffer:
/reply [-alsochannel] <message>
Reply to the current thread. This can be used to send the reply to the
rest of the channel.
In either case, -alsochannel also sends the reply to the parent channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
parts = args.split(None, 1)
if parts[0] == "-alsochannel":
args = parts[1]
broadcast = True
else:
broadcast = False
if isinstance(channel, SlackThreadChannel):
text = args
msg = channel.parent_message
else:
try:
msg_id, text = args.split(None, 1)
except ValueError:
w.prnt('', 'Usage (when in a channel buffer): /reply [-alsochannel] <count/message_id> <message>')
return w.WEECHAT_RC_OK_EAT
msg = get_msg_from_id(channel, msg_id)
if msg:
if isinstance(msg, SlackThreadMessage):
parent_id = str(msg.parent_message.ts)
else:
parent_id = str(msg.ts)
elif msg_id.isdigit() and int(msg_id) >= 1:
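            # A bare number counts upwards from the newest message, so take the
            # Nth key from the reversed sequence of main (non-thread) messages.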
mkeys = channel.main_message_keys_reversed()
parent_id = str(next(islice(mkeys, int(msg_id) - 1, None)))
else:
w.prnt('', 'ERROR: Invalid id given, must be a number greater than 0 or an existing id')
return w.WEECHAT_RC_OK_EAT
channel.send_message(text, request_dict_ext={'thread_ts': parent_id, 'reply_broadcast': broadcast})
return w.WEECHAT_RC_OK_EAT
command_reply.completion = '-alsochannel %(threads)||%(threads)'
@slack_buffer_required
@utf8_decode
def command_rehistory(data, current_buffer, args):
"""
/rehistory
Reload the history in the current channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
channel.clear_messages()
channel.get_history()
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_hide(data, current_buffer, args):
"""
/hide
Hide the current channel if it is marked as distracting.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
name = channel.formatted_name(style='long_default')
if name in config.distracting_channels:
w.buffer_set(channel.channel_buffer, "hidden", "1")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def slack_command_cb(data, current_buffer, args):
split_args = args.split(' ', 1)
cmd_name = split_args[0]
cmd_args = split_args[1] if len(split_args) > 1 else ''
cmd = EVENTROUTER.cmds.get(cmd_name or 'help')
if not cmd:
w.prnt('', 'Command not found: ' + cmd_name)
return w.WEECHAT_RC_OK
return cmd(data, current_buffer, cmd_args)
@utf8_decode
def command_help(data, current_buffer, args):
"""
/slack help [command]
Print help for /slack commands.
"""
if args:
cmd = EVENTROUTER.cmds.get(args)
if cmd:
cmds = {args: cmd}
else:
w.prnt('', 'Command not found: ' + args)
return w.WEECHAT_RC_OK
else:
cmds = EVENTROUTER.cmds
w.prnt('', '\n{}'.format(colorize_string('bold', 'Slack commands:')))
script_prefix = '{0}[{1}python{0}/{1}slack{0}]{1}'.format(w.color('green'), w.color('reset'))
for _, cmd in sorted(cmds.items()):
name, cmd_args, description = parse_help_docstring(cmd)
w.prnt('', '\n{} {} {}\n\n{}'.format(
script_prefix, colorize_string('white', name), cmd_args, description))
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_distracting(data, current_buffer, args):
"""
/slack distracting
Add or remove the current channel from distracting channels. You can hide
or unhide these channels with /slack nodistractions.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
fullname = channel.formatted_name(style="long_default")
if fullname in config.distracting_channels:
config.distracting_channels.remove(fullname)
else:
config.distracting_channels.append(fullname)
w.config_set_plugin('distracting_channels', ','.join(config.distracting_channels))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_slash(data, current_buffer, args):
"""
/slack slash /customcommand arg1 arg2 arg3
Run a custom slack command.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
split_args = args.split(' ', 1)
command = split_args[0]
text = split_args[1] if len(split_args) > 1 else ""
text_linkified = linkify_text(text, team, only_users=True)
s = SlackRequest(team, "chat.command",
{"command": command, "text": text_linkified, 'channel': channel.identifier},
channel=channel, metadata={'command': command, 'command_args': text})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_mute(data, current_buffer, args):
"""
/slack mute
Toggle mute on the current channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
team.muted_channels ^= {channel.identifier}
muted_str = "Muted" if channel.identifier in team.muted_channels else "Unmuted"
team.buffer_prnt("{} channel {}".format(muted_str, channel.name))
s = SlackRequest(team, "users.prefs.set",
{"name": "muted_channels", "value": ",".join(team.muted_channels)}, channel=channel)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_linkarchive(data, current_buffer, args):
"""
/slack linkarchive [message_id]
Place a link to the channel or message in the input bar.
Use cursor or mouse mode to get the id.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
url = 'https://{}/'.format(channel.team.domain)
if isinstance(channel, SlackChannelCommon):
url += 'archives/{}/'.format(channel.identifier)
if args:
if args[0] == '$':
message_id = args[1:]
else:
message_id = args
ts = channel.hashed_messages.get(message_id)
message = channel.messages.get(ts)
if message:
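                # Archive permalinks encode the message ts with the dot removed,
                # prefixed with 'p'.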
url += 'p{}{:0>6}'.format(message.ts.majorstr(), message.ts.minorstr())
if isinstance(message, SlackThreadMessage):
url += "?thread_ts={}&cid={}".format(message.parent_message.ts, channel.identifier)
else:
w.prnt('', 'ERROR: Invalid id given, must be an existing id')
return w.WEECHAT_RC_OK_EAT
w.command(current_buffer, "/input insert {}".format(url))
return w.WEECHAT_RC_OK_EAT
command_linkarchive.completion = '%(threads)'
@utf8_decode
def command_nodistractions(data, current_buffer, args):
"""
/slack nodistractions
Hide or unhide all channels marked as distracting.
"""
global hide_distractions
hide_distractions = not hide_distractions
channels = [channel for channel in EVENTROUTER.weechat_controller.buffers.values()
if channel in config.distracting_channels]
for channel in channels:
w.buffer_set(channel.channel_buffer, "hidden", str(int(hide_distractions)))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_upload(data, current_buffer, args):
"""
/slack upload <filename>
Uploads a file to the current buffer.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
weechat_dir = w.info_get("weechat_dir", "")
file_path = os.path.join(weechat_dir, os.path.expanduser(args))
if channel.type == 'team':
w.prnt('', "ERROR: Can't upload a file to the team buffer")
return w.WEECHAT_RC_ERROR
if not os.path.isfile(file_path):
unescaped_file_path = file_path.replace(r'\ ', ' ')
if os.path.isfile(unescaped_file_path):
file_path = unescaped_file_path
else:
w.prnt('', 'ERROR: Could not find file: {}'.format(file_path))
return w.WEECHAT_RC_ERROR
post_data = {
'channels': channel.identifier,
}
if isinstance(channel, SlackThreadChannel):
post_data['thread_ts'] = channel.parent_message.ts
url = SlackRequest(channel.team, 'files.upload', post_data, channel=channel).request_string()
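    # Hand the actual upload to curl: -F posts the file as a multipart form
    # field to the files.upload URL built above.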
options = [
'-s',
'-Ffile=@{}'.format(file_path),
url
]
proxy_string = ProxyWrapper().curl()
if proxy_string:
options.append(proxy_string)
options_hashtable = {'arg{}'.format(i + 1): arg for i, arg in enumerate(options)}
w.hook_process_hashtable('curl', options_hashtable, config.slack_timeout, 'upload_callback', '')
return w.WEECHAT_RC_OK_EAT
command_upload.completion = '%(filename)'
@utf8_decode
def upload_callback(data, command, return_code, out, err):
if return_code != 0:
w.prnt("", "ERROR: Couldn't upload file. Got return code {}. Error: {}".format(return_code, err))
return w.WEECHAT_RC_OK_EAT
try:
response = json.loads(out)
except JSONDecodeError:
w.prnt("", "ERROR: Couldn't process response from file upload. Got: {}".format(out))
return w.WEECHAT_RC_OK_EAT
if not response["ok"]:
w.prnt("", "ERROR: Couldn't upload file. Error: {}".format(response["error"]))
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def away_command_cb(data, current_buffer, args):
all_servers, message = re.match('^/away( -all)? ?(.*)', args).groups()
if all_servers:
team_buffers = [team.channel_buffer for team in EVENTROUTER.teams.values()]
elif current_buffer in EVENTROUTER.weechat_controller.buffers:
team_buffers = [current_buffer]
else:
return w.WEECHAT_RC_OK
for team_buffer in team_buffers:
if message:
command_away(data, team_buffer, args)
else:
command_back(data, team_buffer, args)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_away(data, current_buffer, args):
"""
/slack away
Sets your status as 'away'.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team, "users.setPresence", {"presence": "away"})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_status(data, current_buffer, args):
"""
/slack status [<emoji> [<status_message>]|-delete]
Lets you set your Slack Status (not to be confused with away/here).
Prints current status if no arguments are given, unsets the status if -delete is given.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split(" ", 1)
if not split_args[0]:
profile = team.users[team.myidentifier].profile
team.buffer_prnt("Status: {} {}".format(
replace_string_with_emoji(profile.get("status_emoji", "")),
profile.get("status_text", "")))
return w.WEECHAT_RC_OK
emoji = "" if split_args[0] == "-delete" else split_args[0]
text = split_args[1] if len(split_args) > 1 else ""
new_profile = {"status_text": text, "status_emoji": emoji}
s = SlackRequest(team, "users.profile.set", {"profile": new_profile})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
command_status.completion = "-delete|%(emoji)"
@utf8_decode
def line_event_cb(data, signal, hashtable):
buffer_pointer = hashtable["_buffer"]
line_timestamp = hashtable["_chat_line_date"]
line_time_id = hashtable["_chat_line_date_printed"]
channel = EVENTROUTER.weechat_controller.buffers.get(buffer_pointer)
if line_timestamp and line_time_id and isinstance(channel, SlackChannelCommon):
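        # Rebuild the SlackTS from the line's date (seconds) and date_printed
        # (sequence id) fields; see modify_last_print_time.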
ts = SlackTS("{}.{}".format(line_timestamp, line_time_id))
message_hash = channel.hash_message(ts)
if message_hash is None:
return w.WEECHAT_RC_OK
message_hash = "$" + message_hash
if data == "message":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/input insert {}".format(message_hash))
elif data == "delete":
w.command(buffer_pointer, "/input send {}s///".format(message_hash))
elif data == "linkarchive":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/slack linkarchive {}".format(message_hash[1:]))
elif data == "reply":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/input insert /reply {}\\x20".format(message_hash))
elif data == "thread":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/thread {}".format(message_hash))
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_back(data, current_buffer, args):
"""
/slack back
Sets your status as 'back'.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team, "users.setPresence", {"presence": "auto"})
EVENTROUTER.receive(s)
set_own_presence_active(team)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_label(data, current_buffer, args):
"""
/label <name>
Rename a thread buffer. Note that this is not permanent. It will only last
as long as you keep the buffer and wee-slack open.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if channel.type == 'thread':
new_name = " +" + args
channel.label = new_name
w.buffer_set(channel.channel_buffer, "short_name", new_name)
return w.WEECHAT_RC_OK
@utf8_decode
def set_unread_cb(data, current_buffer, command):
for channel in EVENTROUTER.weechat_controller.buffers.values():
channel.mark_read()
return w.WEECHAT_RC_OK
@slack_buffer_or_ignore
@utf8_decode
def set_unread_current_buffer_cb(data, current_buffer, command):
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
channel.mark_read()
return w.WEECHAT_RC_OK
###### NEW EXCEPTIONS
class InvalidType(Exception):
"""
Raised when we do type checking to ensure objects of the wrong
type are not used improperly.
"""
def __init__(self, type_str):
super(InvalidType, self).__init__(type_str)
###### New but probably old and need to migrate
def closed_slack_debug_buffer_cb(data, buffer):
global slack_debug
slack_debug = None
return w.WEECHAT_RC_OK
def create_slack_debug_buffer():
global slack_debug, debug_string
if slack_debug is None:
debug_string = None
slack_debug = w.buffer_new("slack-debug", "", "", "closed_slack_debug_buffer_cb", "")
w.buffer_set(slack_debug, "notify", "0")
w.buffer_set(slack_debug, "highlight_tags_restrict", "highlight_force")
def load_emoji():
try:
DIR = w.info_get('weechat_dir', '')
with open('{}/weemoji.json'.format(DIR), 'r') as ef:
emojis = json.loads(ef.read())
if 'emoji' in emojis:
print_error('The weemoji.json file is in an old format. Please update it.')
else:
emoji_unicode = {key: value['unicode'] for key, value in emojis.items()}
emoji_skin_tones = {skin_tone['name']: skin_tone['unicode']
for emoji in emojis.values()
for skin_tone in emoji.get('skinVariations', {}).values()}
emoji_with_skin_tones = chain(emoji_unicode.items(), emoji_skin_tones.items())
emoji_with_skin_tones_reverse = {v: k for k, v in emoji_with_skin_tones}
return emoji_unicode, emoji_with_skin_tones_reverse
except:
dbg("Couldn't load emoji list: {}".format(format_exc_only()), 5)
return {}, {}
def parse_help_docstring(cmd):
doc = textwrap.dedent(cmd.__doc__).strip().split('\n', 1)
cmd_line = doc[0].split(None, 1)
args = ''.join(cmd_line[1:])
return cmd_line[0], args, doc[1].strip()
def setup_hooks():
w.bar_item_new('slack_typing_notice', '(extra)typing_bar_item_cb', '')
w.bar_item_new('away', '(extra)away_bar_item_cb', '')
w.bar_item_new('slack_away', '(extra)away_bar_item_cb', '')
w.hook_timer(5000, 0, 0, "ws_ping_cb", "")
w.hook_timer(1000, 0, 0, "typing_update_cb", "")
w.hook_timer(1000, 0, 0, "buffer_list_update_callback", "EVENTROUTER")
w.hook_timer(3000, 0, 0, "reconnect_callback", "EVENTROUTER")
w.hook_timer(1000 * 60 * 5, 0, 0, "slack_never_away_cb", "")
w.hook_signal('buffer_closing', "buffer_closing_callback", "")
w.hook_signal('buffer_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('window_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('quit', "quit_notification_callback", "")
if config.send_typing_notice:
w.hook_signal('input_text_changed', "typing_notification_cb", "")
command_help.completion = '|'.join(EVENTROUTER.cmds.keys())
completions = '||'.join(
'{} {}'.format(name, getattr(cmd, 'completion', ''))
for name, cmd in EVENTROUTER.cmds.items())
w.hook_command(
# Command name and description
'slack', 'Plugin to allow typing notification and sync of read markers for slack.com',
# Usage
'<command> [<command options>]',
# Description of arguments
'Commands:\n' +
'\n'.join(sorted(EVENTROUTER.cmds.keys())) +
'\nUse /slack help <command> to find out more\n',
# Completions
completions,
# Function name
'slack_command_cb', '')
w.hook_command_run('/me', 'me_command_cb', '')
w.hook_command_run('/query', 'join_query_command_cb', '')
w.hook_command_run('/join', 'join_query_command_cb', '')
w.hook_command_run('/part', 'part_command_cb', '')
w.hook_command_run('/topic', 'topic_command_cb', '')
w.hook_command_run('/msg', 'msg_command_cb', '')
w.hook_command_run('/invite', 'invite_command_cb', '')
w.hook_command_run("/input complete_next", "complete_next_cb", "")
w.hook_command_run("/input set_unread", "set_unread_cb", "")
w.hook_command_run("/input set_unread_current_buffer", "set_unread_current_buffer_cb", "")
w.hook_command_run('/away', 'away_command_cb', '')
w.hook_command_run('/whois', 'whois_command_cb', '')
for cmd_name in ['hide', 'label', 'rehistory', 'reply', 'thread']:
cmd = EVENTROUTER.cmds[cmd_name]
_, args, description = parse_help_docstring(cmd)
completion = getattr(cmd, 'completion', '')
w.hook_command(cmd_name, description, args, '', completion, 'command_' + cmd_name, '')
w.hook_completion("irc_channel_topic", "complete topic for slack", "topic_completion_cb", "")
w.hook_completion("irc_channels", "complete channels for slack", "channel_completion_cb", "")
w.hook_completion("irc_privates", "complete dms/mpdms for slack", "dm_completion_cb", "")
w.hook_completion("nicks", "complete @-nicks for slack", "nick_completion_cb", "")
w.hook_completion("threads", "complete thread ids for slack", "thread_completion_cb", "")
w.hook_completion("usergroups", "complete @-usergroups for slack", "usergroups_completion_cb", "")
w.hook_completion("emoji", "complete :emoji: for slack", "emoji_completion_cb", "")
w.key_bind("mouse", {
"@chat(python.*):button2": "hsignal:slack_mouse",
})
w.key_bind("cursor", {
"@chat(python.*):D": "hsignal:slack_cursor_delete",
"@chat(python.*):L": "hsignal:slack_cursor_linkarchive",
"@chat(python.*):M": "hsignal:slack_cursor_message",
"@chat(python.*):R": "hsignal:slack_cursor_reply",
"@chat(python.*):T": "hsignal:slack_cursor_thread",
})
w.hook_hsignal("slack_mouse", "line_event_cb", "message")
w.hook_hsignal("slack_cursor_delete", "line_event_cb", "delete")
w.hook_hsignal("slack_cursor_linkarchive", "line_event_cb", "linkarchive")
w.hook_hsignal("slack_cursor_message", "line_event_cb", "message")
w.hook_hsignal("slack_cursor_reply", "line_event_cb", "reply")
w.hook_hsignal("slack_cursor_thread", "line_event_cb", "thread")
# Hooks to fix/implement
# w.hook_signal('buffer_opened', "buffer_opened_cb", "")
# w.hook_signal('window_scrolled', "scrolled_cb", "")
# w.hook_timer(3000, 0, 0, "slack_connection_persistence_cb", "")
##### END NEW
def dbg(message, level=0, main_buffer=False, fout=False):
"""
send debug output to the slack-debug buffer and optionally write to a file.
"""
# TODO: do this smarter
if level >= config.debug_level:
global debug_string
message = "DEBUG: {}".format(message)
if fout:
with open('/tmp/debug.log', 'a+') as log_file:
log_file.writelines(message + '\n')
if main_buffer:
w.prnt("", "slack: " + message)
else:
if slack_debug and (not debug_string or debug_string in message):
w.prnt(slack_debug, message)
###### Config code
class PluginConfig(object):
Setting = collections.namedtuple('Setting', ['default', 'desc'])
# Default settings.
# These are, initially, each a (default, desc) tuple; the former is the
# default value of the setting, in the (string) format that weechat
# expects, and the latter is the user-friendly description of the setting.
# At __init__ time these values are extracted, the description is used to
# set or update the setting description for use with /help, and the default
# value is used to set the default for any settings not already defined.
# Following this procedure, the keys remain the same, but the values are
# the real (python) values of the settings.
default_settings = {
'auto_open_threads': Setting(
default='false',
desc='Automatically open threads when mentioned or in'
            ' response to own messages.'),
'background_load_all_history': Setting(
default='false',
desc='Load history for each channel in the background as soon as it'
' opens, rather than waiting for the user to look at it.'),
'channel_name_typing_indicator': Setting(
default='true',
desc='Change the prefix of a channel from # to > when someone is'
' typing in it. Note that this will (temporarily) affect the sort'
' order if you sort buffers by name rather than by number.'),
'color_buflist_muted_channels': Setting(
default='darkgray',
desc='Color to use for muted channels in the buflist'),
'color_deleted': Setting(
default='red',
desc='Color to use for deleted messages and files.'),
'color_edited_suffix': Setting(
default='095',
desc='Color to use for (edited) suffix on messages that have been edited.'),
'color_reaction_suffix': Setting(
default='darkgray',
desc='Color to use for the [:wave:(@user)] suffix on messages that'
' have reactions attached to them.'),
'color_reaction_suffix_added_by_you': Setting(
default='blue',
desc='Color to use for reactions that you have added.'),
'color_thread_suffix': Setting(
default='lightcyan',
desc='Color to use for the [thread: XXX] suffix on messages that'
' have threads attached to them. The special value "multiple" can'
' be used to use a different color for each thread.'),
'color_typing_notice': Setting(
default='yellow',
desc='Color to use for the typing notice.'),
'colorize_private_chats': Setting(
default='false',
desc='Whether to use nick-colors in DM windows.'),
'debug_mode': Setting(
default='false',
desc='Open a dedicated buffer for debug messages and start logging'
' to it. How verbose the logging is depends on log_level.'),
'debug_level': Setting(
default='3',
desc='Show only this level of debug info (or higher) when'
' debug_mode is on. Lower levels -> more messages.'),
'distracting_channels': Setting(
default='',
desc='List of channels to hide.'),
'external_user_suffix': Setting(
default='*',
desc='The suffix appended to nicks to indicate external users.'),
'files_download_location': Setting(
default='',
desc='If set, file attachments will be automatically downloaded'
' to this location. "%h" will be replaced by WeeChat home,'
' "~/.weechat" by default.'),
'group_name_prefix': Setting(
default='&',
desc='The prefix of buffer names for groups (private channels).'),
'map_underline_to': Setting(
default='_',
desc='When sending underlined text to slack, use this formatting'
' character for it. The default ("_") sends it as italics. Use'
' "*" to send bold instead.'),
'muted_channels_activity': Setting(
default='personal_highlights',
desc="Control which activity you see from muted channels, either"
" none, personal_highlights, all_highlights or all. none: Don't"
" show any activity. personal_highlights: Only show personal"
" highlights, i.e. not @channel and @here. all_highlights: Show"
" all highlights, but not other messages. all: Show all activity,"
" like other channels."),
'notify_usergroup_handle_updated': Setting(
default='false',
desc="Control if you want to see notification when a usergroup's"
" handle has changed, either true or false."),
'never_away': Setting(
default='false',
desc='Poke Slack every five minutes so that it never marks you "away".'),
'record_events': Setting(
default='false',
desc='Log all traffic from Slack to disk as JSON.'),
'render_bold_as': Setting(
default='bold',
desc='When receiving bold text from Slack, render it as this in weechat.'),
'render_emoji_as_string': Setting(
default='false',
desc="Render emojis as :emoji_name: instead of emoji characters. Enable this"
" if your terminal doesn't support emojis, or set to 'both' if you want to"
" see both renderings. Note that even though this is"
" disabled by default, you need to place {}/blob/master/weemoji.json in your"
" weechat directory to enable rendering emojis as emoji characters."
.format(REPO_URL)),
'render_italic_as': Setting(
default='italic',
            desc='When receiving italic text from Slack, render it as this in weechat.'
' If your terminal lacks italic support, consider using "underline" instead.'),
'send_typing_notice': Setting(
default='true',
desc='Alert Slack users when you are typing a message in the input bar '
'(Requires reload)'),
'server_aliases': Setting(
default='',
desc='A comma separated list of `subdomain:alias` pairs. The alias'
' will be used instead of the actual name of the slack (in buffer'
' names, logging, etc). E.g `work:no_fun_allowed` would make your'
' work slack show up as `no_fun_allowed` rather than `work.slack.com`.'),
'shared_name_prefix': Setting(
default='%',
desc='The prefix of buffer names for shared channels.'),
'short_buffer_names': Setting(
default='false',
desc='Use `foo.#channel` rather than `foo.slack.com.#channel` as the'
' internal name for Slack buffers.'),
'show_buflist_presence': Setting(
default='true',
desc='Display a `+` character in the buffer list for present users.'),
'show_reaction_nicks': Setting(
default='false',
desc='Display the name of the reacting user(s) alongside each reactji.'),
'slack_api_token': Setting(
default='INSERT VALID KEY HERE!',
desc='List of Slack API tokens, one per Slack instance you want to'
' connect to. See the README for details on how to get these.'),
'slack_timeout': Setting(
default='20000',
desc='How long (ms) to wait when communicating with Slack.'),
'switch_buffer_on_join': Setting(
default='true',
desc='When /joining a channel, automatically switch to it as well.'),
'thread_messages_in_channel': Setting(
default='false',
desc='When enabled shows thread messages in the parent channel.'),
'unfurl_ignore_alt_text': Setting(
default='false',
desc='When displaying ("unfurling") links to channels/users/etc,'
' ignore the "alt text" present in the message and instead use the'
' canonical name of the thing being linked to.'),
'unfurl_auto_link_display': Setting(
default='both',
desc='When displaying ("unfurling") links to channels/users/etc,'
' determine what is displayed when the text matches the url'
' without the protocol. This happens when Slack automatically'
' creates links, e.g. from words separated by dots or email'
' addresses. Set it to "text" to only display the text written by'
' the user, "url" to only display the url or "both" (the default)'
' to display both.'),
'unhide_buffers_with_activity': Setting(
default='false',
desc='When activity occurs on a buffer, unhide it even if it was'
' previously hidden (whether by the user or by the'
' distracting_channels setting).'),
'use_full_names': Setting(
default='false',
desc='Use full names as the nicks for all users. When this is'
' false (the default), display names will be used if set, with a'
' fallback to the full name if display name is not set.'),
}
# Set missing settings to their defaults. Load non-missing settings from
# weechat configs.
def __init__(self):
self.settings = {}
# Set all descriptions, replace the values in the dict with the
# default setting value rather than the (setting,desc) tuple.
for key, (default, desc) in self.default_settings.items():
w.config_set_desc_plugin(key, desc)
self.settings[key] = default
# Migrate settings from old versions of Weeslack...
self.migrate()
# ...and then set anything left over from the defaults.
for key, default in self.settings.items():
if not w.config_get_plugin(key):
w.config_set_plugin(key, default)
self.config_changed(None, None, None)
def __str__(self):
return "".join([x + "\t" + str(self.settings[x]) + "\n" for x in self.settings.keys()])
def config_changed(self, data, key, value):
for key in self.settings:
self.settings[key] = self.fetch_setting(key)
if self.debug_mode:
create_slack_debug_buffer()
return w.WEECHAT_RC_OK
def fetch_setting(self, key):
try:
return getattr(self, 'get_' + key)(key)
except AttributeError:
# Most settings are on/off, so make get_boolean the default
return self.get_boolean(key)
except:
# There was setting-specific getter, but it failed.
return self.settings[key]
def __getattr__(self, key):
try:
return self.settings[key]
except KeyError:
raise AttributeError(key)
def get_boolean(self, key):
return w.config_string_to_boolean(w.config_get_plugin(key))
def get_string(self, key):
return w.config_get_plugin(key)
def get_int(self, key):
return int(w.config_get_plugin(key))
def is_default(self, key):
default = self.default_settings.get(key).default
return w.config_get_plugin(key) == default
get_color_buflist_muted_channels = get_string
get_color_deleted = get_string
get_color_edited_suffix = get_string
get_color_reaction_suffix = get_string
get_color_reaction_suffix_added_by_you = get_string
get_color_thread_suffix = get_string
get_color_typing_notice = get_string
get_debug_level = get_int
get_external_user_suffix = get_string
get_files_download_location = get_string
get_group_name_prefix = get_string
get_map_underline_to = get_string
get_muted_channels_activity = get_string
get_render_bold_as = get_string
get_render_italic_as = get_string
get_shared_name_prefix = get_string
get_slack_timeout = get_int
get_unfurl_auto_link_display = get_string
def get_distracting_channels(self, key):
return [x.strip() for x in w.config_get_plugin(key).split(',') if x]
def get_server_aliases(self, key):
alias_list = w.config_get_plugin(key)
return dict(item.split(":") for item in alias_list.split(",") if ':' in item)
def get_slack_api_token(self, key):
token = w.config_get_plugin("slack_api_token")
if token.startswith('${sec.data'):
return w.string_eval_expression(token, {}, {}, {})
else:
return token
def get_render_emoji_as_string(self, key):
s = w.config_get_plugin(key)
if s == 'both':
return s
return w.config_string_to_boolean(s)
def migrate(self):
"""
This is to migrate the extension name from slack_extension to slack
"""
if not w.config_get_plugin("migrated"):
for k in self.settings.keys():
if not w.config_is_set_plugin(k):
p = w.config_get("plugins.var.python.slack_extension.{}".format(k))
data = w.config_string(p)
if data != "":
w.config_set_plugin(k, data)
w.config_set_plugin("migrated", "true")
old_thread_color_config = w.config_get_plugin("thread_suffix_color")
new_thread_color_config = w.config_get_plugin("color_thread_suffix")
if old_thread_color_config and not new_thread_color_config:
w.config_set_plugin("color_thread_suffix", old_thread_color_config)
def config_server_buffer_cb(data, key, value):
for team in EVENTROUTER.teams.values():
team.buffer_merge(value)
return w.WEECHAT_RC_OK
# To trace execution, call `setup_trace()` at startup and add
# sys.settrace(trace_calls) to the function you want traced.
def setup_trace():
global f
now = time.time()
f = open('{}/{}-trace.json'.format(RECORD_DIR, now), 'w')
def trace_calls(frame, event, arg):
global f
if event != 'call':
return
co = frame.f_code
func_name = co.co_name
if func_name == 'write':
# Ignore write() calls from print statements
return
func_line_no = frame.f_lineno
func_filename = co.co_filename
caller = frame.f_back
caller_line_no = caller.f_lineno
caller_filename = caller.f_code.co_filename
print('Call to %s on line %s of %s from line %s of %s' % \
(func_name, func_line_no, func_filename,
caller_line_no, caller_filename), file=f)
f.flush()
return
def initiate_connection(token, retries=3, team=None):
return SlackRequest(team,
'rtm.{}'.format('connect' if team else 'start'),
{"batch_presence_aware": 1},
retries=retries,
token=token,
metadata={'initial_connection': True})
if __name__ == "__main__":
w = WeechatWrapper(weechat)
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "script_unloaded", ""):
weechat_version = w.info_get("version_number", "") or 0
if int(weechat_version) < 0x1030000:
w.prnt("", "\nERROR: Weechat version 1.3+ is required to use {}.\n\n".format(SCRIPT_NAME))
else:
global EVENTROUTER
EVENTROUTER = EventRouter()
receive_httprequest_callback = EVENTROUTER.receive_httprequest_callback
receive_ws_callback = EVENTROUTER.receive_ws_callback
# Global var section
slack_debug = None
config = PluginConfig()
config_changed_cb = config.config_changed
typing_timer = time.time()
hide_distractions = False
w.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_changed_cb", "")
w.hook_config("irc.look.server_buffer", "config_server_buffer_cb", "")
w.hook_modifier("input_text_for_buffer", "input_text_for_buffer_cb", "")
EMOJI, EMOJI_WITH_SKIN_TONES_REVERSE = load_emoji()
setup_hooks()
# attach to the weechat hooks we need
tokens = [token.strip() for token in config.slack_api_token.split(',')]
w.prnt('', 'Connecting to {} slack team{}.'
.format(len(tokens), '' if len(tokens) == 1 else 's'))
for t in tokens:
s = initiate_connection(t)
EVENTROUTER.receive(s)
if config.record_events:
EVENTROUTER.record()
EVENTROUTER.handle_next()
# END attach to the weechat hooks we need
hdata = Hdata(w)
| gpl-3.0 |
partofthething/home-assistant | homeassistant/components/fortios/device_tracker.py | 24 | 2933 | """
Support for using a FortiOS device such as FortiGate as a device tracker.
This component is part of the device_tracker platform.
"""
import logging
from fortiosapi import FortiOSAPI
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_TOKEN, CONF_VERIFY_SSL
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_VERIFY_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a FortiOSDeviceScanner."""
host = config[DOMAIN][CONF_HOST]
verify_ssl = config[DOMAIN][CONF_VERIFY_SSL]
token = config[DOMAIN][CONF_TOKEN]
fgt = FortiOSAPI()
try:
fgt.tokenlogin(host, token, verify_ssl)
except ConnectionError as ex:
_LOGGER.error("ConnectionError to FortiOS API: %s", ex)
return None
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error("Failed to login to FortiOS API: %s", ex)
return None
return FortiOSDeviceScanner(fgt)
class FortiOSDeviceScanner(DeviceScanner):
"""This class queries a FortiOS unit for connected devices."""
def __init__(self, fgt) -> None:
"""Initialize the scanner."""
self._clients = {}
self._clients_json = {}
self._fgt = fgt
def update(self):
"""Update clients from the device."""
clients_json = self._fgt.monitor("user/device/select", "")
self._clients_json = clients_json
self._clients = []
if clients_json:
for client in clients_json["results"]:
if client["last_seen"] < 180:
self._clients.append(client["mac"].upper())
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self.update()
return self._clients
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
_LOGGER.debug("Getting name of device %s", device)
device = device.lower()
data = self._clients_json
if data == 0:
_LOGGER.error("No json results to get device names")
return None
for client in data["results"]:
if client["mac"] == device:
try:
name = client["host"]["name"]
_LOGGER.debug("Getting device name=%s", name)
return name
except KeyError as kex:
_LOGGER.error("Name not found in client data: %s", kex)
return None
return None
| mit |
msarana/selenium_python | ENV/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py | 505 | 1421 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from . import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
def getNodeDetails(self, node):
if node.nodeType == Node.DOCUMENT_TYPE_NODE:
return _base.DOCTYPE, node.name, node.publicId, node.systemId
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
return _base.TEXT, node.nodeValue
elif node.nodeType == Node.ELEMENT_NODE:
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
if attr.namespaceURI:
attrs[(attr.namespaceURI, attr.localName)] = attr.value
else:
attrs[(None, attr.name)] = attr.value
return (_base.ELEMENT, node.namespaceURI, node.nodeName,
attrs, node.hasChildNodes())
elif node.nodeType == Node.COMMENT_NODE:
return _base.COMMENT, node.nodeValue
elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
return (_base.DOCUMENT,)
else:
return _base.UNKNOWN, node.nodeType
def getFirstChild(self, node):
return node.firstChild
def getNextSibling(self, node):
return node.nextSibling
def getParentNode(self, node):
return node.parentNode
| apache-2.0 |
elliott10/qemu-instru-tracer | scripts/qemu-gdb.py | 286 | 2813 | #!/usr/bin/python
# GDB debugging support
#
# Copyright 2012 Red Hat, Inc. and/or its affiliates
#
# Authors:
# Avi Kivity <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# Contributions after 2012-01-13 are licensed under the terms of the
# GNU GPL, version 2 or (at your option) any later version.
import gdb
def isnull(ptr):
return ptr == gdb.Value(0).cast(ptr.type)
def int128(p):
return long(p['lo']) + (long(p['hi']) << 64)
class QemuCommand(gdb.Command):
'''Prefix for QEMU debug support commands'''
def __init__(self):
gdb.Command.__init__(self, 'qemu', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE, True)
class MtreeCommand(gdb.Command):
'''Display the memory tree hierarchy'''
def __init__(self):
gdb.Command.__init__(self, 'qemu mtree', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
self.queue = []
def invoke(self, arg, from_tty):
self.seen = set()
self.queue_root('address_space_memory')
self.queue_root('address_space_io')
self.process_queue()
def queue_root(self, varname):
ptr = gdb.parse_and_eval(varname)['root']
self.queue.append(ptr)
def process_queue(self):
while self.queue:
ptr = self.queue.pop(0)
if long(ptr) in self.seen:
continue
self.print_item(ptr)
def print_item(self, ptr, offset = gdb.Value(0), level = 0):
self.seen.add(long(ptr))
addr = ptr['addr']
addr += offset
size = int128(ptr['size'])
alias = ptr['alias']
klass = ''
if not isnull(alias):
klass = ' (alias)'
elif not isnull(ptr['ops']):
klass = ' (I/O)'
elif bool(ptr['ram']):
klass = ' (RAM)'
gdb.write('%s%016x-%016x %s%s (@ %s)\n'
% (' ' * level,
long(addr),
long(addr + (size - 1)),
ptr['name'].string(),
klass,
ptr,
),
gdb.STDOUT)
if not isnull(alias):
gdb.write('%s alias: %s@%016x (@ %s)\n' %
(' ' * level,
alias['name'].string(),
ptr['alias_offset'],
alias,
),
gdb.STDOUT)
self.queue.append(alias)
subregion = ptr['subregions']['tqh_first']
level += 1
while not isnull(subregion):
self.print_item(subregion, addr, level)
subregion = subregion['subregions_link']['tqe_next']
QemuCommand()
MtreeCommand()
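# Typical use from a GDB session attached to a QEMU binary (illustrative):
#
#   (gdb) source scripts/qemu-gdb.py
#   (gdb) qemu mtree
#
# 'qemu' is the prefix command registered by QemuCommand; 'qemu mtree' walks
# address_space_memory and address_space_io and prints the region hierarchy.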
| gpl-2.0 |
Ictp/indico | doc/api/source/event_api_docs.py | 2 | 2619 | import StringIO
import os, sys, re, types
from zope.interface import Interface, interface
import conf
PATH = '../../../indico/'
from MaKaC import common
from indico.core.extpoint import IListener, IContributor
def iterate_sources(dir, exclude=[]):
"""
iterates through all *.py files inside a dir, recursively
"""
for dirname, dirnames, filenames in os.walk(dir):
for filename in filenames:
relDir = os.path.relpath(dirname, dir)
cont = False
for exc in exclude:
if relDir.startswith(exc):
cont = True
if cont:
continue
m = re.match(r'^(.*)\.py$', filename)
if m:
name = m.group(1)
rel = os.path.relpath(dirname, dir).split('/')
if rel == ['.']:
yield 'indico'
elif name == '__init__':
yield '.'.join(['indico'] + rel)
else:
yield '.'.join(['indico'] + rel + [name])
def docsFor(mod, iface, content):
path = "%s.%s" % (mod, iface.__name__)
content.write(""".. autointerface:: %s\n""" % path)
def _rst_title(text, char='='):
return "%s\n%s\n%s\n" % (char * len(text), text, char * len(text))
def gatherInfo(mod, content):
first = True
for elem, val in mod.__dict__.iteritems():
if type(val) == interface.InterfaceClass:
if val.__module__ == mod.__name__ and \
(val.extends(IListener) or val.extends(IContributor)):
if first:
content.write(_rst_title(mod.__name__, char='-'))
content.write(""".. automodule:: %s\n""" % mod.__name__)
first = False
if val.extends(IListener):
docsFor(mod.__name__, val, content)
elif val.extends(IContributor):
docsFor(mod.__name__, val, content)
def main(fname):
"""
main function
"""
content = StringIO.StringIO()
content.write(_rst_title("Listener/Contributor API"))
for f in iterate_sources(PATH, exclude=["MaKaC/po"]):
# try:
try:
mod = __import__(f)
for pelem in f.split('.')[1:]:
mod = getattr(mod, pelem)
gatherInfo(mod, content)
except ImportError:
sys.stderr.write("Import of '%s' failed!\n" % f)
with open(fname, 'w') as fd:
fd.write(content.getvalue())
content.close()
if __name__ == '__main__':
main(sys.argv[1])
| gpl-3.0 |
MatusKysel/Medusa | tools/perf/scripts/python/sctop.py | 1996 | 2102 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
towerjoo/mindsbook | django/contrib/messages/storage/cookie.py | 89 | 5873 | from django.conf import settings
from django.contrib.messages import constants
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import CompatCookie
from django.utils import simplejson as json
from django.utils.crypto import salted_hmac, constant_time_compare
class MessageEncoder(json.JSONEncoder):
"""
Compactly serializes instances of the ``Message`` class as JSON.
"""
message_key = '__json_message'
def default(self, obj):
if isinstance(obj, Message):
message = [self.message_key, obj.level, obj.message]
if obj.extra_tags:
message.append(obj.extra_tags)
return message
return super(MessageEncoder, self).default(obj)
class MessageDecoder(json.JSONDecoder):
"""
Decodes JSON that includes serialized ``Message`` instances.
"""
def process_messages(self, obj):
if isinstance(obj, list) and obj:
if obj[0] == MessageEncoder.message_key:
return Message(*obj[1:])
return [self.process_messages(item) for item in obj]
if isinstance(obj, dict):
return dict([(key, self.process_messages(value))
for key, value in obj.iteritems()])
return obj
def decode(self, s, **kwargs):
decoded = super(MessageDecoder, self).decode(s, **kwargs)
return self.process_messages(decoded)
class CookieStorage(BaseStorage):
"""
Stores messages in a cookie.
"""
cookie_name = 'messages'
# We should be able to store 4K in a cookie, but Internet Explorer
# imposes 4K as the *total* limit for a domain. To allow other
# cookies, we go for 3/4 of 4K.
max_cookie_size = 3072
not_finished = '__messagesnotfinished__'
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either sets the cookie with the encoded data if there is any data to
store, or deletes the cookie.
"""
if encoded_data:
response.set_cookie(self.cookie_name, encoded_data)
else:
response.delete_cookie(self.cookie_name)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Stores the messages to a cookie, returning a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, removes
messages until the data fits (these are the messages which are
returned), and add the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by CompatCookie, which
            # adds its own overhead, which we must account for.
cookie = CompatCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages
def _hash(self, value):
"""
Creates an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest()
def _encode(self, messages, encode_empty=False):
"""
Returns an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
return '%s$%s' % (self._hash(value), value)
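    # Illustrative shape of an encoded value (hash shortened, message list
    # made up): 'f1e2d3...$[["__json_message", 20, "Hello"]]'. _decode()
    # below splits on the first '$' and recomputes the HMAC over the rest.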
def _decode(self, data):
"""
        Safely decodes an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, ``None`` is returned.
"""
if not data:
return None
bits = data.split('$', 1)
if len(bits) == 2:
hash, value = bits
if constant_time_compare(hash, self._hash(value)):
try:
# If we get here (and the JSON decode works), everything is
# good. In any other case, drop back and return None.
return json.loads(value, cls=MessageDecoder)
except ValueError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
| bsd-3-clause |
qiyuangong/leetcode | python/404_Sum_of_Left_Leaves.py | 2 | 1068 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
# def sumOfLeftLeaves(self, root):
# """
# :type root: TreeNode
# :rtype: int
# """
# if root is None:
# return 0
# if root.left is not None:
# if root.left.left is None and root.left.right is None:
# return root.left.val + self.sumOfLeftLeaves(root.right)
# return self.sumOfLeftLeaves(root.left) + self.sumOfLeftLeaves(root.right)
def sumOfLeftLeaves(self, root):
stack = [root]
res = 0
while len(stack) > 0:
curr = stack.pop(0)
if curr is not None:
if curr.left is not None:
if curr.left.left is None and curr.left.right is None:
res += curr.left.val
stack.insert(0, curr.right)
stack.insert(0, curr.left)
return res
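# Example (illustrative; relies on the TreeNode class described above):
#   root = TreeNode(3)
#   root.left = TreeNode(9)
#   root.right = TreeNode(20)
#   root.right.left, root.right.right = TreeNode(15), TreeNode(7)
#   Solution().sumOfLeftLeaves(root)  # -> 24 (left leaves are 9 and 15)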
| mit |
taigaio/taiga-back | taiga/importers/asana/api.py | 1 | 5842 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from django.conf import settings
from taiga.base.api import viewsets
from taiga.base import response
from taiga.base import exceptions as exc
from taiga.base.decorators import list_route
from taiga.users.services import get_user_photo_url
from taiga.users.gravatar import get_user_gravatar_id
from taiga.importers import permissions, exceptions
from taiga.importers.services import resolve_users_bindings
from .importer import AsanaImporter
from . import tasks
class AsanaImporterViewSet(viewsets.ViewSet):
permission_classes = (permissions.ImporterPermission,)
@list_route(methods=["POST"])
def list_users(self, request, *args, **kwargs):
self.check_permissions(request, "list_users", None)
token = request.DATA.get('token', None)
project_id = request.DATA.get('project', None)
if not project_id:
raise exc.WrongArguments(_("The project param is needed"))
importer = AsanaImporter(request.user, token)
try:
users = importer.list_users(project_id)
except exceptions.InvalidRequest:
raise exc.BadRequest(_('Invalid Asana API request'))
except exceptions.FailedRequest:
raise exc.BadRequest(_('Failed to make the request to Asana API'))
for user in users:
if user['detected_user']:
user['user'] = {
'id': user['detected_user'].id,
'full_name': user['detected_user'].get_full_name(),
'gravatar_id': get_user_gravatar_id(user['detected_user']),
'photo': get_user_photo_url(user['detected_user']),
}
del(user['detected_user'])
return response.Ok(users)
@list_route(methods=["POST"])
def list_projects(self, request, *args, **kwargs):
self.check_permissions(request, "list_projects", None)
token = request.DATA.get('token', None)
importer = AsanaImporter(request.user, token)
try:
projects = importer.list_projects()
except exceptions.InvalidRequest:
raise exc.BadRequest(_('Invalid Asana API request'))
except exceptions.FailedRequest:
raise exc.BadRequest(_('Failed to make the request to Asana API'))
return response.Ok(projects)
@list_route(methods=["POST"])
def import_project(self, request, *args, **kwargs):
self.check_permissions(request, "import_project", None)
token = request.DATA.get('token', None)
project_id = request.DATA.get('project', None)
if not project_id:
raise exc.WrongArguments(_("The project param is needed"))
options = {
"name": request.DATA.get('name', None),
"description": request.DATA.get('description', None),
"template": request.DATA.get('template', "scrum"),
"users_bindings": resolve_users_bindings(request.DATA.get("users_bindings", {})),
"keep_external_reference": request.DATA.get("keep_external_reference", False),
"is_private": request.DATA.get("is_private", False),
}
if settings.CELERY_ENABLED:
task = tasks.import_project.delay(request.user.id, token, project_id, options)
return response.Accepted({"task_id": task.id})
importer = AsanaImporter(request.user, token)
project = importer.import_project(project_id, options)
project_data = {
"slug": project.slug,
"my_permissions": ["view_us"],
"is_backlog_activated": project.is_backlog_activated,
"is_kanban_activated": project.is_kanban_activated,
}
return response.Ok(project_data)
@list_route(methods=["GET"])
def auth_url(self, request, *args, **kwargs):
self.check_permissions(request, "auth_url", None)
url = AsanaImporter.get_auth_url(
settings.IMPORTERS.get('asana', {}).get('app_id', None),
settings.IMPORTERS.get('asana', {}).get('app_secret', None),
settings.IMPORTERS.get('asana', {}).get('callback_url', None)
)
return response.Ok({"url": url})
@list_route(methods=["POST"])
def authorize(self, request, *args, **kwargs):
self.check_permissions(request, "authorize", None)
code = request.DATA.get('code', None)
if code is None:
raise exc.BadRequest(_("Code param needed"))
try:
asana_token = AsanaImporter.get_access_token(
code,
settings.IMPORTERS.get('asana', {}).get('app_id', None),
settings.IMPORTERS.get('asana', {}).get('app_secret', None),
settings.IMPORTERS.get('asana', {}).get('callback_url', None)
)
except exceptions.InvalidRequest:
raise exc.BadRequest(_('Invalid Asana API request'))
except exceptions.FailedRequest:
raise exc.BadRequest(_('Failed to make the request to Asana API'))
return response.Ok({"token": asana_token})
| agpl-3.0 |
htwenhe/DJOA | env/Lib/site-packages/django/conf/locale/cy/formats.py | 504 | 1822 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '25 Hydref 2006'
TIME_FORMAT = 'P' # '2:30 y.b.'
DATETIME_FORMAT = 'j F Y, P' # '25 Hydref 2006, 2:30 y.b.'
YEAR_MONTH_FORMAT = 'F Y' # 'Hydref 2006'
MONTH_DAY_FORMAT = 'j F' # '25 Hydref'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 y.b.'
FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun'
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| mit |
baruch/libsigrokdecode | decoders/usb_packet/pd.py | 10 | 12962 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2011 Gareth McMullin <[email protected]>
## Copyright (C) 2012-2014 Uwe Hermann <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
'''
OUTPUT_PYTHON format:
Packet:
[<ptype>, <pdata>]
<ptype>, <pdata>:
- 'SYNC', <sync>
- 'PID', <pid>
- 'ADDR', <addr>
- 'EP', <ep>
- 'CRC5', <crc5>
- 'CRC16', <crc16>
- 'EOP', <eop>
- 'FRAMENUM', <framenum>
- 'DATABYTE', <databyte>
- 'HUBADDR', <hubaddr>
- 'SC', <sc>
- 'PORT', <port>
- 'S', <s>
- 'E/U', <e/u>
- 'ET', <et>
- 'PACKET', [<pcategory>, <pname>, <pinfo>]
<pcategory>, <pname>, <pinfo>:
- 'TOKEN', 'OUT', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'TOKEN', 'IN', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'TOKEN', 'SOF', [<sync>, <pid>, <framenum>, <crc5>, <eop>]
- 'TOKEN', 'SETUP', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'DATA', 'DATA0', [<sync>, <pid>, <databytes>, <crc16>, <eop>]
- 'DATA', 'DATA1', [<sync>, <pid>, <databytes>, <crc16>, <eop>]
- 'DATA', 'DATA2', [<sync>, <pid>, <databytes>, <crc16>, <eop>]
- 'DATA', 'MDATA', [<sync>, <pid>, <databytes>, <crc16>, <eop>]
- 'HANDSHAKE', 'ACK', [<sync>, <pid>, <eop>]
- 'HANDSHAKE', 'NAK', [<sync>, <pid>, <eop>]
- 'HANDSHAKE', 'STALL', [<sync>, <pid>, <eop>]
- 'HANDSHAKE', 'NYET', [<sync>, <pid>, <eop>]
- 'SPECIAL', 'PRE', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'SPECIAL', 'ERR', [<sync>, <pid>, <eop>]
- 'SPECIAL', 'SPLIT',
[<sync>, <pid>, <hubaddr>, <sc>, <port>, <s>, <e/u>, <et>, <crc5>, <eop>]
- 'SPECIAL', 'PING', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'SPECIAL', 'Reserved', None
<sync>: SYNC field bitstring, normally '00000001' (8 chars).
<pid>: Packet ID bitstring, e.g. '11000011' for DATA0 (8 chars).
<addr>: Address field number, 0-127 (7 bits).
<ep>: Endpoint number, 0-15 (4 bits).
<crc5>: CRC-5 number (5 bits).
<crc16>: CRC-16 number (16 bits).
<eop>: End of packet marker. List of symbols, usually ['SE0', 'SE0', 'J'].
<framenum>: USB (micro)frame number, 0-2047 (11 bits).
<databyte>: A single data byte, e.g. 0x55.
<databytes>: List of data bytes, e.g. [0x55, 0xaa, 0x99] (0 - 1024 bytes).
<hubaddr>: TODO
<sc>: TODO
<port>: TODO
<s>: TODO
<e/u>: TODO
<et>: TODO
'''
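# As an illustration of the format above (field values invented for the
# example), a full-speed SETUP token addressed to device 5, endpoint 0 would
# reach a stacked decoder as:
#
#   ['PACKET', ['TOKEN', 'SETUP',
#               ['00000001', '10110100', 5, 0, 0x02, ['SE0', 'SE0', 'J']]]]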
# Packet IDs (PIDs).
# The first 4 bits are the 'packet type' field, the last 4 bits are the
# 'check field' (each bit in the check field must be the inverse of the resp.
# bit in the 'packet type' field; if not, that's a 'PID error').
# For the 4-bit strings, the left-most '1' or '0' is the LSB, i.e. it's sent
# to the bus first.
pids = {
# Tokens
'10000111': ['OUT', 'Address & EP number in host-to-function transaction'],
'10010110': ['IN', 'Address & EP number in function-to-host transaction'],
'10100101': ['SOF', 'Start-Of-Frame marker & frame number'],
'10110100': ['SETUP', 'Address & EP number in host-to-function transaction for SETUP to a control pipe'],
# Data
# Note: DATA2 and MDATA are HS-only.
'11000011': ['DATA0', 'Data packet PID even'],
'11010010': ['DATA1', 'Data packet PID odd'],
    '11100001': ['DATA2', 'Data packet PID HS, high bandwidth isochronous transaction in a microframe'],
    '11110000': ['MDATA', 'Data packet PID HS for split and high-bandwidth isochronous transactions'],
# Handshake
'01001011': ['ACK', 'Receiver accepts error-free packet'],
'01011010': ['NAK', 'Receiver cannot accept or transmitter cannot send'],
'01111000': ['STALL', 'EP halted or control pipe request unsupported'],
'01101001': ['NYET', 'No response yet from receiver'],
    # Special
    # Note: PRE and ERR share the same PID bit pattern, so the duplicate key
    # below means only the 'ERR' entry actually ends up in this dict.
    '00111100': ['PRE', 'Host-issued preamble; enables downstream bus traffic to low-speed devices'],
    '00111100': ['ERR', 'Split transaction error handshake'],
'00011110': ['SPLIT', 'HS split transaction token'],
'00101101': ['PING', 'HS flow control probe for a bulk/control EP'],
'00001111': ['Reserved', 'Reserved PID'],
}
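# A quick way to express the check-field rule documented above (sketch only,
# not used by the decoder): each of the last four bits must be the inverse of
# the corresponding bit in the first four.
#
#   def pid_check_ok(pid):
#       return all(pid[i] != pid[i + 4] for i in range(4))
#
#   pid_check_ok('11000011')  # True for DATA0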
def get_category(pidname):
if pidname in ('OUT', 'IN', 'SOF', 'SETUP'):
return 'TOKEN'
elif pidname in ('DATA0', 'DATA1', 'DATA2', 'MDATA'):
return 'DATA'
elif pidname in ('ACK', 'NAK', 'STALL', 'NYET'):
return 'HANDSHAKE'
else:
return 'SPECIAL'
def ann_index(pidname):
l = ['OUT', 'IN', 'SOF', 'SETUP', 'DATA0', 'DATA1', 'DATA2', 'MDATA',
'ACK', 'NAK', 'STALL', 'NYET', 'PRE', 'ERR', 'SPLIT', 'PING',
'Reserved']
if pidname not in l:
return 28
return l.index(pidname) + 11
def bitstr_to_num(bitstr):
if not bitstr:
return 0
l = list(bitstr)
l.reverse()
return int(''.join(l), 2)
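# bitstr_to_num() treats its argument as LSB-first, e.g.:
#   bitstr_to_num('1000000') == 1      (7-bit address field)
#   bitstr_to_num('11010000') == 11    (reversed: '00001011')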
class Decoder(srd.Decoder):
api_version = 2
id = 'usb_packet'
name = 'USB packet'
longname = 'Universal Serial Bus (LS/FS) packet'
desc = 'USB (low-speed and full-speed) packet protocol.'
license = 'gplv2+'
inputs = ['usb_signalling']
outputs = ['usb_packet']
options = (
{'id': 'signalling', 'desc': 'Signalling',
'default': 'full-speed', 'values': ('full-speed', 'low-speed')},
)
annotations = (
('sync-ok', 'SYNC'),
('sync-err', 'SYNC (error)'),
('pid', 'PID'),
('framenum', 'FRAMENUM'),
('addr', 'ADDR'),
('ep', 'EP'),
('crc5-ok', 'CRC5'),
('crc5-err', 'CRC5 (error)'),
('data', 'DATA'),
('crc16-ok', 'CRC16'),
('crc16-err', 'CRC16 (error)'),
('packet-out', 'Packet: OUT'),
('packet-in', 'Packet: IN'),
('packet-sof', 'Packet: SOF'),
('packet-setup', 'Packet: SETUP'),
('packet-data0', 'Packet: DATA0'),
('packet-data1', 'Packet: DATA1'),
('packet-data2', 'Packet: DATA2'),
('packet-mdata', 'Packet: MDATA'),
('packet-ack', 'Packet: ACK'),
('packet-nak', 'Packet: NAK'),
('packet-stall', 'Packet: STALL'),
('packet-nyet', 'Packet: NYET'),
('packet-pre', 'Packet: PRE'),
('packet-err', 'Packet: ERR'),
('packet-split', 'Packet: SPLIT'),
('packet-ping', 'Packet: PING'),
('packet-reserved', 'Packet: Reserved'),
('packet-invalid', 'Packet: Invalid'),
)
annotation_rows = (
('fields', 'Packet fields', tuple(range(10 + 1))),
('packet', 'Packets', tuple(range(11, 28 + 1))),
)
def __init__(self):
self.bits = []
self.packet = []
self.packet_summary = ''
self.ss = self.es = None
self.ss_packet = self.es_packet = None
self.state = 'WAIT FOR SOP'
def putpb(self, data):
self.put(self.ss, self.es, self.out_python, data)
def putb(self, data):
self.put(self.ss, self.es, self.out_ann, data)
def putpp(self, data):
self.put(self.ss_packet, self.es_packet, self.out_python, data)
def putp(self, data):
self.put(self.ss_packet, self.es_packet, self.out_ann, data)
def start(self):
self.out_python = self.register(srd.OUTPUT_PYTHON)
self.out_ann = self.register(srd.OUTPUT_ANN)
def handle_packet(self):
packet = ''
for (bit, ss, es) in self.bits:
packet += bit
# Bits[0:7]: SYNC
sync = packet[:7 + 1]
self.ss, self.es = self.bits[0][1], self.bits[7][2]
# The SYNC pattern for low-speed/full-speed is KJKJKJKK (00000001).
if sync != '00000001':
self.putpb(['SYNC ERROR', sync])
self.putb([1, ['SYNC ERROR: %s' % sync, 'SYNC ERR: %s' % sync,
'SYNC ERR', 'SE', 'S']])
else:
self.putpb(['SYNC', sync])
self.putb([0, ['SYNC: %s' % sync, 'SYNC', 'S']])
self.packet.append(sync)
# Bits[8:15]: PID
pid = packet[8:15 + 1]
pidname = pids.get(pid, (pid, ''))[0]
self.ss, self.es = self.bits[8][1], self.bits[15][2]
self.putpb(['PID', pidname])
self.putb([2, ['PID: %s' % pidname, pidname, pidname[0]]])
self.packet.append(pid)
self.packet_summary += pidname
if pidname in ('OUT', 'IN', 'SOF', 'SETUP', 'PRE', 'PING'):
if pidname == 'SOF':
# Bits[16:26]: Framenum
framenum = bitstr_to_num(packet[16:26 + 1])
self.ss, self.es = self.bits[16][1], self.bits[26][2]
self.putpb(['FRAMENUM', framenum])
self.putb([3, ['Frame: %d' % framenum, 'Frame', 'Fr', 'F']])
self.packet.append(framenum)
self.packet_summary += ' %d' % framenum
else:
# Bits[16:22]: Addr
addr = bitstr_to_num(packet[16:22 + 1])
self.ss, self.es = self.bits[16][1], self.bits[22][2]
self.putpb(['ADDR', addr])
self.putb([4, ['Address: %d' % addr, 'Addr: %d' % addr,
'Addr', 'A']])
self.packet.append(addr)
self.packet_summary += ' ADDR %d' % addr
# Bits[23:26]: EP
ep = bitstr_to_num(packet[23:26 + 1])
self.ss, self.es = self.bits[23][1], self.bits[26][2]
self.putpb(['EP', ep])
self.putb([5, ['Endpoint: %d' % ep, 'EP: %d' % ep, 'EP', 'E']])
self.packet.append(ep)
self.packet_summary += ' EP %d' % ep
# Bits[27:31]: CRC5
crc5 = bitstr_to_num(packet[27:31 + 1])
self.ss, self.es = self.bits[27][1], self.bits[31][2]
self.putpb(['CRC5', crc5])
self.putb([6, ['CRC5: 0x%02X' % crc5, 'CRC5', 'C']])
self.packet.append(crc5)
elif pidname in ('DATA0', 'DATA1', 'DATA2', 'MDATA'):
# Bits[16:packetlen-16]: Data
data = packet[16:-16]
# TODO: len(data) must be a multiple of 8.
databytes = []
self.packet_summary += ' ['
for i in range(0, len(data), 8):
db = bitstr_to_num(data[i:i + 8])
self.ss, self.es = self.bits[16 + i][1], self.bits[23 + i][2]
self.putpb(['DATABYTE', db])
self.putb([8, ['Databyte: %02X' % db, 'Data: %02X' % db,
'DB: %02X' % db, '%02X' % db]])
databytes.append(db)
self.packet_summary += ' %02X' % db
self.packet_summary += ' ]'
# Convenience Python output (no annotation) for all bytes together.
self.ss, self.es = self.bits[16][1], self.bits[-16][2]
self.putpb(['DATABYTES', databytes])
self.packet.append(databytes)
# Bits[packetlen-16:packetlen]: CRC16
crc16 = bitstr_to_num(packet[-16:])
self.ss, self.es = self.bits[-16][1], self.bits[-1][2]
self.putpb(['CRC16', crc16])
self.putb([9, ['CRC16: 0x%04X' % crc16, 'CRC16', 'C']])
self.packet.append(crc16)
elif pidname in ('ACK', 'NAK', 'STALL', 'NYET', 'ERR'):
pass # Nothing to do, these only have SYNC+PID+EOP fields.
else:
pass # TODO: Handle 'SPLIT' and possibly 'Reserved' packets.
# Output a (summary of) the whole packet.
pcategory, pname, pinfo = get_category(pidname), pidname, self.packet
self.putpp(['PACKET', [pcategory, pname, pinfo]])
self.putp([ann_index(pidname), ['%s' % self.packet_summary]])
self.packet, self.packet_summary = [], ''
def decode(self, ss, es, data):
(ptype, pdata) = data
# We only care about certain packet types for now.
if ptype not in ('SOP', 'BIT', 'EOP'):
return
# State machine.
if self.state == 'WAIT FOR SOP':
if ptype != 'SOP':
return
self.ss_packet = ss
self.state = 'GET BIT'
elif self.state == 'GET BIT':
if ptype == 'BIT':
self.bits.append([pdata, ss, es])
elif ptype == 'EOP':
self.es_packet = es
self.handle_packet()
self.bits, self.state = [], 'WAIT FOR SOP'
else:
pass # TODO: Error
| gpl-3.0 |
spennihana/h2o-3 | h2o-py/tests/testdir_hdfs/pyunit_NOFEATURE_INTERNAL_HDFS_import_folder_csv_orc_same_milsongs.py | 4 | 1316 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
import time
from tests import pyunit_utils
#----------------------------------------------------------------------
# This test is used to show what happens if we split the same datasets
# into one part csv, one part orc
#----------------------------------------------------------------------
def hdfs_orc_parser():
# Check if we are running inside the H2O network by seeing if we can touch
# the namenode.
hadoop_namenode_is_accessible = pyunit_utils.hadoop_namenode_is_accessible()
if hadoop_namenode_is_accessible:
hdfs_name_node = pyunit_utils.hadoop_namenode()
if pyunit_utils.cannaryHDFSTest(hdfs_name_node, "/datasets/orc_parser/orc/orc_split_elim.orc"):
print("Your hive-exec version is too old. Orc parser test {0} is "
"skipped.".format("pyunit_INTERNAL_HDFS_import_folder_orc.py"))
pass
else:
mix_folder = "/datasets/csv_orc_same_milsongs"
url_csv1 = "hdfs://{0}{1}".format(hdfs_name_node, mix_folder)
multi_file_mixed = h2o.import_file(url_csv1)
else:
raise EnvironmentError
if __name__ == "__main__":
pyunit_utils.standalone_test(hdfs_orc_parser)
else:
hdfs_orc_parser() | apache-2.0 |
mayapurmedia/tovp | tovp/contributions/migrations/0030_auto_20150514_1332.py | 2 | 1154 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('contributions', '0029_auto_20150514_1331'),
]
operations = [
migrations.AlterModelOptions(
name='contribution',
options={'permissions': (('can_edit_completed', 'Can edit completed'), ('can_deposit', 'Can deposit'))},
),
migrations.AddField(
model_name='contribution',
name='deposited_status',
field=models.CharField(choices=[('not-deposited', 'Not deposited'), ('ready-to-deposit', 'Ready to deposit'), ('deposited', 'Deposited')], verbose_name='Is Deposited', default='not-deposited', max_length=20),
preserve_default=True,
),
migrations.AddField(
model_name='contribution',
name='deposited_status_changed',
field=model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='deposited_status'),
preserve_default=True,
),
]
| mit |
abdellatifkarroum/odoo | addons/crm_partner_assign/wizard/__init__.py | 389 | 1038 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_forward_to_partner
import crm_channel_interested | agpl-3.0 |
mfalaize/carnet-entretien | compta/forms.py | 2 | 3602 | from django import forms
from django.utils.translation import ugettext_lazy as _
from compta.models import Budget, OperationEpargne, Operation, CategorieEpargne
class BudgetForm(forms.ModelForm):
class Meta:
model = Budget
fields = ['categorie', 'compte_associe', 'budget', 'solde_en_une_fois']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['budget'].widget.attrs['autofocus'] = True
class OperationCategoriesForm(forms.Form):
operation_id = forms.IntegerField(required=True, widget=forms.HiddenInput())
categorie = forms.ChoiceField(required=False, choices=(("", ""),))
redirect = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput())
def __init__(self, post, render_initial=True):
if render_initial:
super().__init__()
else:
super().__init__(post)
operation = post.get('operation')
categories_epargne = post.get('categories_epargne')
redirect = post.get('redirect')
operation_id = post.get('operation_id')
if redirect is not None:
self.fields['redirect'].initial = redirect
if redirect:
self.fields['categorie'].widget = forms.HiddenInput()
self.fields['categorie'].initial = ""
if operation is None and operation_id is not None:
if render_initial:
self.fields['operation_id'].initial = post.get('operation_id')
else:
operation = Operation.objects.get(pk=int(operation_id))
if operation is not None:
operation.load_categorie()
if operation.categorie_id is not None:
self.fields['categorie'].initial = operation.categorie_id
self.fields['operation_id'].initial = operation.pk
if operation.compte.epargne:
self.fields['categorie'].choices += (("-1", _("Partagé entre les différentes catégories")),)
if categories_epargne is None:
categories_epargne = CategorieEpargne.objects.all().order_by('libelle')
for categorie in categories_epargne:
self.fields['categorie'].choices += ((str(categorie.pk).replace(" ", ""), categorie.libelle),)
else:
self.fields['categorie'].choices += (("-1", _("Hors Budget")),)
self.fields['categorie'].choices += (("-2", _("Revenue")),)
self.fields['categorie'].choices += (("-3", _("Avance sur débit(s) futur(s)")),)
if operation.compte.utilisateurs.count() > 1:
for utilisateur in operation.compte.utilisateurs.all():
self.fields['categorie'].choices += (("c" + str(-1000 - utilisateur.pk).replace(' ', ''),
_("Contribution") + " " + utilisateur.first_name),)
for utilisateur in operation.compte.utilisateurs.all():
self.fields['categorie'].choices += (("a" + str(-1000 - utilisateur.pk).replace(' ', ''), _(
"Contribution (avances)") + " " + utilisateur.first_name),)
for budget in operation.compte.budget_set.all():
self.fields['categorie'].choices += ((budget.pk, budget.categorie.libelle),)
else:
self.fields['operation_id'].initial = post.get('operation_id')
self.fields['categorie'].choices = sorted(self.fields['categorie'].choices, key=lambda x: x[1])
| gpl-3.0 |
dyrock/trafficserver | tests/gold_tests/thread_config/thread_32_0.test.py | 2 | 2467 | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
Test.Summary = 'Test that Trafficserver starts with different thread configurations.'
Test.ContinueOnFail = True
ts = Test.MakeATSProcess('ts')
server = Test.MakeOriginServer('server')
Test.testName = ''
request_header = {
'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
'timestamp': '1469733493.993',
'body': ''
}
response_header = {
'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
'timestamp': '1469733493.993',
'body': ''
}
server.addResponse("sessionfile.log", request_header, response_header)
ts.Disk.records_config.update({
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.exec_thread.autoconfig.scale': 1.5,
'proxy.config.exec_thread.limit': 32,
'proxy.config.accept_threads': 0,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'})
ts.Disk.remap_config.AddLine(
'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Setup.CopyAs('check_threads.py', Test.RunDirectory)
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy http://127.0.0.1:{0} http://www.example.com -H "Proxy-Connection: Keep-Alive" --verbose'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.Streams.stderr = 'gold/http_200.gold'
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'python3 check_threads.py -t {0} -e {1} -a {2}'.format(ts.Env['TS_ROOT'], 32, 0)
tr.Processes.Default.ReturnCode = 0
| apache-2.0 |
erwilan/ansible | contrib/inventory/ovirt4.py | 70 | 7682 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
oVirt dynamic inventory script
=================================
Generates dynamic inventory file for oVirt.
Script will return following attributes for each virtual machine:
- id
- name
- host
- cluster
- status
- description
- fqdn
- os_type
- template
- tags
- statistics
- devices
When run in --list mode, virtual machines are grouped by the following categories:
- cluster
- tag
- status
Note: If a virtual machine has more than one tag, it will appear in each of
those tag groups.
Examples:
# Execute update of system on webserver virtual machine:
$ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
# Get webserver virtual machine information:
$ contrib/inventory/ovirt4.py --host webserver
Author: Ondra Machacek (@machacekondra)
"""
import argparse
import os
import sys
from collections import defaultdict
try:
import ConfigParser as configparser
except ImportError:
import configparser
try:
import json
except ImportError:
import simplejson as json
try:
import ovirtsdk4 as sdk
import ovirtsdk4.types as otypes
except ImportError:
print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
sys.exit(1)
def parse_args():
"""
Create command line parser for oVirt dynamic inventory script.
"""
parser = argparse.ArgumentParser(
description='Ansible dynamic inventory script for oVirt.',
)
parser.add_argument(
'--list',
action='store_true',
default=True,
help='Get data of all virtual machines (default: True).',
)
parser.add_argument(
'--host',
help='Get data of virtual machines running on specified host.',
)
parser.add_argument(
'--pretty',
action='store_true',
default=False,
help='Pretty format (default: False).',
)
return parser.parse_args()
def create_connection():
"""
Create a connection to oVirt engine API.
"""
# Get the path of the configuration file, by default use
# 'ovirt.ini' file in script directory:
default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'ovirt.ini',
)
config_path = os.environ.get('OVIRT_INI_PATH', default_path)
# Create parser and add ovirt section if it doesn't exist:
config = configparser.SafeConfigParser(
defaults={
'ovirt_url': None,
'ovirt_username': None,
'ovirt_password': None,
'ovirt_ca_file': None,
}
)
if not config.has_section('ovirt'):
config.add_section('ovirt')
config.read(config_path)
# Create a connection with options defined in ini file:
return sdk.Connection(
url=config.get('ovirt', 'ovirt_url'),
username=config.get('ovirt', 'ovirt_username'),
password=config.get('ovirt', 'ovirt_password'),
ca_file=config.get('ovirt', 'ovirt_ca_file'),
insecure=config.get('ovirt', 'ovirt_ca_file') is None,
)
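# A minimal ovirt.ini consumed by create_connection() might look like this
# (all values are placeholders; the location can be overridden through the
# OVIRT_INI_PATH environment variable):
#
#   [ovirt]
#   ovirt_url      = https://engine.example.com/ovirt-engine/api
#   ovirt_username = admin@internal
#   ovirt_password = secret
#   ovirt_ca_file  = /etc/pki/ovirt-engine/ca.pem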
def get_dict_of_struct(connection, vm):
"""
Transform SDK Vm Struct type to Python dictionary.
"""
if vm is None:
return dict()
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
vm_service = vms_service.vm_service(vm.id)
devices = vm_service.reported_devices_service().list()
tags = vm_service.tags_service().list()
stats = vm_service.statistics_service().list()
labels = vm_service.affinity_labels_service().list()
groups = clusters_service.cluster_service(
vm.cluster.id
).affinity_groups_service().list()
return {
'id': vm.id,
'name': vm.name,
'host': connection.follow_link(vm.host).name if vm.host else None,
'cluster': connection.follow_link(vm.cluster).name,
'status': str(vm.status),
'description': vm.description,
'fqdn': vm.fqdn,
'os_type': vm.os.type,
'template': connection.follow_link(vm.template).name,
'tags': [tag.name for tag in tags],
'affinity_labels': [label.name for label in labels],
'affinity_groups': [
group.name for group in groups
if vm.name in [vm.name for vm in connection.follow_link(group.vms)]
],
'statistics': dict(
(stat.name, stat.values[0].datum) for stat in stats
),
'devices': dict(
(device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
),
'ansible_host': next((device.ips[0].address for device in devices if device.ips), None)
}
def get_data(connection, vm_name=None):
"""
Obtain data of `vm_name` if specified, otherwise obtain data of all vms.
"""
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
if vm_name:
vm = vms_service.list(search='name=%s' % vm_name) or [None]
data = get_dict_of_struct(
connection=connection,
vm=vm[0],
)
else:
vms = dict()
data = defaultdict(list)
for vm in vms_service.list():
name = vm.name
vm_service = vms_service.vm_service(vm.id)
cluster_service = clusters_service.cluster_service(vm.cluster.id)
# Add vm to vms dict:
vms[name] = get_dict_of_struct(connection, vm)
# Add vm to cluster group:
cluster_name = connection.follow_link(vm.cluster).name
data['cluster_%s' % cluster_name].append(name)
# Add vm to tag group:
tags_service = vm_service.tags_service()
for tag in tags_service.list():
data['tag_%s' % tag.name].append(name)
# Add vm to status group:
data['status_%s' % vm.status].append(name)
# Add vm to affinity group:
for group in cluster_service.affinity_groups_service().list():
if vm.name in [
v.name for v in connection.follow_link(group.vms)
]:
data['affinity_group_%s' % group.name].append(vm.name)
# Add vm to affinity label group:
affinity_labels_service = vm_service.affinity_labels_service()
for label in affinity_labels_service.list():
data['affinity_label_%s' % label.name].append(name)
data["_meta"] = {
'hostvars': vms,
}
return data
def main():
args = parse_args()
connection = create_connection()
print(
json.dumps(
obj=get_data(
connection=connection,
vm_name=args.host,
),
sort_keys=args.pretty,
indent=args.pretty * 2,
)
)
if __name__ == '__main__':
main()
| gpl-3.0 |
ThinkOpen-Solutions/odoo | addons/account/project/wizard/account_analytic_chart.py | 362 | 2100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_chart(osv.osv_memory):
_name = 'account.analytic.chart'
_description = 'Account Analytic Chart'
_columns = {
'from_date': fields.date('From'),
'to_date': fields.date('To'),
}
def analytic_account_chart_open_window(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result_context = {}
if context is None:
context = {}
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_account_analytic_account_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
data = self.read(cr, uid, ids, [])[0]
if data['from_date']:
result_context.update({'from_date': data['from_date']})
if data['to_date']:
result_context.update({'to_date': data['to_date']})
result['context'] = str(result_context)
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
darktears/chromium-crosswalk | tools/grit/grit/tool/diff_structures.py | 62 | 3923 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The 'grit sdiff' tool.
'''
import os
import getopt
import tempfile
from grit.node import structure
from grit.tool import interface
from grit import constants
from grit import util
# Builds the description for the tool (used as the __doc__
# for the DiffStructures class).
_class_doc = """\
Allows you to view the differences in the structure of two files,
disregarding their translateable content. Translateable portions of
each file are changed to the string "TTTTTT" before invoking the diff program
specified by the P4DIFF environment variable.
Usage: grit sdiff [-t TYPE] [-s SECTION] [-e ENCODING] LEFT RIGHT
LEFT and RIGHT are the files you want to diff. SECTION is required
for structure types like 'dialog' to identify the part of the file to look at.
ENCODING indicates the encoding of the left and right files (default 'cp1252').
TYPE can be one of the following, defaults to 'tr_html':
"""
for gatherer in structure._GATHERERS:
_class_doc += " - %s\n" % gatherer
class DiffStructures(interface.Tool):
__doc__ = _class_doc
def __init__(self):
self.section = None
self.left_encoding = 'cp1252'
self.right_encoding = 'cp1252'
self.structure_type = 'tr_html'
def ShortDescription(self):
return 'View differences without regard for translateable portions.'
def Run(self, global_opts, args):
(opts, args) = getopt.getopt(args, 's:e:t:',
['left_encoding=', 'right_encoding='])
for key, val in opts:
if key == '-s':
self.section = val
elif key == '-e':
self.left_encoding = val
self.right_encoding = val
elif key == '-t':
self.structure_type = val
elif key == '--left_encoding':
self.left_encoding = val
elif key == '--right_encoding':
        self.right_encoding = val
if len(args) != 2:
print "Incorrect usage - 'grit help sdiff' for usage details."
return 2
if 'P4DIFF' not in os.environ:
print "Environment variable P4DIFF not set; defaulting to 'windiff'."
diff_program = 'windiff'
else:
diff_program = os.environ['P4DIFF']
left_trans = self.MakeStaticTranslation(args[0], self.left_encoding)
try:
try:
right_trans = self.MakeStaticTranslation(args[1], self.right_encoding)
os.system('%s %s %s' % (diff_program, left_trans, right_trans))
finally:
os.unlink(right_trans)
finally:
os.unlink(left_trans)
def MakeStaticTranslation(self, original_filename, encoding):
"""Given the name of the structure type (self.structure_type), the filename
of the file holding the original structure, and optionally the "section" key
identifying the part of the file to look at (self.section), creates a
temporary file holding a "static" translation of the original structure
(i.e. one where all translateable parts have been replaced with "TTTTTT")
and returns the temporary file name. It is the caller's responsibility to
delete the file when finished.
Args:
original_filename: 'c:\\bingo\\bla.rc'
Return:
'c:\\temp\\werlkjsdf334.tmp'
"""
original = structure._GATHERERS[self.structure_type](original_filename,
extkey=self.section,
encoding=encoding)
original.Parse()
translated = original.Translate(constants.CONSTANT_LANGUAGE, False)
fname = tempfile.mktemp()
with util.WrapOutputStream(open(fname, 'w')) as writer:
writer.write("Original filename: %s\n=============\n\n"
% original_filename)
writer.write(translated) # write in UTF-8
return fname
| bsd-3-clause |
hequn8128/flink | flink-python/pyflink/table/sources.py | 10 | 2202 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, _to_java_type
from pyflink.util import utils
__all__ = ['TableSource', 'CsvTableSource']
class TableSource(object):
"""
Defines a table from an external system or location.
"""
def __init__(self, j_table_source):
self._j_table_source = j_table_source
class CsvTableSource(TableSource):
"""
A :class:`TableSource` for simple CSV files with a
(logically) unlimited number of fields.
:param source_path: The path to the CSV file.
:param field_names: The names of the table fields.
:param field_types: The types of the table fields.
"""
def __init__(self, source_path, field_names, field_types):
# type: (str, list[str], list[DataType]) -> None
gateway = get_gateway()
j_field_names = utils.to_jarray(gateway.jvm.String, field_names)
j_field_types = utils.to_jarray(gateway.jvm.TypeInformation,
[_to_java_type(field_type)
for field_type in field_types])
super(CsvTableSource, self).__init__(
gateway.jvm.CsvTableSource(source_path, j_field_names, j_field_types))
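# A sketch of how this source is typically used (assumes a TableEnvironment
# `t_env` and `DataTypes` from pyflink.table; API names may differ between
# Flink releases):
#
#   source = CsvTableSource("/tmp/people.csv",
#                           ["name", "age"],
#                           [DataTypes.STRING(), DataTypes.INT()])
#   t_env.register_table_source("people", source)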
| apache-2.0 |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/test/test_netrc.py | 50 | 4636 | import netrc, os, unittest, sys, textwrap
from test import test_support
temp_filename = test_support.TESTFN
class NetrcTestCase(unittest.TestCase):
def make_nrc(self, test_data):
test_data = textwrap.dedent(test_data)
mode = 'w'
if sys.platform != 'cygwin':
mode += 't'
with open(temp_filename, mode) as fp:
fp.write(test_data)
self.addCleanup(os.unlink, temp_filename)
return netrc.netrc(temp_filename)
def test_default(self):
nrc = self.make_nrc("""\
machine host1.domain.com login log1 password pass1 account acct1
default login log2 password pass2
""")
self.assertEqual(nrc.hosts['host1.domain.com'],
('log1', 'acct1', 'pass1'))
self.assertEqual(nrc.hosts['default'], ('log2', None, 'pass2'))
def test_macros(self):
nrc = self.make_nrc("""\
macdef macro1
line1
line2
macdef macro2
line3
line4
""")
self.assertEqual(nrc.macros, {'macro1': ['line1\n', 'line2\n'],
'macro2': ['line3\n', 'line4\n']})
def _test_passwords(self, nrc, passwd):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['host.domain.com'], ('log', 'acct', passwd))
def test_password_with_leading_hash(self):
self._test_passwords("""\
machine host.domain.com login log password #pass account acct
""", '#pass')
def test_password_with_trailing_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pass# account acct
""", 'pass#')
def test_password_with_internal_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pa#ss account acct
""", 'pa#ss')
def _test_comment(self, nrc, passwd='pass'):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['foo.domain.com'], ('bar', None, passwd))
self.assertEqual(nrc.hosts['bar.domain.com'], ('foo', None, 'pass'))
def test_comment_before_machine_line(self):
self._test_comment("""\
# comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_before_machine_line_no_space(self):
self._test_comment("""\
#comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_before_machine_line_hash_only(self):
self._test_comment("""\
#
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line(self):
self._test_comment("""\
machine foo.domain.com login bar password pass # comment
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line_no_space(self):
self._test_comment("""\
machine foo.domain.com login bar password pass #comment
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line_pass_has_hash(self):
self._test_comment("""\
machine foo.domain.com login bar password #pass #comment
machine bar.domain.com login foo password pass
""", '#pass')
@unittest.skipUnless(os.name == 'posix', 'POSIX only test')
def test_security(self):
# This test is incomplete since we are normally not run as root and
# therefore can't test the file ownership being wrong.
d = test_support.TESTFN
os.mkdir(d)
self.addCleanup(test_support.rmtree, d)
fn = os.path.join(d, '.netrc')
with open(fn, 'wt') as f:
f.write("""\
machine foo.domain.com login bar password pass
default login foo password pass
""")
with test_support.EnvironmentVarGuard() as environ:
environ.set('HOME', d)
os.chmod(fn, 0600)
nrc = netrc.netrc()
self.assertEqual(nrc.hosts['foo.domain.com'],
('bar', None, 'pass'))
os.chmod(fn, 0o622)
self.assertRaises(netrc.NetrcParseError, netrc.netrc)
def test_main():
test_support.run_unittest(NetrcTestCase)
if __name__ == "__main__":
test_main()
| mit |
AndresVillan/pyafipws | formatos/formato_sql.py | 4 | 15333 | #!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Módulo para manejo de archivos SQL"
__author__ = "Mariano Reingart ([email protected])"
__copyright__ = "Copyright (C) 2014 Mariano Reingart"
__license__ = "GPL 3.0"
from decimal import Decimal
DEBUG = False
CAE_NULL = None
FECHA_VTO_NULL = None
RESULTADO_NULL = None
NULL = None
def esquema_sql(tipos_registro, conf={}):
from formato_txt import A, N, I
for tabla, formato in tipos_registro:
sql = []
sql.append("CREATE TABLE %s (" % tabla)
if tabla!='encabezado':
# agrego id como fk
id = [('id', 15, N)]
else:
id = []
for (clave, longitud, tipo) in id+formato:
clave_orig = clave
if conf:
if tabla == 'encabezado':
clave = conf["encabezado"].get(clave, clave)
if tabla == 'detalle':
clave = conf["detalle"].get(clave, clave)
if tabla == 'iva':
clave = conf["iva"].get(clave, clave)
if tabla == 'tributo':
clave = conf["tributo"].get(clave, clave)
if tabla == 'cmp_asoc':
clave = conf["cmp_asoc"].get(clave, clave)
if tabla == 'permiso':
clave = conf["permiso"].get(clave, clave)
if isinstance(longitud, (tuple, list)):
longitud, decimales = longitud
else:
decimales = 2
sql.append (" %s %s %s%s%s" % (
clave,
{N: 'INTEGER', I: 'NUMERIC', A: 'VARCHAR'}[tipo],
{I: "(%s, %s)" % (longitud, decimales), A: '(%s)' % longitud, N: ''}[tipo],
            clave == 'id' and (tabla=='encabezado' and " PRIMARY KEY" or " FOREIGN KEY encabezado") or "",
formato[-1][0]!=clave_orig and "," or ""))
sql.append(")")
sql.append(";")
if DEBUG: print '\n'.join(sql)
yield '\n'.join(sql)
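# Minimal usage sketch (illustrative; assumes formato_txt is importable, as in ayuda() below):
#
#   from formato_txt import ENCABEZADO, DETALLE
#   for sql in esquema_sql([('encabezado', ENCABEZADO), ('detalle', DETALLE)]):
#       print sql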
def configurar(schema):
tablas = {}
campos = {}
campos_rev = {}
if not schema:
for tabla in "encabezado", "detalle", "cmp_asoc", "permiso", "tributo", "iva":
tablas[tabla] = tabla
campos[tabla] = {"id": "id"}
campos_rev[tabla] = dict([(v, k) for k, v in campos[tabla].items()])
return tablas, campos, campos_rev
def ejecutar(cur, sql, params=None):
print sql, params
if params is None:
return cur.execute(sql)
else:
return cur.execute(sql, params)
def max_id(db, schema={}):
cur = db.cursor()
tablas, campos, campos_rev = configurar(schema)
query = ("SELECT MAX(%%(id)s) FROM %(encabezado)s" % tablas) % campos["encabezado"]
if DEBUG: print "ejecutando",query
ret = None
try:
ejecutar(cur, query)
for row in cur:
ret = row[0]
if not ret:
ret = 0
print "MAX_ID = ", ret
return ret
finally:
cur.close()
def redondear(formato, clave, valor):
from formato_txt import A, N, I
    # fix rounding (apparently sqlite does not store decimals correctly)
import decimal
long = [fmt[1] for fmt in formato if fmt[0]==clave]
tipo = [fmt[2] for fmt in formato if fmt[0]==clave]
if not tipo:
return valor
tipo = tipo[0]
if DEBUG: print "tipo", tipo, clave, valor, long
if valor is None:
return None
if valor == "":
return ""
if tipo == A:
return valor
if tipo == N:
return int(valor)
if isinstance(valor, (int, float)):
valor = str(valor)
if isinstance(valor, basestring):
valor = Decimal(valor)
if long and isinstance(long[0], (tuple, list)):
decimales = Decimal('1') / Decimal(10**(long[0][1]))
else:
decimales = Decimal('.01')
valor1 = valor.quantize(decimales, rounding=decimal.ROUND_DOWN)
if valor != valor1 and DEBUG:
print "REDONDEANDO ", clave, decimales, valor, valor1
return valor1
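# Rounding sketch (illustrative): for a decimal field declared with two decimals,
# values are truncated with ROUND_DOWN rather than rounded half-up, e.g.
# Decimal('1.239') becomes Decimal('1.23').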
def escribir(facts, db, schema={}, commit=True):
from formato_txt import ENCABEZADO, DETALLE, TRIBUTO, IVA, CMP_ASOC, PERMISO, DATO
tablas, campos, campos_rev = configurar(schema)
cur = db.cursor()
try:
for dic in facts:
if not 'id' in dic:
dic['id'] = max_id(db, schema={}) + 1
query = "INSERT INTO %(encabezado)s (%%s) VALUES (%%s)" % tablas
fields = ','.join([campos["encabezado"].get(k, k) for k,t,n in ENCABEZADO if k in dic])
values = ','.join(['?' for k,t,n in ENCABEZADO if k in dic])
if DEBUG: print "Ejecutando2: %s %s" % (query % (fields, values), [dic[k] for k,t,n in ENCABEZADO if k in dic])
ejecutar(cur, query % (fields, values), [dic[k] for k,t,n in ENCABEZADO if k in dic])
query = ("INSERT INTO %(detalle)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["detalle"]
for item in dic['detalles']:
fields = ','.join([campos["detalle"].get(k, k) for k,t,n in DETALLE if k in item])
values = ','.join(['?' for k,t,n in DETALLE if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in DETALLE if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in DETALLE if k in item])
if 'cbtes_asoc' in dic and tablas["cmp_asoc"]:
query = ("INSERT INTO %(cmp_asoc)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["cmp_asoc"]
for item in dic['cbtes_asoc']:
fields = ','.join([campos["cmp_asoc"].get(k, k) for k,t,n in CMP_ASOC if k in item])
values = ','.join(['?' for k,t,n in CMP_ASOC if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in CMP_ASOC if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in CMP_ASOC if k in item])
if 'permisos' in dic:
query = ("INSERT INTO %(permiso)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["permiso"]
for item in dic['permisos']:
fields = ','.join([campos["permiso"].get(k, k) for k,t,n in PERMISO if k in item])
values = ','.join(['?' for k,t,n in PERMISO if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in PERMISO if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in PERMISO if k in item])
if 'tributos' in dic:
query = ("INSERT INTO %(tributo)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["tributo"]
for item in dic['tributos']:
fields = ','.join([campos["tributo"].get(k, k) for k,t,n in TRIBUTO if k in item])
values = ','.join(['?' for k,t,n in TRIBUTO if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in TRIBUTO if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in TRIBUTO if k in item])
if 'ivas' in dic:
query = ("INSERT INTO %(iva)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["iva"]
for item in dic['ivas']:
fields = ','.join([campos["iva"].get(k, k) for k,t,n in IVA if k in item])
values = ','.join(['?' for k,t,n in IVA if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in IVA if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in IVA if k in item])
if commit:
db.commit()
finally:
pass
def modificar(fact, db, schema={}, webservice="wsfev1", ids=None, conf_db={}):
from formato_txt import ENCABEZADO, DETALLE, TRIBUTO, IVA, CMP_ASOC, PERMISO, DATO
update = ['cae', 'fecha_vto', 'resultado', 'reproceso', 'motivo_obs', 'err_code', 'err_msg', 'cbte_nro']
tablas, campos, campos_rev = configurar(schema)
cur = db.cursor()
if fact['cae']=='NULL' or fact['cae']=='' or fact['cae']==None:
fact['cae'] = CAE_NULL
fact['fecha_vto'] = FECHA_VTO_NULL
    if 'null' in conf_db and (fact['resultado']==None or fact['resultado']==''):
fact['resultado'] = RESULTADO_NULL
for k in ['reproceso', 'motivo_obs', 'err_code', 'err_msg']:
        if 'null' in conf_db and k in fact and (fact[k]==None or fact[k]==''):
if DEBUG: print k, "NULL"
fact[k] = NULL
try:
query = ("UPDATE %(encabezado)s SET %%%%s WHERE %%(id)s=?" % tablas) % campos["encabezado"]
fields = [campos["encabezado"].get(k, k) for k,t,n in ENCABEZADO if k in update and k in fact]
values = [fact[k] for k,t,n in ENCABEZADO if k in update and k in fact]
query = query % ','.join(["%s=?" % f for f in fields])
if DEBUG: print query, values+[fact['id']]
ejecutar(cur, query, values+[fact['id']] )
db.commit()
except:
raise
finally:
pass
def leer(db, schema={}, webservice="wsfev1", ids=None):
from formato_txt import ENCABEZADO, DETALLE, TRIBUTO, IVA, CMP_ASOC, PERMISO, DATO
tablas, campos, campos_rev = configurar(schema)
cur = db.cursor()
if not ids:
query = ("SELECT * FROM %(encabezado)s WHERE (%%(resultado)s IS NULL OR %%(resultado)s='' OR %%(resultado)s=' ') AND (%%(id)s IS NOT NULL) AND %%(webservice)s=? ORDER BY %%(tipo_cbte)s, %%(punto_vta)s, %%(cbte_nro)s" % tablas) % campos["encabezado"]
ids = [webservice]
else:
query = ("SELECT * FROM %(encabezado)s WHERE " % tablas) + " OR ".join(["%(id)s=?" % campos["encabezado"] for id in ids])
if DEBUG: print "ejecutando",query, ids
try:
ejecutar(cur, query, ids)
rows = cur.fetchall()
description = cur.description
for row in rows:
detalles = []
encabezado = {}
for i, k in enumerate(description):
val = row[i]
if isinstance(val,str):
val = val.decode(CHARSET)
if isinstance(val,basestring):
val = val.strip()
key = campos_rev["encabezado"].get(k[0], k[0].lower())
val = redondear(ENCABEZADO, key, val)
encabezado[key] = val
print encabezado
detalles = []
if DEBUG: print ("SELECT * FROM %(detalle)s WHERE %%(id)s = ?" % tablas) % campos["detalle"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(detalle)s WHERE %%(id)s = ?" % tablas) % campos["detalle"], [encabezado['id']])
for it in cur.fetchall():
detalle = {}
for i, k in enumerate(cur.description):
val = it[i]
if isinstance(val,str):
val = val.decode(CHARSET)
key = campos_rev["detalle"].get(k[0], k[0].lower())
val = redondear(DETALLE, key, val)
detalle[key] = val
detalles.append(detalle)
encabezado['detalles'] = detalles
cmps_asoc = []
if DEBUG: print ("SELECT * FROM %(cmp_asoc)s WHERE %%(id)s = ?" % tablas) % campos["cmp_asoc"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(cmp_asoc)s WHERE %%(id)s = ?" % tablas) % campos["cmp_asoc"], [encabezado['id']])
for it in cur.fetchall():
cmp_asoc = {}
for i, k in enumerate(cur.description):
val = it[i]
key = campos_rev["cmp_asoc"].get(k[0], k[0].lower())
cmp_asoc[key] = val
cmps_asoc.append(cmp_asoc)
if cmps_asoc:
encabezado['cbtes_asoc'] = cmps_asoc
permisos = []
if DEBUG: print ("SELECT * FROM %(permiso)s WHERE %%(id)s = ?" % tablas) % campos["permiso"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(permiso)s WHERE %%(id)s = ?" % tablas) % campos["permiso"], [encabezado['id']])
for it in cur.fetchall():
permiso = {}
for i, k in enumerate(cur.description):
val = it[i]
key = campos_rev["permiso"].get(k[0], k[0].lower())
permiso[key] = val
permisos.append(permiso)
if permisos:
encabezado['permisos'] = permisos
ivas = []
if DEBUG: print ("SELECT * FROM %(iva)s WHERE %%(id)s = ?" % tablas) % campos["iva"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(iva)s WHERE %%(id)s = ?" % tablas) % campos["iva"], [encabezado['id']])
for it in cur.fetchall():
iva = {}
for i, k in enumerate(cur.description):
val = it[i]
key = campos_rev["iva"].get(k[0], k[0].lower())
val = redondear(IVA, key, val)
iva[key] = val
ivas.append(iva)
if ivas:
encabezado['ivas'] = ivas
tributos = []
if DEBUG: print ("SELECT * FROM %(tributo)s WHERE %%(id)s = ?" % tablas) % campos["tributo"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(tributo)s WHERE %%(id)s = ?" % tablas) % campos["tributo"], [encabezado['id']])
for it in cur.fetchall():
tributo = {}
for i, k in enumerate(cur.description):
val = it[i]
key = campos_rev["tributo"].get(k[0], k[0].lower())
val = redondear(TRIBUTO, key, val)
tributo[key] = val
tributos.append(tributo)
if tributos:
encabezado['tributos'] = tributos
yield encabezado
db.commit()
finally:
cur.close()
def ayuda():
print "-- Formato:"
from formato_txt import ENCABEZADO, DETALLE, TRIBUTO, IVA, CMP_ASOC, DATO, PERMISO
tipos_registro = [
('encabezado', ENCABEZADO),
('detalle', DETALLE),
('tributo', TRIBUTO),
('iva', IVA),
('cmp_asoc', CMP_ASOC),
('permiso', PERMISO),
('dato', DATO),
]
print "-- Esquema:"
for sql in esquema_sql(tipos_registro):
print sql
if __name__ == "__main__":
ayuda()
| gpl-3.0 |
diagramsoftware/odoo | addons/edi/models/__init__.py | 442 | 1116 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import edi
import res_partner
import res_company
import res_currency
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kermitfr/kermit-webui | src/webui/platforms/oc4j/applications.py | 1 | 2735 | '''
Created on Oct 25, 2011
@author: mmornati
'''
from webui.servers.models import Server
from guardian.shortcuts import get_objects_for_user
import logging
from webui.platforms.oc4j.utils import extract_appli_info, check_contains,\
extract_appli_details
from webui.platforms.abstracts import Application
from webui.platforms.oc4j import settings
from webui.platforms.platforms import platforms
from webui.servers import utils
logger = logging.getLogger(__name__)
class OC4JApplication(Application):
def getApplications(self, user):
servers = utils.extract_user_servers(user)
#Retrieving applilist for any server controlled by kermit
applications = []
if servers:
for server in servers:
environment = self.extract_environment_level(server)
appli = extract_appli_info(server.hostname, environment)
if appli:
for app in appli:
extracted = check_contains(applications, app)
if extracted:
extracted["deploy"] = extracted["deploy"] + 1
extracted["servers"].append(app["servers"])
else:
applications.append(app)
return applications
def getApplicationsPath(self, user, server_path):
servers = utils.extract_user_servers_in_path(user, server_path)
#Retrieving applilist for any server controlled by kermit
applications = []
if servers:
for server in servers:
environment = self.extract_environment_level(server)
appli = extract_appli_info(server.hostname, environment)
if appli:
for app in appli:
extracted = check_contains(applications, app)
if extracted:
extracted["deploy"] = extracted["deploy"] + 1
extracted["servers"].append(app["servers"])
else:
applications.append(app)
return applications
def getAppliInfo(self, user, appname):
servers = utils.extract_user_servers(user)
#Retrieving applilist for any server controlled by kermit
applications = []
if servers:
for server in servers:
environment = self.extract_environment_level(server)
appli = extract_appli_details(server.hostname, environment, appname)
if appli:
applications.extend(appli)
return applications
platforms.register(OC4JApplication, settings.PLATFORM_NAME) | gpl-3.0 |
GovCERT-CZ/dionaea | modules/python/util/logsql2postgres.py | 3 | 10245 | #!/opt/dionaea/bin/python3
# sudo su postgres
# createdb --owner=xmpp logsql
# psql -U xmpp logsql < modules/python/util/xmpp/pg_schema.sql
import sqlite3
import postgresql.driver as pg_driver
import optparse
def copy(name, lite, pg, src, dst):
print("[+] {0}".format(name))
pg.execute("DELETE FROM {0}".format(dst['table']))
offset = 0
limit = 10000
insert = pg.prepare(dst['query'])
while True:
result = lite.execute(src['query'].format(limit, offset))
r = 0
result = result.fetchall()
r = len(result)
insert.load_rows(result)
# print("{0} {1} {2}".format(offset, limit, r))
if r != limit:
# update the sequence if we inserted rows
if offset + r != 0:
pg.execute(
"SELECT setval('{0}',{1})".format(dst['seq'], offset + r))
break
offset += limit
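# Example invocation for a single table (illustrative; the cursor and connection
# handles are the ones created in the __main__ section below):
#
#   copy('connections', db['sqlite']['cursor'], db['pg']['dbh'],
#        cando['connections'][0], cando['connections'][1])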
cando = {
'connections' : ({
# FIXME postgres does not know connection_type pending
# connection_type is an enum, so this may get messy
'query' : """SELECT
connection,
connection_type,
connection_transport,
datetime(connection_timestamp, 'unixepoch') || ' UTC' AS connection_timestamp,
connection_parent,
connection_root,
ifnull(nullif(local_host,''),'0.0.0.0'),
local_port,
ifnull(nullif(remote_host,''),'0.0.0.0'),
remote_port,
connection_protocol,
remote_hostname FROM connections WHERE connection_type != 'pending' LIMIT {:d} OFFSET {:d} \n"""
},
{
'table' : 'dionaea.connections',
'seq' : "dionaea.connections_connection_seq",
'query' : """INSERT INTO dionaea.connections
(connection,
connection_type,
connection_transport,
connection_timestamp,
connection_parent,
connection_root,
local_host,
local_port,
remote_host,
remote_port,
connection_protocol,
remote_hostname)
VALUES
($1,$2,$3,$4::text::timestamp,$5,$6,$7::text::inet,$8,$9::text::inet,$10,$11,$12)""",
}),
'dcerpcbinds': ({
'query' : """SELECT
dcerpcbind,
connection,
dcerpcbind_uuid,
dcerpcbind_transfersyntax FROM dcerpcbinds LIMIT {:d} OFFSET {:d} \n"""
},
{
'table' : 'dionaea.dcerpcbinds',
'seq' : "dionaea.dcerpcbinds_dcerpcbind_seq",
'query' : """INSERT INTO dionaea.dcerpcbinds
(dcerpcbind,
connection,
dcerpcbind_uuid,
dcerpcbind_transfersyntax)
VALUES
($1,$2,$3,$4)""",
}),
'dcerpcrequests' : ({
'query' : """SELECT
dcerpcrequest,
connection,
dcerpcrequest_uuid,
dcerpcrequest_opnum FROM dcerpcrequests LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.dcerpcrequests',
'seq' : "dionaea.dcerpcrequests_dcerpcrequest_seq",
'query' : """INSERT INTO dionaea.dcerpcrequests
(dcerpcrequest,
connection,
dcerpcrequest_uuid,
dcerpcrequest_opnum)
VALUES
($1,$2,$3,$4)""",
}),
'dcerpcservices' : ({
'query' : """SELECT
dcerpcservice,
dcerpcservice_uuid,
dcerpcservice_name FROM dcerpcservices LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.dcerpcservices',
'seq' : "dionaea.dcerpcservices_dcerpcservice_seq",
'query' : """INSERT INTO dionaea.dcerpcservices
(dcerpcservice,
dcerpcservice_uuid,
dcerpcservice_name)
VALUES
($1,$2,$3)""",
}),
'dcerpcserviceops' : ({
'query' : """SELECT
dcerpcserviceop,
dcerpcservice,
dcerpcserviceop_name,
dcerpcserviceop_opnum,
dcerpcserviceop_vuln
FROM dcerpcserviceops LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.dcerpcserviceops',
'seq' : "dionaea.dcerpcserviceops_dcerpcserviceop_seq",
'query' : """INSERT INTO dionaea.dcerpcserviceops
(dcerpcserviceop,
dcerpcservice,
dcerpcserviceop_name,
dcerpcserviceop_opnum,
dcerpcserviceop_vuln)
VALUES
($1,$2,$3,$4,$5)""",
}),
'downloads' : ({
'query' : """SELECT
download,
connection,
download_md5_hash,
download_url FROM downloads LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.downloads',
'seq' : "dionaea.dcerpcrequests_dcerpcrequest_seq",
'query' : """INSERT INTO dionaea.downloads
(download,
connection,
download_md5_hash,
download_url)
VALUES
($1,$2,$3,$4)""",
}),
'emu_profiles' : ({
'query' : """SELECT
emu_profile,
connection,
emu_profile_json FROM emu_profiles LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.emu_profiles',
'seq' : "dionaea.emu_profiles_emu_profile_seq",
'query' : """INSERT INTO dionaea.emu_profiles
(emu_profile,
connection,
emu_profile_json)
VALUES
($1,$2,$3)""",
}),
'emu_services' : ({
'query' : """SELECT
emu_serivce,
connection,
emu_service_url FROM emu_services LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.emu_services',
'seq' : "dionaea.emu_services_emu_service_seq",
'query' : """INSERT INTO dionaea.emu_services
(emu_service,
connection,
emu_service_url)
VALUES
($1,$2,$3)""",
}),
'offers' : ({
'query' : """SELECT
offer,
connection,
offer_url FROM offers LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.offers',
'seq' : "dionaea.offers_offer_seq",
'query' : """INSERT INTO dionaea.offers
(offer,
connection,
offer_url)
VALUES
($1,$2,$3)""",
}),
'p0fs' : (
{ 'query' : """SELECT
p0f,
connection,
p0f_genre,
p0f_link,
p0f_detail,
p0f_uptime,
p0f_tos,
p0f_dist,
p0f_nat,
p0f_fw FROM p0fs LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.p0fs',
'seq' : "dionaea.p0fs_p0f_seq",
'query' : """INSERT INTO dionaea.p0fs
( p0f,
connection,
p0f_genre,
p0f_link,
p0f_detail,
p0f_uptime,
p0f_tos,
p0f_dist,
p0f_nat,
p0f_fw)
VALUES
($1,$2,$3,$4,$5,$6,$7,$8,$9,$10)""",
}),
'virustotals': (
{ 'query' : """SELECT
virustotal,
virustotal_md5_hash,
datetime(virustotal_timestamp, 'unixepoch') || ' UTC' AS virustotal_timestamp,
virustotal_permalink
FROM virustotals LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.virustotals',
'seq' : "dionaea.virustotals_virustotal_seq",
'query' : """INSERT INTO dionaea.virustotals
(
virustotal,
virustotal_md5_hash,
virustotal_timestamp,
virustotal_permalink
)
VALUES
($1,$2,$3::text::timestamptz,$4)""",
}),
'virustotalscans': (
{ 'query' : """SELECT
virustotalscan,
virustotal,
virustotalscan_scanner,
nullif(virustotalscan_result,'')
FROM virustotalscans LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.virustotalscans',
'seq' : "dionaea.virustotalscans_virustotalscan_seq",
'query' : """INSERT INTO dionaea.virustotalscans
(
virustotalscan,
virustotal,
virustotalscan_scanner,
virustotalscan_result
)
VALUES
($1,$2,$3,$4)""",
}),
# x
'mssql_fingerprints': (
{ 'query' : """SELECT
mssql_fingerprint,
connection,
mssql_fingerprint_hostname,
mssql_fingerprint_appname,
mssql_fingerprint_cltintname FROM mssql_fingerprints LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.mssql_fingerprints',
'seq' : "dionaea.mssql_fingerprints_mssql_fingerprint_seq",
'query' : """INSERT INTO dionaea.mssql_fingerprints
(
mssql_fingerprint,
connection,
mssql_fingerprint_hostname,
mssql_fingerprint_appname,
mssql_fingerprint_cltintname
)
VALUES
($1,$2,$3,$4,$5)""",
}),
'mssql_commands': (
{ 'query' : """SELECT
mssql_command,
connection,
mssql_command_status,
mssql_command_cmd FROM mssql_commands LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.mssql_commands',
'seq' : "dionaea.mssql_commands_mssql_command_seq",
'query' : """INSERT INTO dionaea.mssql_commands
(
mssql_command,
connection,
mssql_command_status,
mssql_command_cmd
)
VALUES
($1,$2,$3,$4)""",
}),
'logins': (
{ 'query' : """SELECT
login,
connection,
login_username,
login_password FROM logins LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.logins',
'seq' : "dionaea.logins_login_seq",
'query' : """INSERT INTO dionaea.logins
(
login,
connection,
login_username,
login_password
)
VALUES
($1,$2,$3,$4)""",
})
}
if __name__ == "__main__":
p = optparse.OptionParser()
p.add_option('-s', '--database-host', dest='database_host',
help='localhost:5432', type="string", action="store")
p.add_option('-d', '--database', dest='database',
help='for example xmpp', type="string", action="store")
p.add_option('-u', '--database-user', dest='database_user',
help='for example xmpp', type="string", action="store")
p.add_option('-p', '--database-password', dest='database_password',
help='the database users password', type="string", action="store")
p.add_option('-f', '--sqlite-file', dest='sqlite_file',
help='path to sqlite db', type="string", action="store")
(options, args) = p.parse_args()
if len(args) == 0:
print("use {} as args".format( ' '.join(cando.keys()) ) )
db = {}
db['sqlite'] = {}
db['sqlite']['dbh'] = sqlite3.connect(options.sqlite_file)
db['sqlite']['cursor'] = db['sqlite']['dbh'].cursor()
db['pg'] = {}
db['pg']['dbh'] = pg_driver.connect(
user = options.database_user,
password = options.database_password,
database = options.database,
host = options.database_host,
port = 5432)
for i in args:
if i in cando:
copy(i,
db['sqlite']['cursor'],
db['pg']['dbh'],
cando[i][0],
cando[i][1])
# db['pg']['dbh'].commit()
| gpl-2.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/perf/page_sets/extension_profile_shared_state.py | 8 | 1202 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
from profile_creators import extension_profile_extender
from profile_creators import profile_generator
from telemetry.page import shared_page_state
class ExtensionProfileSharedState(shared_page_state.SharedPageState):
"""Shared state tied with extension profile.
Generates extension profile on initialization.
"""
def __init__(self, test, finder_options, story_set):
super(ExtensionProfileSharedState, self).__init__(
test, finder_options, story_set)
generator = profile_generator.ProfileGenerator(
extension_profile_extender.ExtensionProfileExtender,
'extension_profile')
self._out_dir = generator.Run(finder_options)
if self._out_dir:
finder_options.browser_options.profile_dir = self._out_dir
else:
finder_options.browser_options.dont_override_profile = True
def TearDownState(self):
"""Clean up generated profile directory."""
super(ExtensionProfileSharedState, self).TearDownState()
if self._out_dir:
shutil.rmtree(self._out_dir)
| mit |
namccart/gnuradio | grc/python/epy_block_io.py | 5 | 2648 |
import inspect
import collections
from gnuradio import gr
import pmt
TYPE_MAP = {
'complex64': 'complex', 'complex': 'complex',
'float32': 'float', 'float': 'float',
'int32': 'int', 'uint32': 'int',
'int16': 'short', 'uint16': 'short',
'int8': 'byte', 'uint8': 'byte',
}
BlockIO = collections.namedtuple('BlockIO', 'name cls params sinks sources doc')
def _ports(sigs, msgs):
ports = list()
for i, dtype in enumerate(sigs):
port_type = TYPE_MAP.get(dtype.name, None)
if not port_type:
raise ValueError("Can't map {0:!r} to GRC port type".format(dtype))
ports.append((str(i), port_type))
for msg_key in msgs:
if msg_key == 'system':
continue
ports.append((msg_key, 'message'))
return ports
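# For example (illustrative): a single float32 input stream plus a message port
# 'msg_in' maps to [('0', 'float'), ('msg_in', 'message')].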
def _blk_class(source_code):
ns = {}
try:
exec source_code in ns
except Exception as e:
raise ValueError("Can't interpret source code: " + str(e))
for var in ns.itervalues():
        if inspect.isclass(var) and issubclass(var, gr.gateway.gateway_block):
return var
raise ValueError('No python block class found in code')
def extract(cls):
if not inspect.isclass(cls):
cls = _blk_class(cls)
spec = inspect.getargspec(cls.__init__)
defaults = map(repr, spec.defaults or ())
doc = cls.__doc__ or cls.__init__.__doc__ or ''
cls_name = cls.__name__
if len(defaults) + 1 != len(spec.args):
raise ValueError("Need all __init__ arguments to have default values")
try:
instance = cls()
except Exception as e:
raise RuntimeError("Can't create an instance of your block: " + str(e))
name = instance.name()
params = list(zip(spec.args[1:], defaults))
sinks = _ports(instance.in_sig(),
pmt.to_python(instance.message_ports_in()))
sources = _ports(instance.out_sig(),
pmt.to_python(instance.message_ports_out()))
return BlockIO(name, cls_name, params, sinks, sources, doc)
if __name__ == '__main__':
blk_code = """
import numpy as np
from gnuradio import gr
import pmt
class blk(gr.sync_block):
def __init__(self, param1=None, param2=None):
"Test Docu"
gr.sync_block.__init__(
self,
name='Embedded Python Block',
in_sig = (np.float32,),
out_sig = (np.float32,np.complex64,),
)
self.message_port_register_in(pmt.intern('msg_in'))
self.message_port_register_out(pmt.intern('msg_out'))
def work(self, inputs_items, output_items):
return 10
"""
print extract(blk_code)
| gpl-3.0 |
garg10may/youtube-dl | youtube_dl/extractor/telemb.py | 177 | 2964 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import remove_start
class TeleMBIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?telemb\.be/(?P<display_id>.+?)_d_(?P<id>\d+)\.html'
_TESTS = [
{
'url': 'http://www.telemb.be/mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-_d_13466.html',
'md5': 'f45ea69878516ba039835794e0f8f783',
'info_dict': {
'id': '13466',
'display_id': 'mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-',
'ext': 'mp4',
'title': 'Mons - Cook with Danielle : des cours de cuisine en anglais ! - Les reportages',
'description': 'md5:bc5225f47b17c309761c856ad4776265',
'thumbnail': 're:^http://.*\.(?:jpg|png)$',
}
},
{
# non-ASCII characters in download URL
'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html',
'md5': '6e9682736e5ccd4eab7f21e855350733',
'info_dict': {
'id': '13514',
'display_id': 'les-reportages-havre-incendie-mortel',
'ext': 'mp4',
'title': 'Havré - Incendie mortel - Les reportages',
'description': 'md5:5e54cb449acb029c2b7734e2d946bd4a',
'thumbnail': 're:^http://.*\.(?:jpg|png)$',
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
formats = []
for video_url in re.findall(r'file\s*:\s*"([^"]+)"', webpage):
fmt = {
'url': video_url,
'format_id': video_url.split(':')[0]
}
rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url)
if rtmp:
fmt.update({
'play_path': rtmp.group('playpath'),
'app': rtmp.group('app'),
'player_url': 'http://p.jwpcdn.com/6/10/jwplayer.flash.swf',
'page_url': 'http://www.telemb.be',
'preference': -1,
})
formats.append(fmt)
self._sort_formats(formats)
title = remove_start(self._og_search_title(webpage), 'TéléMB : ')
description = self._html_search_regex(
r'<meta property="og:description" content="(.+?)" />',
webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats,
}
| unlicense |
FireWalkerX/eyeOS-FOSS-V.2.0 | devtools/qooxdoo-sdk/tool/pylib/graph/classes/Digraph.py | 4 | 19100 | # Copyright (c) 2007-2009 Pedro Matiello <[email protected]>
# Nathan Davis <[email protected]>
# Zsolt Haraszti <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Digraph class
"""
# Imports
from graph import *
from graph.algorithms import filters
class digraph (object):
"""
Digraph class.
Digraphs are built of nodes and directed edges.
@sort: __init__, __getitem__, __iter__, __len__, __str__, add_edge, add_edge_attribute,
add_graph, add_node, add_node_attribute, add_nodes, add_spanning_tree, complete, degree,
del_edge, del_node, edges, get_edge_attributes, get_edge_label, get_edge_weight,
get_node_attributes, has_edge, has_node, incidents, inverse, neighbors, nodes, order,
set_edge_label, set_edge_weight, traversal, generate, read, write, accessibility,
breadth_first_search, cut_edges, cut_nodes, depth_first_search, heuristic_search,
minimal_spanning_tree, mutual_accessibility, shortest_path, topological_sorting
"""
def __init__(self):
"""
Initialize a digraph.
"""
self.node_neighbors = {} # Pairing: Node -> Neighbors
self.edge_properties = {} # Pairing: Edge -> (Label, Weight)
self.node_incidence = {} # Pairing: Node -> Incident nodes
self.node_attr = {} # Pairing: Node -> Attributes
self.edge_attr = {} # Pairing: Edge -> Attributes
def __str__(self):
"""
Return a string representing the digraph when requested by str() (or print).
@rtype: string
@return: String representing the graph.
"""
return "<graph object " + str(self.nodes()) + " " + str(self.edges()) + ">"
def __len__(self):
"""
Return the order of the digraph when requested by len().
@rtype: number
@return: Size of the graph.
"""
return len(self.node_neighbors)
def __iter__(self):
"""
Return a iterator passing through all nodes in the digraph.
@rtype: iterator
@return: Iterator passing through all nodes in the digraph.
"""
for each in self.node_neighbors.iterkeys():
yield each
def __getitem__(self, node):
"""
Return a iterator passing through all neighbors of the given node.
@rtype: iterator
@return: Iterator passing through all neighbors of the given node.
"""
for each in self.node_neighbors[node]:
yield each
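    # Iteration sketch (illustrative): "for node in dg" visits every node of a
    # digraph dg, while "for neighbor in dg[node]" visits the direct successors
    # of the given node.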
def read(self, string, fmt='xml'):
"""
Read a graph from a string. Nodes and edges specified in the input will be added to the
current graph.
@type string: string
@param string: Input string specifying a graph.
@type fmt: string
@param fmt: Input format. Possible formats are:
1. 'xml' - XML (default)
"""
if (fmt == 'xml'):
readwrite.read_xml(self, string)
def write(self, fmt='xml'):
"""
Write the graph to a string. Depending of the output format, this string can be used by
read() to rebuild the graph.
@type fmt: string
@param fmt: Output format. Possible formats are:
1. 'xml' - XML (default)
2. 'dot' - DOT Language (for GraphViz)
3. 'dotwt' - DOT Language with weight information
@rtype: string
@return: String specifying the graph.
"""
if (fmt == 'xml'):
return readwrite.write_xml(self)
elif (fmt == 'dot'):
return readwrite.write_dot_digraph(self, False)
elif (fmt == 'dotwt'):
return readwrite.write_dot_digraph(self, True)
def generate(self, num_nodes, num_edges, weight_range=(1, 1)):
"""
Add nodes and random edges to the graph.
@type num_nodes: number
@param num_nodes: Number of nodes.
@type num_edges: number
@param num_edges: Number of edges.
@type weight_range: tuple
@param weight_range: tuple of two integers as lower and upper limits on randomly generated
weights (uniform distribution).
"""
generators.generate(self, num_nodes, num_edges, weight_range)
def nodes(self):
"""
Return node list.
@rtype: list
@return: Node list.
"""
return self.node_neighbors.keys()
def neighbors(self, node):
"""
Return all nodes that are directly accessible from given node.
@type node: node
@param node: Node identifier
@rtype: list
@return: List of nodes directly accessible from given node.
"""
return self.node_neighbors[node]
def incidents(self, node):
"""
Return all nodes that are incident to the given node.
@type node: node
@param node: Node identifier
@rtype: list
        @return: List of nodes incident to the given node (its direct predecessors).
"""
return self.node_incidence[node]
def edges(self):
"""
Return all edges in the graph.
@rtype: list
@return: List of all edges in the graph.
"""
return self.edge_properties.keys()
def has_node(self, node):
"""
Return whether the requested node exists.
@type node: node
@param node: Node identifier
@rtype: boolean
@return: Truth-value for node existence.
"""
return self.node_neighbors.has_key(node)
def add_node(self, node, attrs=[]):
"""
Add given node to the graph.
@attention: While nodes can be of any type, it's strongly recommended to use only numbers
and single-line strings as node identifiers if you intend to use write().
@type node: node
@param node: Node identifier.
@type attrs: list
@param attrs: List of node attributes specified as (attribute, value) tuples.
"""
if (node not in self.node_neighbors):
self.node_neighbors[node] = []
self.node_incidence[node] = []
self.node_attr[node] = attrs
def add_nodes(self, nodelist):
"""
Add given nodes to the graph.
@attention: While nodes can be of any type, it's strongly recommended to use only numbers
and single-line strings as node identifiers if you intend to use write().
@type nodelist: list
@param nodelist: List of nodes to be added to the graph.
"""
for each in nodelist:
self.add_node(each)
def add_edge(self, u, v, wt=1, label='', attrs=[]):
"""
Add an directed edge (u,v) to the graph connecting nodes u to v.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@type wt: number
@param wt: Edge weight.
@type label: string
@param label: Edge label.
@type attrs: list
@param attrs: List of node attributes specified as (attribute, value) tuples.
"""
if (v not in self.node_neighbors[u]):
self.node_neighbors[u].append(v)
self.node_incidence[v].append(u)
self.edge_properties[(u, v)] = [label, wt]
self.edge_attr[(u, v)] = attrs
def del_node(self, node):
"""
Remove a node from the graph.
@type node: node
@param node: Node identifier.
"""
for each in list(self.incidents(node)):
self.del_edge(each, node)
for each in list(self.neighbors(node)):
self.del_edge(node, each)
del(self.node_neighbors[node])
del(self.node_incidence[node])
del(self.node_attr[node])
def del_edge(self, u, v):
"""
Remove an directed edge (u, v) from the graph.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
"""
self.node_neighbors[u].remove(v)
self.node_incidence[v].remove(u)
del(self.edge_properties[(u,v)])
del(self.edge_attr[(u,v)])
def get_edge_weight(self, u, v):
"""
Get the weight of an edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@rtype: number
@return: Edge weight.
"""
return self.edge_properties[(u, v)][1]
def set_edge_weight(self, u, v, wt):
"""
Set the weight of an edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@type wt: number
@param wt: Edge weight.
"""
self.edge_properties[(u, v)][1] = wt
def get_edge_label(self, u, v):
"""
Get the label of an edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@rtype: string
@return: Edge label
"""
return self.edge_properties[(u, v)][0]
def set_edge_label(self, u, v, label):
"""
Set the label of an edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@type label: string
@param label: Edge label.
"""
self.edge_properties[(u, v)][0] = label
def add_node_attribute(self, node, attr):
"""
Add attribute to the given node.
@type node: node
@param node: Node identifier
@type attr: tuple
@param attr: Node attribute specified as a tuple in the form (attribute, value).
"""
self.node_attr[node] = self.node_attr[node] + [attr]
def get_node_attributes(self, node):
"""
Return the attributes of the given node.
@type node: node
@param node: Node identifier
@rtype: list
@return: List of attributes specified tuples in the form (attribute, value).
"""
return self.node_attr[node]
def add_edge_attribute(self, u, v, attr):
"""
Add attribute to the given edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@type attr: tuple
@param attr: Node attribute specified as a tuple in the form (attribute, value).
"""
self.edge_attr[(u,v)] = self.edge_attr[(u,v)] + [attr]
def get_edge_attributes(self, u, v):
"""
Return the attributes of the given edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@rtype: list
@return: List of attributes specified tuples in the form (attribute, value).
"""
return self.edge_attr[(u,v)]
def has_edge(self, u, v):
"""
Return whether an edge between nodes u and v exists.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@rtype: boolean
@return: Truth-value for edge existence.
"""
return self.edge_properties.has_key((u,v))
def order(self, node):
"""
Return the order of the given node.
@rtype: number
@return: Order of the given node.
"""
return len(self.neighbors(node))
def degree(self, node):
"""
Return the degree of the given node.
@rtype: number
        @return: Degree (number of incident nodes) of the given node.
"""
return len(self.node_incidence[node])
def complete(self):
"""
Make the graph a complete graph.
@attention: This will modify the current graph.
"""
for each in self.nodes():
for other in self.nodes():
if (each != other):
self.add_edge(each, other)
def inverse(self):
"""
Return the inverse of the graph.
@rtype: graph
@return: Complement graph for the graph.
"""
inv = digraph()
inv.add_nodes(self.nodes())
inv.complete()
for each in self.edges():
inv.del_edge(each[0], each[1])
return inv
def add_graph(self, graph):
"""
Add other graph to the graph.
@attention: Attributes and labels are not preserved.
@type graph: graph
@param graph: Graph
"""
self.add_nodes(graph.nodes())
for each_node in graph.nodes():
for each_edge in graph.neighbors(each_node):
self.add_edge(each_node, each_edge)
def add_spanning_tree(self, st):
"""
Add a spanning tree to the graph.
@type st: dictionary
@param st: Spanning tree.
"""
self.add_nodes(st.keys())
for each in st:
if (st[each] is not None):
self.add_edge(st[each], each)
def traversal(self, node, order='pre'):
"""
Graph traversal iterator.
@type node: node
@param node: Node.
@type order: string
@param order: traversal ordering. Possible values are:
2. 'pre' - Preordering (default)
1. 'post' - Postordering
@rtype: iterator
@return: Traversal iterator.
"""
for each in traversal.traversal(self, node, order):
yield each
def depth_first_search(self, root=None, filter=filters.null()):
"""
        Depth-first search.
@type root: node
@param root: Optional root node (will explore only root's connected component)
@rtype: tuple
        @return: tuple containing a dictionary and two lists:
1. Generated spanning tree
2. Graph's preordering
3. Graph's postordering
"""
return searching.depth_first_search(self, root, filter)
def accessibility(self):
"""
Accessibility matrix (transitive closure).
@rtype: dictionary
@return: Accessibility information for each node.
"""
return accessibility.accessibility(self)
def breadth_first_search(self, root=None, filter=filters.null()):
"""
Breadth-first search.
@type root: node
@param root: Optional root node (will explore only root's connected component)
@rtype: dictionary
@return: A tuple containing a dictionary and a list.
1. Generated spanning tree
2. Graph's level-based ordering
"""
return searching.breadth_first_search(self, root, filter=filter)
def mutual_accessibility(self):
"""
Mutual-accessibility matrix (strongly connected components).
@rtype: list
@return: Mutual-accessibility information for each node.
"""
return accessibility.mutual_accessibility(self)
def topological_sorting(self):
"""
Topological sorting.
@attention: Topological sorting is meaningful only for directed acyclic graphs.
@rtype: list
@return: Topological sorting for the graph.
"""
return sorting.topological_sorting(self)
def minimal_spanning_tree(self, root=None):
"""
Minimal spanning tree.
@type root: node
@param root: Optional root node (will explore only root's connected component)
@attention: Minimal spanning tree meaningful only for weighted graphs.
@rtype: list
@return: Generated spanning tree.
"""
return minmax.minimal_spanning_tree(self, root)
def shortest_path(self, source):
"""
Return the shortest path distance between source node and all other nodes using Dijkstra's
algorithm.
@attention: All weights must be nonnegative.
@type source: node
@param source: Node from which to start the search.
@rtype: tuple
@return: A tuple containing two dictionaries, each keyed by target nodes.
1. Shortest path spanning tree
2. Shortest distance from given source to each target node
Inaccessible target nodes do not appear in either dictionary.
"""
return minmax.shortest_path(self, source)
def heuristic_search(self, start, goal, heuristic):
"""
A* search algorithm.
A set of heuristics is available under C{graph.heuristics}. User-created heuristics are
allowed too.
@type start: node
@param start: Start node
@type goal: node
@param goal: Goal node
@type heuristic: function
@param heuristic: Heuristic function
@rtype: list
@return: Optimized path from start to goal node
"""
return minmax.heuristic_search(self, start, goal, heuristic)
def cut_edges(self):
"""
Return the cut-edges of the given graph.
@rtype: list
@return: List of cut-edges.
"""
return accessibility.cut_edges(self)
def cut_nodes(self):
"""
Return the cut-nodes of the given graph.
@rtype: list
@return: List of cut-nodes.
"""
return accessibility.cut_nodes(self)
def find_cycle(self):
"""
Find a cycle in the digraph.
This function will return a list of nodes which form a cycle in the graph or an empty list if
no cycle exists.
@rtype: list
@return: List of nodes.
"""
return cycles.find_cycle(self, directed=True)
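# Minimal usage sketch (illustrative; node names and weights are made up):
#
#   dg = digraph()
#   dg.add_nodes(['s', 'a', 'b'])
#   dg.add_edge('s', 'a', wt=2)
#   dg.add_edge('a', 'b', wt=3)
#   spanning_tree, distances = dg.shortest_path('s')
#   # distances should map 's' to 0, 'a' to 2 and 'b' to 5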
| agpl-3.0 |
geosolutions-it/wps-remote | src/wpsremote/xmpp_data/configs/myservice/code/test.py | 1 | 3686 | # (c) 2016 Open Source Geospatial Foundation - all rights reserved
# (c) 2014 - 2015 Centre for Maritime Research and Experimentation (CMRE)
# (c) 2013 - 2014 German Aerospace Center (DLR)
# This code is licensed under the GPL 2.0 license, available at the root
# application directory.
import subprocess
import logging.config
import logging
import argparse
import sys
import os
import uuid
import zipfile
import time
# constants
# id = os.urandom(10)
id = str(uuid.uuid4())
gdalContour = r'/usr/bin/gdal_contour'
dst = r'contour_'+id[:13]
src = '%s/../../../resource_dir/srtm_39_04/srtm_39_04_c.tif' % os.path.dirname(os.path.abspath(__file__))
cmd = '-a elev' # just for example!
interval = '-i'
class GDALTest(object):
def __init__(self, args):
self.args = args
self.create_logger("logger_test.properties")
self.logger.info("ProgressInfo:0.0%")
def run(self):
trg = '%s/../../../output/%s/%s.shp' % (os.path.dirname(os.path.abspath(__file__)), self.args.execution_id, dst)
# fullCmd = ' '.join([gdalContour, cmd, self.youCanQuoteMe(src), \
# self.youCanQuoteMe(dst), interval, self.args.interval])
fullCmd = ' '.join([gdalContour, cmd, src, trg, interval, self.args.interval])
self.logger.debug("Running command > " + fullCmd)
self.logger.info("going to sleep again...")
time.sleep(30) # Delays for 30 seconds. You can also use a float value.
proc = subprocess.Popen(fullCmd.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
for line in proc.stdout:
self.logger.info(line)
# call communicate to retrieve return code of subprocess
proc.communicate()
ret = proc.returncode
self.logger.info("...waking up and going to sleep again...")
time.sleep(30) # Delays for 30 seconds. You can also use a float value.
if (ret == 0):
# zipf = zipfile.ZipFile(self.args.workdir+'/contour.zip', 'w')
# self.zipdir(self.args.workdir+'/', zipf)
output_dir = '%s/../../../output/%s' % (os.path.dirname(os.path.abspath(__file__)), self.args.execution_id)
zipf = zipfile.ZipFile(output_dir+'/contour.zip', 'w')
self.zipdir(output_dir+'/', zipf)
zipf.close()
self.logger.info("ProgressInfo:100%")
else:
self.logger.critical("Error occurred during processing.")
return ret
# see note below
def youCanQuoteMe(self, item):
return "\"" + item + "\""
def zipdir(self, path, zip):
for root, dirs, files in os.walk(path):
files = [fi for fi in files if fi.startswith(dst)]
for file in files:
zip.write(os.path.join(root, file))
def create_logger(self, logger_config_file):
defaults = {}
logging.config.fileConfig(str(logger_config_file), defaults=defaults)
self.logger = logging.getLogger("main.create_logger")
self.logger.debug("Logger initialized with file " + str(logger_config_file))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--interval", nargs='?', default="10", help="Elevation interval between contours.")
parser.add_argument("-w", "--workdir", nargs='?', default="", help="Remote process sandbox working directory.")
parser.add_argument("-e", "--execution_id", nargs='?', default="", help="Remote process Unique Execution Id.")
cmdargs = parser.parse_args()
gdalTest = GDALTest(cmdargs)
return_code = gdalTest.run()
sys.exit(return_code)
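# Illustrative command line (the interval, paths and execution id are made up):
#   python test.py --interval 25 --workdir /tmp/sandbox --execution_id 1234-abcd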
| gpl-2.0 |
majdigital/bigworldgraph | backend/bwg/wikidata_mixins.py | 1 | 13085 | # -*- coding: utf-8 -*-
"""
This module provides two different way to access Wikidata:
* Through the Wikimedia API with ``Pywikibot`` as a wrapper
* Over a scraper using ``BeautifulSoup4``
Currently, accessing the data via the API is faster than the scraper.
"""
# STD
import abc
import hashlib
import threading
# EXT
import pywikibot
from pywikibot.data import api
# PROJECT
from bwg.helpers import construct_dict_from_source
from bwg.serializing import retry_with_fallback
class RequestCache:
"""
Special class used as a Cache, so that requests being made don't have to be repeated if they occurred in the past.
"""
def __init__(self):
self.lock = threading.Lock()
self.cache = {}
self.requested = set()
self.number_of_requests = 0
self.number_of_avoided_requests = 0
def __contains__(self, item):
return item in self.requested
def __delitem__(self, key):
del self.cache[key]
self.requested.remove(key)
def __getitem__(self, key):
return self.cache[key]
def __setitem__(self, key, value):
self.cache[key] = value
self.requested.add(key)
def __enter__(self):
self.lock.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.lock.release()
def __len__(self):
return len(self.requested)
def request(self, key, request_func, *request_args, **request_kwargs):
"""
        Make a request, but check the cache first to see whether the request can be avoided.
:param key: Key that should be used to cache the request.
:type key: str, int
:param request_func: Function to do the request.
:type request_func: func
:param request_args: Arguments for request.
:type request_args: tuple
:param request_kwargs: Key word arguments for request.
:type request_kwargs: dict
"""
if key in self:
self.number_of_avoided_requests += 1
return self[key]
request_result = request_func(*request_args, **request_kwargs)
self.number_of_requests += 1
self[key] = request_result
return request_result
class AbstractWikidataMixin:
"""
Define the functions inheriting subclasses should implement.
"""
@abc.abstractmethod
def get_matches(self, name, language):
"""
Get matches for an entity's name on Wikidata.
:param name: Name of entity.
:type name: str
:param language: Abbreviation of target language.
:type language: str
:return: List of matches.
:rtype: list
"""
pass
@abc.abstractmethod
def get_entity(self, wikidata_id, language, relevant_properties, properties_implying_relations):
"""
Get Wikidata information about an entity based on its identifier.
:param wikidata_id: Wikidata ID of desired entity.
:type wikidata_id: str
:param language: Abbreviation of target language.
:type language: str
:param relevant_properties: Types of claims that should be included.
:type relevant_properties: list
:param properties_implying_relations: Set of property IDs for properties that are not mere characteristics, but
imply other relations that should later be shown in the graph.
:type properties_implying_relations: list, set
        :return: List of data about every sense of the entity (un-ambiguous entities will just have one sense).
:rtype: list
"""
pass
class WikidataAPIMixin(AbstractWikidataMixin):
"""
Access Wikidata information via Wikimedia's API.
"""
wikidata_site = pywikibot.Site("wikidata", "wikidata")
request_cache = RequestCache()
match_cache = RequestCache()
@retry_with_fallback(triggering_error=KeyError, language="en")
def get_matches(self, name, language):
"""
Get matches for an entity's name on Wikidata.
:param name: Name of entity.
:type name: str
:param language: Abbreviation of target language.
:type language: str
:return: List of matches.
:rtype: list
"""
additional_request_parameters = {
"action": "wbsearchentities",
"language": language,
"type": "item",
"search": name
}
response = self._request(**additional_request_parameters)
if len(response["search"]) == 0:
return []
return [
construct_dict_from_source(
{
"uri": lambda source: source["concepturi"],
"id": lambda source: source["id"],
"description": lambda source: source["description"],
"label": lambda source: source["label"]
},
search_result
)
for search_result in response["search"]
]
@retry_with_fallback(triggering_error=KeyError, language="en")
def get_entity(self, wikidata_id, language, relevant_properties, properties_implying_relations, recursively=True):
"""
Get Wikidata information about an entity based on its identifier.
:param wikidata_id: Wikidata ID of desired entity.
:type wikidata_id: str
:param language: Abbreviation of target language.
:type language: str
:param relevant_properties: Types of claims that should be included.
:type relevant_properties: list
:param properties_implying_relations: Dict of property IDs for properties that are not mere characteristics, but
imply other relations that should later be shown in the graph. The properties are the keys and the entity node
class they're implying are the values.
:type properties_implying_relations: dict
:param recursively: Request data for fof nodes recursively.
:type recursively: bool
:return: Wikidata entity as dictionary
:rtype: dict
"""
additional_request_parameters = {
"ids": wikidata_id
}
response = self._request(**additional_request_parameters)
if len(response["entities"]) == 0:
return {}
return [
construct_dict_from_source(
{
"aliases": lambda source: [alias_dict["value"] for alias_dict in source["aliases"][language]],
"description": lambda source: source["descriptions"][language]["value"],
"id": lambda source: source["id"],
"label": lambda source: source["labels"][language]["value"],
"modified": lambda source: source["modified"],
"claims": lambda source: self.resolve_claims(
source["claims"], language=language,
relevant_properties=relevant_properties,
properties_implying_relations=properties_implying_relations,
recursively=recursively
) if recursively else {}
},
entity
)
for id_, entity in response["entities"].items()
][0]
@retry_with_fallback(triggering_error=KeyError, language="en")
def resolve_claims(self, claims, language, relevant_properties, properties_implying_relations, recursively=True):
"""
Resolve the claims (~ claimed facts) about a wikidata entity.
:param claims: Dictionary with property ID as key and claim data as value.
:type claims: dict
:param language: Abbreviation of target language.
:type language: str
:param relevant_properties: Types of claims that should be included.
:type relevant_properties: list
        :param properties_implying_relations: Dict of property IDs for properties that are not mere characteristics, but
        imply other relations that should later be shown in the graph. The properties are the keys and the entity node
        class they're implying are the values.
        :type properties_implying_relations: dict
:param recursively: Request data for fof nodes recursively.
:type recursively: bool
        :return: Dictionary mapping property names to the resolved data of each relevant claim.
        :rtype: dict
"""
properties = {}
for property_id, claim in claims.items():
if property_id in relevant_properties:
property_name = self.get_property_name(property_id, language=language)
if property_id != "P18":
target = self.get_entity_name(claim[0]["mainsnak"]["datavalue"]["value"]["id"], language=language)
else:
# Handle images differently
target = self.get_image_url(claim[0]["mainsnak"]["datavalue"]["value"])
property_data = {
"target": target,
"implies_relation": property_id in properties_implying_relations,
"entity_class": properties_implying_relations.get(property_id, None),
}
if property_id in properties_implying_relations:
target_senses = self.match_cache.request(
target, self.get_matches, target, language=language
)
property_data["target_data"] = [
self.request_cache.request(
target_sense["id"], self.get_entity,
target_sense["id"], language=language,
relevant_properties=relevant_properties,
properties_implying_relations=properties_implying_relations,
recursively=False
)
for target_sense in target_senses
]
else:
property_data["target_data"] = {}
properties[property_name] = property_data
return properties
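    # Illustrative sketch (not part of the original class): for a claim such as
    # P19 ("place of birth") pointing at "Ulm", the dictionary returned above looks
    # roughly like
    #
    #   {"place of birth": {"target": "Ulm", "implies_relation": False,
    #                       "entity_class": None, "target_data": {}}}
    #
    # with "target_data" populated only for properties listed in
    # properties_implying_relations.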
@retry_with_fallback(triggering_error=KeyError, language="en")
def get_property_name(self, property_id, language):
"""
Get the name of a wikidata property.
:param property_id: Wikidata property ID.
:type property_id: str
:param language: Abbreviation of target language.
:type language: str
:return: Name of property.
:rtype: str
"""
additional_request_parameters = {
"ids": property_id
}
response = self._request(**additional_request_parameters)
return [
entity["labels"][language]["value"]
for id_, entity in response["entities"].items()
][0]
@retry_with_fallback(triggering_error=KeyError, language="en")
def get_entity_name(self, entity_id, language):
"""
Get the name of a wikidata entity.
:param entity_id: Wikidata property ID.
:type entity_id: str
:param language: Abbreviation of target language.
:type language: str
:return: Name of entity.
:rtype: str
"""
additional_request_parameters = {
"ids": entity_id
}
response = self._request(**additional_request_parameters)
return [
entity["labels"][language]["value"]
for id_, entity in response["entities"].items()
][0]
def _request(self, **additional_request_parameters):
"""
Send a request to the API.
:param additional_request_parameters: Additional parameters for the request that is being sent to the API.
:type additional_request_parameters: dict
:return: Response following the request.
:rtype: dict
"""
request_parameters = {
"site": self.wikidata_site,
"action": 'wbgetentities',
"format": 'json',
"use_get": True,
"throttle": False,
"max_retries": 30,
"maxlag": 20,
"retry_wait": 20
}
request_parameters.update(additional_request_parameters)
request = api.Request(**request_parameters)
return request.submit()
@staticmethod
def get_image_url(image_name):
"""
Generate Wikidata URL for a Wikidata image.
:param image_name: Name of image as given by the API request.
:type image_name: str
:return: Link to image.
:rtype: str
"""
# See http://stackoverflow.com/questions/34393884/how-to-get-image-url-property-from-wikidata-item-by-api
# for explanation
image_name = image_name.replace(" ", "_")
md5_sum = hashlib.md5(image_name.encode('utf-8')).hexdigest()
return "https://upload.wikimedia.org/wikipedia/commons/{a}/{ab}/{image_name}".format(
image_name=image_name, a=md5_sum[0], ab=md5_sum[0:2]
)
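# Illustrative sketch (not part of the original module): the Commons URL layout used by
# get_image_url() above is derived from the MD5 hex digest of the underscore-normalised
# file name -- the first one and two hex characters form the two directory levels.
# The file name below is a made-up placeholder.
if __name__ == "__main__":
    import hashlib

    name = "Example image.jpg".replace(" ", "_")
    digest = hashlib.md5(name.encode("utf-8")).hexdigest()
    print("https://upload.wikimedia.org/wikipedia/commons/{a}/{ab}/{image_name}".format(
        image_name=name, a=digest[0], ab=digest[0:2]
    ))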
| mit |
pong3489/TEST_Mission | Lib/site-packages/numpy/lib/utils.py | 54 | 36175 | import os
import sys
import types
import re
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import product, ndarray, ufunc
__all__ = ['issubclass_', 'get_numpy_include', 'issubsctype', 'issubdtype',
'deprecate', 'deprecate_with_doc', 'get_numarray_include',
'get_include', 'info', 'source', 'who', 'lookfor', 'byte_bounds',
'may_share_memory', 'safe_eval']
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
else:
# using installed numpy core headers
import numpy.core as core
d = os.path.join(os.path.dirname(core.__file__), 'include')
return d
def get_numarray_include(type=None):
"""
Return the directory that contains the numarray \\*.h header files.
Extension modules that need to compile against numarray should use this
function to locate the appropriate include directory.
Parameters
----------
type : any, optional
If `type` is not None, the location of the NumPy headers is returned
as well.
Returns
-------
dirs : str or list of str
If `type` is None, `dirs` is a string containing the path to the
numarray headers.
If `type` is not None, `dirs` is a list of strings with first the
path(s) to the numarray headers, followed by the path to the NumPy
headers.
Notes
-----
Useful when using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_numarray_include()])
...
"""
from numpy.numarray import get_numarray_include_dirs
include_dirs = get_numarray_include_dirs()
if type is None:
return include_dirs[0]
else:
return include_dirs + [get_include()]
if sys.version_info < (2, 4):
# Can't set __name__ in 2.3
import new
def _set_function_name(func, name):
func = new.function(func.func_code, func.func_globals,
name, func.func_defaults, func.func_closure)
return func
else:
def _set_function_name(func, name):
func.__name__ = name
return func
class _Deprecate(object):
"""
Decorator class to deprecate old functions.
Refer to `deprecate` for details.
See Also
--------
deprecate
"""
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
self.message = message
def __call__(self, func, *args, **kwargs):
"""
        Decorator call. Refer to ``deprecate``.
"""
old_name = self.old_name
new_name = self.new_name
message = self.message
import warnings
if old_name is None:
try:
old_name = func.func_name
except AttributeError:
old_name = func.__name__
if new_name is None:
depdoc = "`%s` is deprecated!" % old_name
else:
depdoc = "`%s` is deprecated, use `%s` instead!" % \
(old_name, new_name)
if message is not None:
depdoc += "\n" + message
def newfunc(*args,**kwds):
"""`arrayrange` is deprecated, use `arange` instead!"""
warnings.warn(depdoc, DeprecationWarning)
return func(*args, **kwds)
newfunc = _set_function_name(newfunc, old_name)
doc = func.__doc__
if doc is None:
doc = depdoc
else:
doc = '\n\n'.join([depdoc, doc])
newfunc.__doc__ = doc
try:
d = func.__dict__
except AttributeError:
pass
else:
newfunc.__dict__.update(d)
return newfunc
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
docstring, rebinds ``old_name.__name__`` and returns the new
function object.
This function may also be used as a decorator.
Parameters
----------
func : function
The function to be deprecated.
old_name : str, optional
The name of the function to be deprecated. Default is None, in which
case the name of `func` is used.
new_name : str, optional
The new name for the function. Default is None, in which case
the deprecation message is that `old_name` is deprecated. If given,
the deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
Additional explanation of the deprecation. Displayed in the docstring
after the warning.
Returns
-------
old_func : function
The deprecated function.
Examples
--------
Note that ``olduint`` returns a value after printing Deprecation Warning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
/usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
DeprecationWarning: uint32 is deprecated
warnings.warn(str1, DeprecationWarning)
6
"""
# Deprecate may be run as a function or as a decorator
# If run as a function, we initialise the decorator class
# and execute its __call__ method.
if args:
fn = args[0]
args = args[1:]
# backward compatibility -- can be removed
# after next release
if 'newname' in kwargs:
kwargs['new_name'] = kwargs.pop('newname')
if 'oldname' in kwargs:
kwargs['old_name'] = kwargs.pop('oldname')
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
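# Illustrative sketch (not part of the original module): `deprecate` also works as a
# decorator, e.g.
#
#     @deprecate(new_name="new_func", message="use new_func() instead")
#     def old_func(x):
#         return x
#
# calling old_func() then emits a DeprecationWarning before delegating to the original
# body.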
deprecate_with_doc = lambda msg: _Deprecate(message=msg)
get_numpy_include = deprecate(get_include, 'get_numpy_include', 'get_include')
#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second integer is
just past the last byte of the array. If `a` is not contiguous it
will not use every byte between the (`low`, `high`) values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2, dtype='G'); I.dtype
dtype('complex192')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
nd_a = len(ashape)
bytes_a = int(ai['typestr'][2:])
a_low = a_high = a_data
if astrides is None: # contiguous case
a_high += product(ashape, dtype=int)*bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape-1)*stride
else:
a_high += (shape-1)*stride
a_high += bytes_a
return a_low, a_high
def may_share_memory(a, b):
"""
Determine if two arrays can share memory
The memory-bounds of a and b are computed. If they overlap then
this function returns True. Otherwise, it returns False.
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Parameters
----------
a, b : ndarray
Returns
-------
out : bool
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
"""
a_low, a_high = byte_bounds(a)
b_low, b_high = byte_bounds(b)
if b_low >= a_high or a_low >= b_high:
return False
return True
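# Illustrative sketch (not part of the original module): two overlapping views of the
# same buffer report True, while freshly allocated arrays report False.
if __name__ == "__main__":
    import numpy as _np
    _base = _np.arange(10)
    print(may_share_memory(_base[:5], _base[3:]))    # True: the views overlap
    print(may_share_memory(_base, _np.arange(10)))   # False: separate buffers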
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
"""
Print the Numpy arrays in the given dictionary.
If there is no dictionary passed in or `vardict` is None then returns
Numpy arrays in the globals() dictionary (all Numpy arrays in the
namespace).
Parameters
----------
vardict : dict, optional
A dictionary possibly containing ndarrays. Default is globals().
Returns
-------
out : None
Returns 'None'.
Notes
-----
Prints out the name, shape, bytes and type of all of the ndarrays present
in `vardict`.
Examples
--------
>>> a = np.arange(10)
>>> b = np.ones(20)
>>> np.who()
Name Shape Bytes Type
===========================================================
a 10 40 int32
b 20 160 float64
Upper bound on total bytes = 200
>>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
y 3 24 float64
x 2 16 float64
Upper bound on total bytes = 40
"""
if vardict is None:
frame = sys._getframe().f_back
vardict = frame.f_globals
sta = []
cache = {}
for name in vardict.keys():
if isinstance(vardict[name],ndarray):
var = vardict[name]
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
original=0
else:
cache[idv] = name
namestr = name
original=1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
original])
maxname = 0
maxshape = 0
maxbyte = 0
totalbytes = 0
for k in range(len(sta)):
val = sta[k]
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
maxshape = len(val[1])
if maxbyte < len(val[2]):
maxbyte = len(val[2])
if val[4]:
totalbytes += int(val[2])
if len(sta) > 0:
sp1 = max(10,maxname)
sp2 = max(10,maxshape)
sp3 = max(10,maxbyte)
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print prval + "\n" + "="*(len(prval)+5) + "\n"
for k in range(len(sta)):
val = sta[k]
print "%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
val[3])
print "\nUpper bound on total bytes = %d" % totalbytes
return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of
# width characters. End lines on a comma and begin argument list
# indented with the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while 1:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x],types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
"""
Get help information for a function, class, or module.
Parameters
----------
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
modules are searched for matching objects.
If None, information about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
File like object that the output is written to, default is ``stdout``.
The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
See Also
--------
source, lookfor
Notes
-----
When used interactively with an object, ``np.info(obj)`` is equivalent to
``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt.
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
polyval(p, x)
Evaluate the polynomial p at x.
...
When using a string for `object` it is possible to get multiple results.
>>> np.info('fft') # doctest: +SKIP
*** Found in numpy ***
Core FFT routines
...
*** Found in numpy.fft ***
fft(a, n=None, axis=-1)
...
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
import pydoc, inspect
if hasattr(object,'_ppimport_importer') or \
hasattr(object, '_ppimport_module'):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if object is None:
info(info)
elif isinstance(object, ndarray):
import numpy.numarray as nn
nn.info(object, output=output, numpy=1)
elif isinstance(object, str):
if _namedict is None:
_namedict, _dictlist = _makenamedict(toplevel)
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print >> output, "\n *** Repeat reference found in %s *** " % namestr
else:
objlist.append(id(obj))
print >> output, " *** Found in %s ***" % namestr
info(obj)
print >> output, "-"*maxwidth
numfound += 1
except KeyError:
pass
if numfound == 0:
print >> output, "Help for %s not found." % object
else:
print >> output, "\n *** Total of %d references found. ***" % numfound
elif inspect.isfunction(object):
name = object.func_name
arguments = inspect.formatargspec(*inspect.getargspec(object))
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
print >> output, inspect.getdoc(object)
elif inspect.isclass(object):
name = object.__name__
arguments = "()"
try:
if hasattr(object, '__init__'):
arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
except:
pass
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object,'__init__'):
print >> output, inspect.getdoc(object.__init__)
else:
print >> output, inspect.getdoc(object)
methods = pydoc.allmethods(object)
if methods != []:
print >> output, "\n\nMethods:\n"
for meth in methods:
if meth[0] == '_':
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
print >> output, " %s -- %s" % (meth, methstr)
elif type(object) is types.InstanceType: ## check for __call__ method
print >> output, "Instance of class: ", object.__class__.__name__
print >> output
if hasattr(object, '__call__'):
arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if hasattr(object,'name'):
name = "%s" % object.name
else:
name = "<name>"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
doc = inspect.getdoc(object.__call__)
if doc is not None:
print >> output, inspect.getdoc(object.__call__)
print >> output, inspect.getdoc(object)
else:
print >> output, inspect.getdoc(object)
elif inspect.ismethod(object):
name = object.__name__
arguments = inspect.formatargspec(*inspect.getargspec(object.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
print >> output, inspect.getdoc(object)
elif hasattr(object, '__doc__'):
print >> output, inspect.getdoc(object)
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a Numpy object.
The source code is only returned for objects written in Python. Many
functions and classes are defined in C and will therefore not return
useful information.
Parameters
----------
object : numpy object
Input object. This can be any object (function, class, module, ...).
output : file object, optional
If `output` not supplied then source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
append 'a' modes.
See Also
--------
lookfor, info
Examples
--------
>>> np.source(np.interp) #doctest: +SKIP
In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
def interp(x, xp, fp, left=None, right=None):
\"\"\".... (full docstring printed)\"\"\"
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
The source code is only returned for objects written in Python.
>>> np.source(np.array) #doctest: +SKIP
Not available for this object.
"""
# Local import to speed up numpy's import time.
import inspect
try:
print >> output, "In file: %s\n" % inspect.getsourcefile(object)
print >> output, inspect.getsource(object)
except:
print >> output, "Not available for this object."
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
# regexp whose match indicates that the string may contain a function signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
Do a keyword search on docstrings.
    A list of objects that matched the search is displayed,
sorted by relevance. All given keywords need to be found in the
docstring for it to be returned as a result, but the order does
not matter.
Parameters
----------
what : str
String containing words to look for.
module : str or list, optional
Name of module(s) whose docstrings to go through.
import_modules : bool, optional
Whether to import sub-modules in packages. Default is True.
regenerate : bool, optional
Whether to re-generate the docstring cache. Default is False.
output : file-like, optional
File-like object to write the output to. If omitted, use a pager.
See Also
--------
source, info
Notes
-----
Relevance is determined only roughly, by checking if the keywords occur
in the function name, at the start of a docstring, etc.
Examples
--------
>>> np.lookfor('binary representation')
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
Return the binary representation of the input number as a string.
numpy.core.setup_common.long_double_representation
Given a binary dump as given by GNU od -b, look for long double
numpy.base_repr
Return a string representation of a number in the given base system.
...
"""
import pydoc
# Cache
cache = _lookfor_generate_cache(module, import_modules, regenerate)
# Search
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
if not whats: return
for name, (docstring, kind, index) in cache.iteritems():
if kind in ('module', 'object'):
# don't show modules or objects
continue
ok = True
doc = docstring.lower()
for w in whats:
if w not in doc:
ok = False
break
if ok:
found.append(name)
# Relevance sort
# XXX: this is full Harrison-Stetson heuristics now,
# XXX: it probably could be improved
kind_relevance = {'func': 1000, 'class': 1000,
'module': -1000, 'object': -1000}
def relevance(name, docstr, kind, index):
r = 0
# do the keywords occur within the start of the docstring?
first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
r += sum([200 for w in whats if w in first_doc])
# do the keywords occur in the function name?
r += sum([30 for w in whats if w in name])
# is the full name long?
r += -len(name) * 5
# is the object of bad type?
r += kind_relevance.get(kind, -1000)
# is the object deep in namespace hierarchy?
r += -name.count('.') * 10
r += max(-index / 100, -100)
return r
def relevance_value(a):
return relevance(a, *cache[a])
found.sort(key=relevance_value)
# Pretty-print
s = "Search results for '%s'" % (' '.join(whats))
help_text = [s, "-"*len(s)]
for name in found[::-1]:
doc, kind, ix = cache[name]
doclines = [line.strip() for line in doc.strip().split("\n")
if line.strip()]
# find a suitable short description
try:
first_doc = doclines[0].strip()
if _function_signature_re.search(first_doc):
first_doc = doclines[1].strip()
except IndexError:
first_doc = ""
help_text.append("%s\n %s" % (name, first_doc))
if not found:
help_text.append("Nothing found.")
# Output
if output is not None:
output.write("\n".join(help_text))
elif len(help_text) > 10:
pager = pydoc.getpager()
pager("\n".join(help_text))
else:
print "\n".join(help_text)
def _lookfor_generate_cache(module, import_modules, regenerate):
"""
Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
regenerate: bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
"""
global _lookfor_caches
# Local import to speed up numpy's import time.
import inspect
from cStringIO import StringIO
if module is None:
module = "numpy"
if isinstance(module, str):
try:
__import__(module)
except ImportError:
return {}
module = sys.modules[module]
elif isinstance(module, list) or isinstance(module, tuple):
cache = {}
for mod in module:
cache.update(_lookfor_generate_cache(mod, import_modules,
regenerate))
return cache
if id(module) in _lookfor_caches and not regenerate:
return _lookfor_caches[id(module)]
# walk items and collect docstrings
cache = {}
_lookfor_caches[id(module)] = cache
seen = {}
index = 0
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
if id(item) in seen: continue
seen[id(item)] = True
index += 1
kind = "object"
if inspect.ismodule(item):
kind = "module"
try:
_all = item.__all__
except AttributeError:
_all = None
# import sub-packages
if import_modules and hasattr(item, '__path__'):
for pth in item.__path__:
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
if os.path.isfile(this_py) and mod_path.endswith('.py'):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
else:
continue
if to_import == '__init__':
continue
try:
# Catch SystemExit, too
base_exc = BaseException
except NameError:
# Python 2.4 doesn't have BaseException
base_exc = Exception
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
__import__("%s.%s" % (name, to_import))
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
except base_exc:
continue
for n, v in _getmembers(item):
item_name = getattr(v, '__name__', "%s.%s" % (name, n))
mod_name = getattr(v, '__module__', None)
if '.' not in item_name and mod_name:
item_name = "%s.%s" % (mod_name, item_name)
if not item_name.startswith(name + '.'):
# don't crawl "foreign" objects
if isinstance(v, ufunc):
# ... unless they are ufuncs
pass
else:
continue
elif not (inspect.ismodule(v) or _all is None or n in _all):
continue
stack.append(("%s.%s" % (name, n), v))
elif inspect.isclass(item):
kind = "class"
for n, v in _getmembers(item):
stack.append(("%s.%s" % (name, n), v))
elif hasattr(item, "__call__"):
kind = "func"
doc = inspect.getdoc(item)
if doc is not None:
cache[name] = (doc, kind, index)
return cache
def _getmembers(item):
import inspect
try:
members = inspect.getmembers(item)
except AttributeError:
members = [(x, getattr(item, x)) for x in dir(item)
if hasattr(item, x)]
return members
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
# ASPN Python Cookbook recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
# http://www.python.org/license
# It has been modified to:
# * handle unary -/+
# * support True/False/None
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
"""
Object to evaluate constant string expressions.
This includes strings with lists, dicts and tuples using the abstract
syntax tree created by ``compiler.parse``.
For an example of usage, see `safe_eval`.
See Also
--------
safe_eval
"""
if sys.version_info[0] < 3:
def visit(self, node, **kw):
cls = node.__class__
meth = getattr(self,'visit'+cls.__name__,self.default)
return meth(node, **kw)
def default(self, node, **kw):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node, **kw):
for child in node.getChildNodes():
return self.visit(child, **kw)
def visitConst(self, node, **kw):
return node.value
def visitDict(self, node,**kw):
return dict([(self.visit(k),self.visit(v)) for k,v in node.items])
def visitTuple(self, node, **kw):
return tuple([self.visit(i) for i in node.nodes])
def visitList(self, node, **kw):
return [self.visit(i) for i in node.nodes]
def visitUnaryAdd(self, node, **kw):
return +self.visit(node.getChildNodes()[0])
def visitUnarySub(self, node, **kw):
return -self.visit(node.getChildNodes()[0])
def visitName(self, node, **kw):
if node.name == 'False':
return False
elif node.name == 'True':
return True
elif node.name == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.name)
else:
def visit(self, node):
cls = node.__class__
meth = getattr(self, 'visit' + cls.__name__, self.default)
return meth(node)
def default(self, node):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node):
return self.visit(node.body)
def visitNum(self, node):
return node.n
def visitStr(self, node):
return node.s
def visitBytes(self, node):
return node.s
def visitDict(self, node,**kw):
return dict([(self.visit(k), self.visit(v))
for k, v in zip(node.keys, node.values)])
def visitTuple(self, node):
return tuple([self.visit(i) for i in node.elts])
def visitList(self, node):
return [self.visit(i) for i in node.elts]
def visitUnaryOp(self, node):
import ast
if isinstance(node.op, ast.UAdd):
return +self.visit(node.operand)
elif isinstance(node.op, ast.USub):
return -self.visit(node.operand)
else:
raise SyntaxError("Unknown unary op: %r" % node.op)
def visitName(self, node):
if node.id == 'False':
return False
elif node.id == 'True':
return True
elif node.id == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.id)
def safe_eval(source):
"""
Protected string evaluation.
Evaluate a string containing a Python literal expression without
allowing the execution of arbitrary non-literal code.
Parameters
----------
source : str
The string to evaluate.
Returns
-------
obj : object
The result of evaluating `source`.
Raises
------
SyntaxError
If the code has invalid Python syntax, or if it contains non-literal
code.
Examples
--------
>>> np.safe_eval('1')
1
>>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
>>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
>>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
SyntaxError: Unsupported source construct: compiler.ast.CallFunc
"""
# Local import to speed up numpy's import time.
try:
import compiler
except ImportError:
import ast as compiler
walker = SafeEval()
try:
ast = compiler.parse(source, mode="eval")
except SyntaxError, err:
raise
try:
return walker.visit(ast)
except SyntaxError, err:
raise
#-----------------------------------------------------------------------------
| gpl-3.0 |
errx/django | django/conf/locale/sr/formats.py | 394 | 2011 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
# '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
# '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
# '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
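# Illustrative sketch (not part of the original settings module): the *_INPUT_FORMATS
# entries are plain strptime patterns, so the first DATETIME_INPUT_FORMATS entry parses
# a Serbian-style timestamp directly.
if __name__ == "__main__":
    from datetime import datetime
    print(datetime.strptime('25.10.2006. 14:30:59', '%d.%m.%Y. %H:%M:%S'))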
| bsd-3-clause |
wolverineav/neutron | neutron/services/metering/drivers/noop/noop_driver.py | 53 | 1594 | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log_helpers
from neutron.services.metering.drivers import abstract_driver
class NoopMeteringDriver(abstract_driver.MeteringAbstractDriver):
@log_helpers.log_method_call
def update_routers(self, context, routers):
pass
@log_helpers.log_method_call
def remove_router(self, context, router_id):
pass
@log_helpers.log_method_call
def update_metering_label_rules(self, context, routers):
pass
@log_helpers.log_method_call
def add_metering_label_rule(self, context, routers):
pass
@log_helpers.log_method_call
def remove_metering_label_rule(self, context, routers):
pass
@log_helpers.log_method_call
def add_metering_label(self, context, routers):
pass
@log_helpers.log_method_call
def remove_metering_label(self, context, routers):
pass
@log_helpers.log_method_call
def get_traffic_counters(self, context, routers):
pass
| apache-2.0 |
Nicolou/grrSallesRepetition | fckeditor/editor/filemanager/connectors/py/fckcommands.py | 14 | 6293 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
def getFolders(self, resourceType, currentFolder):
"""
		Purpose: command to receive a list of folders
"""
# Map the virtual path to our local server
serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
s = """<Folders>""" # Open the folders node
for someObject in os.listdir(serverPath):
someObjectPath = mapServerFolder(serverPath, someObject)
if os.path.isdir(someObjectPath):
s += """<Folder name="%s" />""" % (
convertToXmlAttribute(someObject)
)
s += """</Folders>""" # Close the folders node
return s
class GetFoldersAndFilesCommandMixin (object):
def getFoldersAndFiles(self, resourceType, currentFolder):
"""
		Purpose: command to receive a list of folders and files
"""
# Map the virtual path to our local server
serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
# Open the folders / files node
folders = """<Folders>"""
files = """<Files>"""
for someObject in os.listdir(serverPath):
someObjectPath = mapServerFolder(serverPath, someObject)
if os.path.isdir(someObjectPath):
folders += """<Folder name="%s" />""" % (
convertToXmlAttribute(someObject)
)
elif os.path.isfile(someObjectPath):
size = os.path.getsize(someObjectPath)
files += """<File name="%s" size="%s" />""" % (
convertToXmlAttribute(someObject),
os.path.getsize(someObjectPath)
)
# Close the folders / files node
folders += """</Folders>"""
files += """</Files>"""
return folders + files
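	# Illustrative sketch (not part of the original connector): for a directory holding
	# a sub-folder "images" and a 321-byte file "logo.gif", the string returned above is
	# roughly
	#
	#   <Folders><Folder name="images" /></Folders><Files><File name="logo.gif" size="321" /></Files>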
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg ='';
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
		# No need to check if the parent exists, just create the whole hierarchy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
			oldumask = os.umask(0)
			os.makedirs(folderPath, mode=permissions)
			os.umask( oldumask )
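	# Illustrative sketch (not part of the original connector): clearing the umask is
	# what lets the explicit mode take effect; the block above is equivalent to
	#
	#   old = os.umask(0)
	#   try:
	#       os.makedirs(path, mode)
	#   finally:
	#       os.umask(old)
	#
	# with "path" and "mode" standing in for the local variables.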
class UploadFileCommandMixin (object):
def uploadFile(self, resourceType, currentFolder):
"""
Purpose: command to upload files to server (same as FileUpload)
"""
errorNo = 0
if self.request.has_key("NewFile"):
# newFile has all the contents we need
newFile = self.request.get("NewFile", "")
# Get the file name
newFileName = newFile.filename
newFileName = sanitizeFileName( newFileName )
newFileNameOnly = removeExtension(newFileName)
newFileExtension = getExtension(newFileName).lower()
allowedExtensions = Config.AllowedExtensions[resourceType]
deniedExtensions = Config.DeniedExtensions[resourceType]
if (allowedExtensions):
# Check for allowed
isAllowed = False
if (newFileExtension in allowedExtensions):
isAllowed = True
elif (deniedExtensions):
# Check for denied
isAllowed = True
if (newFileExtension in deniedExtensions):
isAllowed = False
else:
# No extension limitations
isAllowed = True
if (isAllowed):
# Upload to operating system
# Map the virtual path to the local server path
currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
i = 0
while (True):
newFilePath = os.path.join (currentFolderPath,newFileName)
if os.path.exists(newFilePath):
i += 1
newFileName = "%s(%04d).%s" % (
newFileNameOnly, i, newFileExtension
)
errorNo= 201 # file renamed
else:
# Read file contents and write to the desired path (similar to php's move_uploaded_file)
fout = file(newFilePath, 'wb')
while (True):
chunk = newFile.file.read(100000)
if not chunk: break
fout.write (chunk)
fout.close()
if os.path.exists ( newFilePath ):
doChmod = False
try:
doChmod = Config.ChmodOnUpload
permissions = Config.ChmodOnUpload
except AttributeError: #ChmodOnUpload undefined
doChmod = True
permissions = 0755
if ( doChmod ):
oldumask = os.umask(0)
os.chmod( newFilePath, permissions )
os.umask( oldumask )
newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
return self.sendUploadResults( errorNo , newFileUrl, newFileName )
else:
return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
else:
return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| gpl-2.0 |
codeforsanjose/trash-pickup-portal | data/env/lib/python2.7/site-packages/setuptools/extension.py | 229 | 1649 | import sys
import re
import functools
import distutils.core
import distutils.errors
import distutils.extension
from .dist import _get_unpatched
from . import msvc9_support
_Extension = _get_unpatched(distutils.core.Extension)
msvc9_support.patch_for_specialized_compiler()
def _have_cython():
"""
Return True if Cython can be imported.
"""
    cython_impl = 'Cython.Distutils.build_ext'
try:
# from (cython_impl) import build_ext
__import__(cython_impl, fromlist=['build_ext']).build_ext
return True
except Exception:
pass
return False
# for compatibility
have_pyrex = _have_cython
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
def _convert_pyx_sources_to_lang(self):
"""
Replace sources with .pyx extensions to sources with the target
language extension. This mechanism allows language authors to supply
pre-converted sources but to prefer the .pyx sources.
"""
if _have_cython():
# the build has Cython, so allow it to compile the .pyx files
return
lang = self.language or ''
target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
sub = functools.partial(re.sub, '.pyx$', target_ext)
self.sources = list(map(sub, self.sources))
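    # Illustrative sketch (not part of the original module): with Cython unavailable,
    #
    #   ext = Extension('pkg.mod', ['pkg/mod.pyx'], language='c++')
    #   ext._convert_pyx_sources_to_lang()
    #
    # leaves ext.sources as ['pkg/mod.cpp']; without language='c++' the target
    # extension is '.c'.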
class Library(Extension):
"""Just like a regular Extension, but built as a library instead"""
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = Extension
| mit |
Evil-Green/Lonas_KL-GT-I9300-1 | Documentation/target/tcm_mod_builder.py | 3119 | 42754 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!(se_nacl_new))\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!(tpg)) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!(" + fabric_mod_port + ")) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (!(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return -ENOMEM;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
buf += "#endif\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
			process_fo = 1
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
			buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
	tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
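# Example invocation (illustrative; the module name is hypothetical, and the
# script expects to be run from two levels below the kernel tree root,
# e.g. drivers/target/, because of the "../../" path handling in main()):
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI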
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
SEMCOG/synthpop | synthpop/test/test_categorizer.py | 4 | 3418 | import pytest
import numpy as np
from ..census_helpers import Census
from .. import categorizer as cat
@pytest.fixture
def c():
return Census("827402c2958dcf515e4480b7b2bb93d1025f9389")
@pytest.fixture
def acs_data(c):
population = ['B01001_001E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
all_columns = population + sex + race + male_age_columns + \
female_age_columns
df = c.block_group_query(all_columns, "06", "075", tract="030600")
return df
@pytest.fixture
def pums_data(c):
return c.download_population_pums("06", "07506")
def test_categorize(acs_data, pums_data):
p_acs_cat = cat.categorize(acs_data, {
("population", "total"): "B01001_001E",
("age", "19 and under"): "B01001_003E + B01001_004E + B01001_005E + "
"B01001_006E + B01001_007E + B01001_027E + "
"B01001_028E + B01001_029E + B01001_030E + "
"B01001_031E",
("age", "20 to 35"): "B01001_008E + B01001_009E + B01001_010E + "
"B01001_011E + B01001_012E + B01001_032E + "
"B01001_033E + B01001_034E + B01001_035E + "
"B01001_036E",
("age", "35 to 60"): "B01001_013E + B01001_014E + B01001_015E + "
"B01001_016E + B01001_017E + B01001_037E + "
"B01001_038E + B01001_039E + B01001_040E + "
"B01001_041E",
("age", "above 60"): "B01001_018E + B01001_019E + B01001_020E + "
"B01001_021E + B01001_022E + B01001_023E + "
"B01001_024E + B01001_025E + B01001_042E + "
"B01001_043E + B01001_044E + B01001_045E + "
"B01001_046E + B01001_047E + B01001_048E + "
"B01001_049E",
("race", "white"): "B02001_002E",
("race", "black"): "B02001_003E",
("race", "asian"): "B02001_005E",
("race", "other"): "B02001_004E + B02001_006E + B02001_007E + "
"B02001_008E",
("sex", "male"): "B01001_002E",
("sex", "female"): "B01001_026E"
}, index_cols=['NAME'])
assert len(p_acs_cat) == 3
assert len(p_acs_cat.columns) == 11
assert len(p_acs_cat.columns.names) == 2
assert p_acs_cat.columns[0][0] == "age"
assert np.all(cat.sum_accross_category(p_acs_cat) < 2)
def age_cat(r):
if r.AGEP <= 19:
return "19 and under"
elif r.AGEP <= 35:
return "20 to 35"
elif r.AGEP <= 60:
return "35 to 60"
return "above 60"
def race_cat(r):
if r.RAC1P == 1:
return "white"
elif r.RAC1P == 2:
return "black"
elif r.RAC1P == 6:
return "asian"
return "other"
def sex_cat(r):
if r.SEX == 1:
return "male"
return "female"
pums_data, jd_persons = cat.joint_distribution(
pums_data,
cat.category_combinations(p_acs_cat.columns),
{"age": age_cat, "race": race_cat, "sex": sex_cat}
)
| bsd-3-clause |
ginkgobioworks/edge | src/edge/models/genome_updater.py | 1 | 2960 | from contextlib import contextmanager
from edge.models.fragment import Fragment
class Genome_Updater(object):
"""
Mixin with helpers for updating genome.
"""
@contextmanager
def annotate_fragment_by_name(self, name):
f = [x for x in self.fragments.all() if x.name == name]
if len(f) != 1:
raise Exception("Zero or more than one fragments have name %s" % (name,))
u = f[0].indexed_fragment()
yield u
@contextmanager
def annotate_fragment_by_fragment_id(self, fragment_id):
f = [x for x in self.fragments.all() if x.id == fragment_id]
if len(f) != 1:
raise Exception(
"Zero or more than one fragments have ID %s" % (fragment_id,)
)
u = f[0].indexed_fragment()
yield u
@contextmanager
def update_fragment_by_name(self, name, new_name=None):
if self.parent is None:
raise Exception(
"Cannot update fragment without a parent genome. Try editing instead."
)
f = [x for x in self.fragments.filter(name=name)]
if len(f) != 1:
raise Exception("Zero or more than one fragments have name %s" % (name,))
new_name = name if new_name is None else new_name
u = f[0].indexed_fragment().update(new_name)
yield u
self._add_updated_fragment(u)
@contextmanager
def update_fragment_by_fragment_id(
self, fragment_id, new_name=None, new_fragment=True
):
if self.parent is None:
raise Exception(
"Cannot update fragment without a parent genome. Try editing instead."
)
f = [x for x in self.fragments.filter(id=fragment_id)]
if len(f) != 1:
raise Exception(
"Zero or more than one fragments have ID %s" % (fragment_id,)
)
new_name = f[0].name if new_name is None else new_name
u = f[0].indexed_fragment()
if new_fragment is True:
u = u.update(new_name)
yield u
if new_fragment is True:
self._add_updated_fragment(u)
def add_fragment(self, name, sequence, circular=False):
if len(sequence) == 0:
raise Exception("Cannot create a fragment of length zero")
new_fragment = Fragment.create_with_sequence(
name=name, sequence=sequence, circular=circular
)
self.genome_fragment_set.create(fragment=new_fragment, inherited=False)
return new_fragment
def _add_updated_fragment(self, fragment):
existing_fragment_ids = [f.id for f in self.fragments.all()]
if fragment.parent_id in existing_fragment_ids:
gf = self.genome_fragment_set.get(fragment=fragment.parent)
gf.fragment = fragment
gf.inherited = False
gf.save()
else:
raise Exception("Fragment parent not part of the genome")
| mit |
shuggiefisher/django-on-google-app-engine-base | django/db/models/fields/subclassing.py | 229 | 4356 | """
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the __metaclass__ for your Field subclass, implement
to_python() and the other necessary methods and everything will work seamlessly.
"""
from inspect import getargspec
from warnings import warn
def call_with_connection(func):
arg_names, varargs, varkwargs, defaults = getargspec(func)
updated = ('connection' in arg_names or varkwargs)
if not updated:
warn("A Field class whose %s method hasn't been updated to take a "
"`connection` argument." % func.__name__,
DeprecationWarning, stacklevel=3)
def inner(*args, **kwargs):
if 'connection' not in kwargs:
from django.db import connection
kwargs['connection'] = connection
warn("%s has been called without providing a connection argument. " %
func.__name__, DeprecationWarning,
stacklevel=2)
if updated:
return func(*args, **kwargs)
if 'connection' in kwargs:
del kwargs['connection']
return func(*args, **kwargs)
return inner
def call_with_connection_and_prepared(func):
arg_names, varargs, varkwargs, defaults = getargspec(func)
updated = (
('connection' in arg_names or varkwargs) and
('prepared' in arg_names or varkwargs)
)
if not updated:
warn("A Field class whose %s method hasn't been updated to take "
"`connection` and `prepared` arguments." % func.__name__,
DeprecationWarning, stacklevel=3)
def inner(*args, **kwargs):
if 'connection' not in kwargs:
from django.db import connection
kwargs['connection'] = connection
warn("%s has been called without providing a connection argument. " %
func.__name__, DeprecationWarning,
stacklevel=2)
if updated:
return func(*args, **kwargs)
if 'connection' in kwargs:
del kwargs['connection']
if 'prepared' in kwargs:
del kwargs['prepared']
return func(*args, **kwargs)
return inner
class LegacyConnection(type):
"""
A metaclass to normalize arguments give to the get_db_prep_* and db_type
methods on fields.
"""
def __new__(cls, name, bases, attrs):
new_cls = super(LegacyConnection, cls).__new__(cls, name, bases, attrs)
for attr in ('db_type', 'get_db_prep_save'):
setattr(new_cls, attr, call_with_connection(getattr(new_cls, attr)))
for attr in ('get_db_prep_lookup', 'get_db_prep_value'):
setattr(new_cls, attr, call_with_connection_and_prepared(getattr(new_cls, attr)))
return new_cls
class SubfieldBase(LegacyConnection):
"""
A metaclass for custom Field subclasses. This ensures the model's attribute
has the descriptor protocol attached to it.
"""
def __new__(cls, name, bases, attrs):
new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
new_class.contribute_to_class = make_contrib(
new_class, attrs.get('contribute_to_class')
)
return new_class
class Creator(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance.')
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
def make_contrib(superclass, func=None):
"""
Returns a suitable contribute_to_class() method for the Field subclass.
If 'func' is passed in, it is the existing contribute_to_class() method on
the subclass and it is called before anything else. It is assumed in this
case that the existing contribute_to_class() calls all the necessary
superclass methods.
"""
def contribute_to_class(self, cls, name):
if func:
func(self, cls, name)
else:
super(superclass, self).contribute_to_class(cls, name)
setattr(cls, self.name, Creator(self))
return contribute_to_class
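# Illustrative usage sketch (hypothetical field, not part of this module):
# a custom field opts in by declaring SubfieldBase as its metaclass and
# implementing to_python(); the Creator descriptor then coerces values on
# every attribute assignment.
#
#   class ListField(models.TextField):
#       __metaclass__ = SubfieldBase
#
#       def to_python(self, value):
#           if isinstance(value, list):
#               return value
#           return value.split(',')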
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| unlicense |
tthtlc/volatility | volatility/plugins/overlays/linux/linux64.py | 44 | 1595 | # Volatility
# Copyright (c) 2011 Michael Cohen <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
Support for 64 bit Linux systems.
@author: Michael Cohen
@license: GNU General Public License 2.0
@contact: [email protected]
"""
from volatility import obj
class VolatilityDTB(obj.VolatilityMagic):
"""A scanner for DTB values."""
def generate_suggestions(self):
"""Tries to locate the DTB."""
profile = self.obj_vm.profile
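        # init_level4_pgt is the kernel's master page table (PGD); subtracting
        # the x86_64 kernel text mapping base (0xffffffff80000000) converts its
        # virtual address to the physical address suggested as the DTB.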
yield profile.get_symbol("init_level4_pgt") - 0xffffffff80000000
class Linux64ObjectClasses(obj.ProfileModification):
""" Makes slight changes to the DTB checker """
conditions = {'os': lambda x: x == 'linux',
'memory_model': lambda x: x == '64bit'}
before = ['LinuxObjectClasses']
def modification(self, profile):
profile.object_classes.update({
'VolatilityDTB': VolatilityDTB
})
| gpl-2.0 |
ujjvala-addsol/addsol_hr | openerp/addons/account/account_financial_report.py | 339 | 7636 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
# ---------------------------------------------------------
# Account Financial Report
# ---------------------------------------------------------
class account_financial_report(osv.osv):
_name = "account.financial.report"
_description = "Account Report"
def _get_level(self, cr, uid, ids, field_name, arg, context=None):
'''Returns a dictionary with key=the ID of a record and value = the level of this
record in the tree structure.'''
res = {}
for report in self.browse(cr, uid, ids, context=context):
level = 0
if report.parent_id:
level = report.parent_id.level + 1
res[report.id] = level
return res
def _get_children_by_order(self, cr, uid, ids, context=None):
'''returns a dictionary with the key= the ID of a record and value = all its children,
computed recursively, and sorted by sequence. Ready for the printing'''
res = []
for id in ids:
res.append(id)
ids2 = self.search(cr, uid, [('parent_id', '=', id)], order='sequence ASC', context=context)
res += self._get_children_by_order(cr, uid, ids2, context=context)
return res
def _get_balance(self, cr, uid, ids, field_names, args, context=None):
'''returns a dictionary with key=the ID of a record and value=the balance amount
computed for this record. If the record is of type :
'accounts' : it's the sum of the linked accounts
'account_type' : it's the sum of leaf accoutns with such an account_type
'account_report' : it's the amount of the related report
'sum' : it's the sum of the children of this record (aka a 'view' record)'''
account_obj = self.pool.get('account.account')
res = {}
for report in self.browse(cr, uid, ids, context=context):
if report.id in res:
continue
res[report.id] = dict((fn, 0.0) for fn in field_names)
if report.type == 'accounts':
# it's the sum of the linked accounts
for a in report.account_ids:
for field in field_names:
res[report.id][field] += getattr(a, field)
elif report.type == 'account_type':
# it's the sum the leaf accounts with such an account type
report_types = [x.id for x in report.account_type_ids]
account_ids = account_obj.search(cr, uid, [('user_type','in', report_types), ('type','!=','view')], context=context)
for a in account_obj.browse(cr, uid, account_ids, context=context):
for field in field_names:
res[report.id][field] += getattr(a, field)
elif report.type == 'account_report' and report.account_report_id:
# it's the amount of the linked report
res2 = self._get_balance(cr, uid, [report.account_report_id.id], field_names, False, context=context)
for key, value in res2.items():
for field in field_names:
res[report.id][field] += value[field]
elif report.type == 'sum':
# it's the sum of the children of this account.report
res2 = self._get_balance(cr, uid, [rec.id for rec in report.children_ids], field_names, False, context=context)
for key, value in res2.items():
for field in field_names:
res[report.id][field] += value[field]
return res
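    # Illustrative sketch (old OpenERP ORM API, hypothetical values): a 'sum'
    # report aggregates the balances of its children, e.g.
    #
    #   report_obj = self.pool.get('account.financial.report')
    #   parent_id = report_obj.create(cr, uid, {'name': 'Assets', 'type': 'sum'})
    #   report_obj.create(cr, uid, {'name': 'Bank', 'type': 'accounts',
    #                               'parent_id': parent_id,
    #                               'account_ids': [(6, 0, bank_account_ids)]})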
_columns = {
'name': fields.char('Report Name', required=True, translate=True),
'parent_id': fields.many2one('account.financial.report', 'Parent'),
'children_ids': fields.one2many('account.financial.report', 'parent_id', 'Account Report'),
'sequence': fields.integer('Sequence'),
'balance': fields.function(_get_balance, 'Balance', multi='balance'),
'debit': fields.function(_get_balance, 'Debit', multi='balance'),
'credit': fields.function(_get_balance, 'Credit', multi="balance"),
'level': fields.function(_get_level, string='Level', store=True, type='integer'),
'type': fields.selection([
('sum','View'),
('accounts','Accounts'),
('account_type','Account Type'),
('account_report','Report Value'),
],'Type'),
'account_ids': fields.many2many('account.account', 'account_account_financial_report', 'report_line_id', 'account_id', 'Accounts'),
'account_report_id': fields.many2one('account.financial.report', 'Report Value'),
'account_type_ids': fields.many2many('account.account.type', 'account_account_financial_report_type', 'report_id', 'account_type_id', 'Account Types'),
'sign': fields.selection([(-1, 'Reverse balance sign'), (1, 'Preserve balance sign')], 'Sign on Reports', required=True, help='For accounts that are typically more debited than credited and that you would like to print as negative amounts in your reports, you should reverse the sign of the balance; e.g.: Expense account. The same applies for accounts that are typically more credited than debited and that you would like to print as positive amounts in your reports; e.g.: Income account.'),
'display_detail': fields.selection([
('no_detail','No detail'),
('detail_flat','Display children flat'),
('detail_with_hierarchy','Display children with hierarchy')
], 'Display details'),
'style_overwrite': fields.selection([
(0, 'Automatic formatting'),
(1,'Main Title 1 (bold, underlined)'),
(2,'Title 2 (bold)'),
(3,'Title 3 (bold, smaller)'),
(4,'Normal Text'),
(5,'Italic Text (smaller)'),
(6,'Smallest Text'),
],'Financial Report Style', help="You can set up here the format you want this record to be displayed. If you leave the automatic formatting, it will be computed based on the financial reports hierarchy (auto-computed field 'level')."),
}
_defaults = {
'type': 'sum',
'display_detail': 'detail_flat',
'sign': 1,
'style_overwrite': 0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Dany3R9/ns-3-dev-ndnSIM | src/dsdv/bindings/callbacks_list.py | 151 | 1222 | callback_classes = [
['void', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::Socket::SocketErrno', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| gpl-2.0 |
lindsayad/sympy | sympy/interactive/tests/test_ipythonprinting.py | 11 | 6263 | """Tests that the IPython printing module is properly loaded. """
from sympy.core.compatibility import u
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
from sympy.utilities.pytest import raises
# run_cell was added in IPython 0.11
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] == "pi"
assert app.user_ns['a2']['text/plain'] == "pi**2"
else:
assert app.user_ns['a'][0]['text/plain'] == "pi"
assert app.user_ns['a2'][0]['text/plain'] == "pi**2"
# Load printing extension
app.run_cell("from sympy import init_printing")
app.run_cell("init_printing()")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2']['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
else:
assert app.user_ns['a'][0]['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2'][0]['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
def test_print_builtin_option():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
app.run_cell("from sympy import init_printing")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note : Unicode of Python2 is equivalent to str in Python3. In Python 3 we have one
# text type: str which holds Unicode data and two byte types bytes and bytearray.
# XXX: How can we make this ignore the terminal width? This test fails if
# the terminal is too narrow.
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
# If we enable the default printing, then the dictionary's should render
# as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
latex = app.user_ns['a']['text/latex']
else:
text = app.user_ns['a'][0]['text/plain']
latex = app.user_ns['a'][0]['text/latex']
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$'
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True, print_builtin=False)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note : Unicode of Python2 is equivalent to str in Python3. In Python 3 we have one
# text type: str which holds Unicode data and two byte types bytes and bytearray.
# Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}'
# Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}'
# Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}'
assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}")
def test_matplotlib_bad_latex():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("import IPython")
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import init_printing, Matrix")
app.run_cell("init_printing(use_latex='matplotlib')")
# The png formatter is not enabled by default in this context
app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True")
# Make sure no warnings are raised by IPython
app.run_cell("import warnings")
app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)")
# This should not raise an exception
app.run_cell("a = format(Matrix([1, 2, 3]))")
# issue 9799
app.run_cell("from sympy import Piecewise, Symbol, Eq")
app.run_cell("x = Symbol('x'); pw = format(Piecewise((1, Eq(x, 0)), (0, True)))")
| bsd-3-clause |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/python/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/agw/ribbon/art_aui.py | 6 | 55653 | """
L{RibbonAUIArtProvider} is responsible for drawing all the components of the ribbon
interface using an AUI-compatible appearance.
Description
===========
This allows a ribbon bar to have a pluggable look-and-feel, while retaining the same
underlying behaviour. As a single art provider is used for all ribbon components, a
ribbon bar usually has a consistent (though unique) appearance.
By default, a L{RibbonBar} uses an instance of a class called `RibbonDefaultArtProvider`,
which resolves to `RibbonAUIArtProvider`, `RibbonMSWArtProvider`, or `RibbonOSXArtProvider`
- whichever is most appropriate to the current platform. These art providers are all
slightly configurable with regard to colours and fonts, but for larger modifications,
you can derive from one of these classes, or write a completely new art provider class.
Call L{RibbonBar.SetArtProvider} to change the art provider being used.
See Also
========
L{RibbonBar}
"""
import wx
from math import cos
from math import pi as M_PI
from art_msw import RibbonMSWArtProvider
from art_internal import RibbonHSLColour, RibbonShiftLuminance, RibbonInterpolateColour
import bar as BAR, panel as PANEL
from art import *
if wx.Platform == "__WXMAC__":
import Carbon.Appearance
def FontFromFont(original):
newFont = wx.Font(original.GetPointSize(), original.GetFamily(),
original.GetStyle(), original.GetWeight(), original.GetUnderlined(),
original.GetFaceName(), original.GetEncoding())
return newFont
class RibbonAUIArtProvider(RibbonMSWArtProvider):
def __init__(self):
RibbonMSWArtProvider.__init__(self)
if wx.Platform == "__WXMAC__":
if hasattr(wx, 'MacThemeColour'):
base_colour = wx.MacThemeColour(Carbon.Appearance.kThemeBrushToolbarBackground)
else:
brush = wx.Brush(wx.BLACK)
brush.MacSetTheme(Carbon.Appearance.kThemeBrushToolbarBackground)
base_colour = brush.GetColour()
else:
base_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE)
self.SetColourScheme(base_colour, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT),
wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT))
self._tab_active_label_font = FontFromFont(self._tab_label_font)
self._tab_active_label_font.SetWeight(wx.FONTWEIGHT_BOLD)
self._page_border_left = 1
self._page_border_right = 1
self._page_border_top = 1
self._page_border_bottom = 2
self._tab_separation_size = 0
self._gallery_bitmap_padding_left_size = 3
self._gallery_bitmap_padding_right_size = 3
self._gallery_bitmap_padding_top_size = 3
self._gallery_bitmap_padding_bottom_size = 3
def Clone(self):
"""
Create a new art provider which is a clone of this one.
"""
copy = RibbonAUIArtProvider()
self.CloneTo(copy)
copy._tab_ctrl_background_colour = self._tab_ctrl_background_colour
copy._tab_ctrl_background_gradient_colour = self._tab_ctrl_background_gradient_colour
copy._panel_label_background_colour = self._panel_label_background_colour
copy._panel_label_background_gradient_colour = self._panel_label_background_gradient_colour
copy._panel_hover_label_background_colour = self._panel_hover_label_background_colour
copy._panel_hover_label_background_gradient_colour = self._panel_hover_label_background_gradient_colour
copy._background_brush = self._background_brush
copy._tab_active_top_background_brush = self._tab_active_top_background_brush
copy._tab_hover_background_brush = self._tab_hover_background_brush
copy._button_bar_hover_background_brush = self._button_bar_hover_background_brush
copy._button_bar_active_background_brush = self._button_bar_active_background_brush
copy._gallery_button_active_background_brush = self._gallery_button_active_background_brush
copy._gallery_button_hover_background_brush = self._gallery_button_hover_background_brush
copy._gallery_button_disabled_background_brush = self._gallery_button_disabled_background_brush
copy._toolbar_hover_borden_pen = self._toolbar_hover_borden_pen
copy._tool_hover_background_brush = self._tool_hover_background_brush
copy._tool_active_background_brush = self._tool_active_background_brush
return copy
def SetFont(self, id, font):
"""
        Set the value of a certain font setting to the given font.
        The `id` can be one of the font values of `RibbonArtSetting`.
        :param `id`: the font id;
        :param `font`: the font to use for the given setting.
"""
RibbonMSWArtProvider.SetFont(self, id, font)
if id == RIBBON_ART_TAB_LABEL_FONT:
self._tab_active_label_font = FontFromFont(self._tab_label_font)
self._tab_active_label_font.SetWeight(wx.FONTWEIGHT_BOLD)
def GetColour(self, id):
"""
        Get the value of a certain colour setting.
        The `id` can be one of the colour values of `RibbonArtSetting`.
        :param `id`: the colour id.
"""
if id in [RIBBON_ART_PAGE_BACKGROUND_COLOUR, RIBBON_ART_PAGE_BACKGROUND_GRADIENT_COLOUR]:
return self._background_brush.GetColour()
elif id == RIBBON_ART_TAB_CTRL_BACKGROUND_COLOUR:
return self._tab_ctrl_background_colour
elif id == RIBBON_ART_TAB_CTRL_BACKGROUND_GRADIENT_COLOUR:
return self._tab_ctrl_background_gradient_colour
elif id in [RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_COLOUR, RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_GRADIENT_COLOUR]:
return self._tab_active_top_background_brush.GetColour()
elif id in [RIBBON_ART_TAB_HOVER_BACKGROUND_COLOUR, RIBBON_ART_TAB_HOVER_BACKGROUND_GRADIENT_COLOUR]:
return self._tab_hover_background_brush.GetColour()
elif id == RIBBON_ART_PANEL_LABEL_BACKGROUND_COLOUR:
return self._panel_label_background_colour
elif id == RIBBON_ART_PANEL_LABEL_BACKGROUND_GRADIENT_COLOUR:
return self._panel_label_background_gradient_colour
elif id == RIBBON_ART_PANEL_HOVER_LABEL_BACKGROUND_COLOUR:
return self._panel_hover_label_background_colour
elif id == RIBBON_ART_PANEL_HOVER_LABEL_BACKGROUND_GRADIENT_COLOUR:
return self._panel_hover_label_background_gradient_colour
elif id in [RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_COLOUR, RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_GRADIENT_COLOUR]:
return self._button_bar_hover_background_brush.GetColour()
elif id in [RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_GRADIENT_COLOUR]:
return self._gallery_button_hover_background_brush.GetColour()
elif id in [RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_GRADIENT_COLOUR]:
return self._gallery_button_active_background_brush.GetColour()
elif id in [RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_GRADIENT_COLOUR]:
return self._gallery_button_disabled_background_brush.GetColour()
else:
return RibbonMSWArtProvider.GetColour(self, id)
def SetColour(self, id, colour):
"""
        Set the value of a certain colour setting to the given colour.
        The `id` can be one of the colour values of `RibbonArtSetting`, though not all
        colour settings will have an effect on every art provider.
        :param `id`: the colour id;
        :param `colour`: the colour to use for the given setting.
:see: L{SetColourScheme}
"""
if id in [RIBBON_ART_PAGE_BACKGROUND_COLOUR, RIBBON_ART_PAGE_BACKGROUND_GRADIENT_COLOUR]:
self._background_brush.SetColour(colour)
elif id == RIBBON_ART_TAB_CTRL_BACKGROUND_COLOUR:
self._tab_ctrl_background_colour = colour
elif id == RIBBON_ART_TAB_CTRL_BACKGROUND_GRADIENT_COLOUR:
self._tab_ctrl_background_gradient_colour = colour
elif id in [RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_COLOUR, RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_GRADIENT_COLOUR]:
self._tab_active_top_background_brush.SetColour(colour)
elif id in [RIBBON_ART_TAB_HOVER_BACKGROUND_COLOUR, RIBBON_ART_TAB_HOVER_BACKGROUND_GRADIENT_COLOUR]:
self._tab_hover_background_brush.SetColour(colour)
elif id == RIBBON_ART_PANEL_LABEL_BACKGROUND_COLOUR:
self._panel_label_background_colour = colour
elif id == RIBBON_ART_PANEL_LABEL_BACKGROUND_GRADIENT_COLOUR:
self._panel_label_background_gradient_colour = colour
elif id in [RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_COLOUR, RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_GRADIENT_COLOUR]:
self._button_bar_hover_background_brush.SetColour(colour)
elif id in [RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_GRADIENT_COLOUR]:
self._gallery_button_hover_background_brush.SetColour(colour)
elif id in [RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_GRADIENT_COLOUR]:
self._gallery_button_active_background_brush.SetColour(colour)
elif id in [RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_GRADIENT_COLOUR]:
self._gallery_button_disabled_background_brush.SetColour(colour)
else:
RibbonMSWArtProvider.SetColour(self, id, colour)
def SetColourScheme(self, primary, secondary, tertiary):
"""
Set all applicable colour settings from a few base colours.
Uses any or all of the three given colours to create a colour scheme, and then
sets all colour settings which are relevant to the art provider using that
scheme. Note that some art providers may not use the tertiary colour for
anything, and some may not use the secondary colour either.
        :param `primary`: the primary base colour of the scheme;
        :param `secondary`: the secondary base colour of the scheme;
        :param `tertiary`: the tertiary base colour of the scheme.
:see: L{SetColour}, L{RibbonMSWArtProvider.GetColourScheme}
"""
primary_hsl = RibbonHSLColour(primary)
secondary_hsl = RibbonHSLColour(secondary)
tertiary_hsl = RibbonHSLColour(tertiary)
# Map primary & secondary luminance from [0, 1] to [0.15, 0.85]
primary_hsl.luminance = cos(primary_hsl.luminance * M_PI) * -0.35 + 0.5
secondary_hsl.luminance = cos(secondary_hsl.luminance * M_PI) * -0.35 + 0.5
# TODO: Remove next line once this provider stops piggybacking MSW
RibbonMSWArtProvider.SetColourScheme(self, primary, secondary, tertiary)
self._tab_ctrl_background_colour = RibbonShiftLuminance(primary_hsl, 0.9).ToRGB()
self._tab_ctrl_background_gradient_colour = RibbonShiftLuminance(primary_hsl, 1.7).ToRGB()
self._tab_border_pen = wx.Pen(RibbonShiftLuminance(primary_hsl, 0.75).ToRGB())
self._tab_label_colour = RibbonShiftLuminance(primary_hsl, 0.1).ToRGB()
self._tab_hover_background_top_colour = primary_hsl.ToRGB()
self._tab_hover_background_top_gradient_colour = RibbonShiftLuminance(primary_hsl, 1.6).ToRGB()
self._tab_hover_background_brush = wx.Brush(self._tab_hover_background_top_colour)
self._tab_active_background_colour = self._tab_ctrl_background_gradient_colour
self._tab_active_background_gradient_colour = primary_hsl.ToRGB()
self._tab_active_top_background_brush = wx.Brush(self._tab_active_background_colour)
self._panel_label_colour = self._tab_label_colour
self._panel_minimised_label_colour = self._panel_label_colour
self._panel_hover_label_colour = tertiary_hsl.ToRGB()
self._page_border_pen = self._tab_border_pen
self._panel_border_pen = self._tab_border_pen
self._background_brush = wx.Brush(primary_hsl.ToRGB())
self._page_hover_background_colour = RibbonShiftLuminance(primary_hsl, 1.5).ToRGB()
self._page_hover_background_gradient_colour = RibbonShiftLuminance(primary_hsl, 0.9).ToRGB()
self._panel_label_background_colour = RibbonShiftLuminance(primary_hsl, 0.85).ToRGB()
self._panel_label_background_gradient_colour = RibbonShiftLuminance(primary_hsl, 0.97).ToRGB()
self._panel_hover_label_background_gradient_colour = secondary_hsl.ToRGB()
self._panel_hover_label_background_colour = secondary_hsl.Lighter(0.2).ToRGB()
self._button_bar_hover_border_pen = wx.Pen(secondary_hsl.ToRGB())
self._button_bar_hover_background_brush = wx.Brush(RibbonShiftLuminance(secondary_hsl, 1.7).ToRGB())
self._button_bar_active_background_brush = wx.Brush(RibbonShiftLuminance(secondary_hsl, 1.4).ToRGB())
self._button_bar_label_colour = self._tab_label_colour
self._gallery_border_pen = self._tab_border_pen
self._gallery_item_border_pen = self._button_bar_hover_border_pen
self._gallery_hover_background_brush = wx.Brush(RibbonShiftLuminance(primary_hsl, 1.2).ToRGB())
self._gallery_button_background_colour = self._page_hover_background_colour
self._gallery_button_background_gradient_colour = self._page_hover_background_gradient_colour
self._gallery_button_hover_background_brush = self._button_bar_hover_background_brush
self._gallery_button_active_background_brush = self._button_bar_active_background_brush
self._gallery_button_disabled_background_brush = wx.Brush(primary_hsl.Desaturated(0.15).ToRGB())
self.SetColour(RIBBON_ART_GALLERY_BUTTON_FACE_COLOUR, RibbonShiftLuminance(primary_hsl, 0.1).ToRGB())
self.SetColour(RIBBON_ART_GALLERY_BUTTON_DISABLED_FACE_COLOUR, wx.Colour(128, 128, 128))
self.SetColour(RIBBON_ART_GALLERY_BUTTON_ACTIVE_FACE_COLOUR, RibbonShiftLuminance(secondary_hsl, 0.1).ToRGB())
self.SetColour(RIBBON_ART_GALLERY_BUTTON_HOVER_FACE_COLOUR, RibbonShiftLuminance(secondary_hsl, 0.1).ToRGB())
self._toolbar_border_pen = self._tab_border_pen
self.SetColour(RIBBON_ART_TOOLBAR_FACE_COLOUR, RibbonShiftLuminance(primary_hsl, 0.1).ToRGB())
self._tool_background_colour = self._page_hover_background_colour
self._tool_background_gradient_colour = self._page_hover_background_gradient_colour
self._toolbar_hover_borden_pen = self._button_bar_hover_border_pen
self._tool_hover_background_brush = self._button_bar_hover_background_brush
self._tool_active_background_brush = self._button_bar_active_background_brush
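    # Illustrative note (not part of the original wx source): application code holding a
    # hypothetical RibbonBar named `ribbon` might apply a custom scheme roughly as follows,
    # letting the provider derive all of the brushes and pens above from three base colours:
    #
    #     art = ribbon.GetArtProvider()
    #     art.SetColourScheme(wx.Colour(220, 220, 220),   # primary
    #                         wx.Colour(80, 140, 220),    # secondary
    #                         wx.Colour(30, 30, 30))      # tertiary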
def DrawTabCtrlBackground(self, dc, wnd, rect):
"""
Draw the background of the tab region of a ribbon bar.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw.
"""
gradient_rect = wx.Rect(*rect)
gradient_rect.height -= 1
dc.GradientFillLinear(gradient_rect, self._tab_ctrl_background_colour, self._tab_ctrl_background_gradient_colour, wx.SOUTH)
dc.SetPen(self._tab_border_pen)
dc.DrawLine(rect.x, rect.GetBottom(), rect.GetRight()+1, rect.GetBottom())
def GetTabCtrlHeight(self, dc, wnd, pages):
"""
Calculate the height (in pixels) of the tab region of a ribbon bar.
Note that as the tab region can contain scroll buttons, the height should be
greater than or equal to the minimum height for a tab scroll button.
:param `dc`: A device context to use when one is required for size calculations;
:param `wnd`: The window onto which the tabs will eventually be drawn;
:param `pages`: The tabs which will acquire the returned height.
"""
text_height = 0
icon_height = 0
if len(pages) <= 1 and (self._flags & RIBBON_BAR_ALWAYS_SHOW_TABS) == 0:
# To preserve space, a single tab need not be displayed. We still need
# one pixel of border though.
return 1
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS:
dc.SetFont(self._tab_active_label_font)
text_height = dc.GetTextExtent("ABCDEFXj")[1]
if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
for info in pages:
if info.page.GetIcon().IsOk():
icon_height = max(icon_height, info.page.GetIcon().GetHeight())
return max(text_height, icon_height) + 10
def DrawTab(self, dc, wnd, tab):
"""
Draw a single tab in the tab region of a ribbon bar.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto (not the L{RibbonPage}
associated with the tab being drawn);
:param `tab`: The rectangle within which to draw, and also the tab label,
icon, and state (active and/or hovered). The drawing rectangle will be
entirely within a rectangle on the same device context previously painted
with L{DrawTabCtrlBackground}. The rectangle's width will be at least the
minimum value returned by L{GetBarTabWidth}, and height will be the value
returned by L{GetTabCtrlHeight}.
"""
if tab.rect.height <= 1:
return
dc.SetFont(self._tab_label_font)
dc.SetPen(wx.TRANSPARENT_PEN)
if tab.active or tab.hovered:
if tab.active:
dc.SetFont(self._tab_active_label_font)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(tab.rect.x, tab.rect.y + tab.rect.height - 1, tab.rect.width - 1, 1)
grad_rect = wx.Rect(*tab.rect)
grad_rect.height -= 4
grad_rect.width -= 1
grad_rect.height /= 2
grad_rect.y = grad_rect.y + tab.rect.height - grad_rect.height - 1
dc.SetBrush(self._tab_active_top_background_brush)
dc.DrawRectangle(tab.rect.x, tab.rect.y + 3, tab.rect.width - 1, grad_rect.y - tab.rect.y - 3)
dc.GradientFillLinear(grad_rect, self._tab_active_background_colour, self._tab_active_background_gradient_colour, wx.SOUTH)
else:
btm_rect = wx.Rect(*tab.rect)
btm_rect.height -= 4
btm_rect.width -= 1
btm_rect.height /= 2
btm_rect.y = btm_rect.y + tab.rect.height - btm_rect.height - 1
dc.SetBrush(self._tab_hover_background_brush)
dc.DrawRectangle(btm_rect.x, btm_rect.y, btm_rect.width, btm_rect.height)
grad_rect = wx.Rect(*tab.rect)
grad_rect.width -= 1
grad_rect.y += 3
grad_rect.height = btm_rect.y - grad_rect.y
dc.GradientFillLinear(grad_rect, self._tab_hover_background_top_colour, self._tab_hover_background_top_gradient_colour, wx.SOUTH)
border_points = [wx.Point() for i in xrange(5)]
border_points[0] = wx.Point(0, 3)
border_points[1] = wx.Point(1, 2)
border_points[2] = wx.Point(tab.rect.width - 3, 2)
border_points[3] = wx.Point(tab.rect.width - 1, 4)
border_points[4] = wx.Point(tab.rect.width - 1, tab.rect.height - 1)
dc.SetPen(self._tab_border_pen)
dc.DrawLines(border_points, tab.rect.x, tab.rect.y)
old_clip = dc.GetClippingRect()
is_first_tab = False
bar = tab.page.GetParent()
icon = wx.NullBitmap
if isinstance(bar, BAR.RibbonBar) and bar.GetPage(0) == tab.page:
is_first_tab = True
if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
icon = tab.page.GetIcon()
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS == 0:
x = tab.rect.x + (tab.rect.width - icon.GetWidth()) / 2
dc.DrawBitmap(icon, x, tab.rect.y + 1 + (tab.rect.height - 1 - icon.GetHeight()) / 2, True)
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS:
label = tab.page.GetLabel()
if label.strip():
dc.SetTextForeground(self._tab_label_colour)
dc.SetBackgroundMode(wx.TRANSPARENT)
offset = 0
if icon.IsOk():
offset += icon.GetWidth() + 2
text_width, text_height = dc.GetTextExtent(label)
x = (tab.rect.width - 2 - text_width - offset) / 2
if x > 8:
x = 8
elif x < 1:
x = 1
width = tab.rect.width - x - 2
x += tab.rect.x + offset
y = tab.rect.y + (tab.rect.height - text_height) / 2
if icon.IsOk():
dc.DrawBitmap(icon, x - offset, tab.rect.y + (tab.rect.height - icon.GetHeight()) / 2, True)
dc.SetClippingRegion(x, tab.rect.y, width, tab.rect.height)
dc.DrawText(label, x, y)
# Draw the left hand edge of the tab only for the first tab (subsequent
# tabs use the right edge of the prior tab as their left edge). As this is
# outside the rectangle for the tab, only draw it if the leftmost part of
# the tab is within the clip rectangle (the clip region has to be cleared
# to draw outside the tab).
if is_first_tab and old_clip.x <= tab.rect.x and tab.rect.x < old_clip.x + old_clip.width:
dc.DestroyClippingRegion()
dc.DrawLine(tab.rect.x - 1, tab.rect.y + 4, tab.rect.x - 1, tab.rect.y + tab.rect.height - 1)
def GetBarTabWidth(self, dc, wnd, label, bitmap, ideal=None, small_begin_need_separator=None,
small_must_have_separator=None, minimum=None):
"""
Calculate the ideal and minimum width (in pixels) of a tab in a ribbon bar.
:param `dc`: A device context to use when one is required for size calculations;
:param `wnd`: The window onto which the tab will eventually be drawn;
:param `label`: The tab's label (or wx.EmptyString if it has none);
:param `bitmap`: The tab's icon (or wx.NullBitmap if it has none);
:param `ideal`: The ideal width (in pixels) of the tab;
        :param `small_begin_need_separator`: A size less than the ideal size, at which a tab
        separator should begin to be drawn (i.e. drawn, but still fairly transparent);
        :param `small_must_have_separator`: A size less than the `small_begin_need_separator`
        size, at which a tab separator must be drawn (i.e. drawn at full opacity);
        :param `minimum`: A size less than the `small_must_have_separator` size, and greater
        than or equal to zero, which is the minimum pixel width for the tab.
"""
width = mini = 0
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS and label.strip():
dc.SetFont(self._tab_active_label_font)
width += dc.GetTextExtent(label)[0]
mini += min(30, width) # enough for a few chars
if bitmap.IsOk():
# gap between label and bitmap
width += 4
mini += 2
if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS and bitmap.IsOk():
width += bitmap.GetWidth()
mini += bitmap.GetWidth()
ideal = width + 16
small_begin_need_separator = mini
small_must_have_separator = mini
minimum = mini
return ideal, small_begin_need_separator, small_must_have_separator, minimum
def DrawTabSeparator(self, dc, wnd, rect, visibility):
"""
Draw a separator between two tabs in a ribbon bar.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw, which will be entirely
within a rectangle on the same device context previously painted with
L{DrawTabCtrlBackground};
:param `visibility`: The opacity with which to draw the separator. Values
are in the range [0, 1], with 0 being totally transparent, and 1 being totally
opaque.
"""
# No explicit separators between tabs
pass
def DrawPageBackground(self, dc, wnd, rect):
"""
Draw the background of a ribbon page.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto (which is commonly the
L{RibbonPage} whose background is being drawn, but doesn't have to be);
:param `rect`: The rectangle within which to draw.
:see: L{RibbonMSWArtProvider.GetPageBackgroundRedrawArea}
"""
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x + 1, rect.y, rect.width - 2, rect.height - 1)
dc.SetPen(self._page_border_pen)
dc.DrawLine(rect.x, rect.y, rect.x, rect.y + rect.height)
dc.DrawLine(rect.GetRight(), rect.y, rect.GetRight(), rect.y +rect.height)
dc.DrawLine(rect.x, rect.GetBottom(), rect.GetRight()+1, rect.GetBottom())
def GetScrollButtonMinimumSize(self, dc, wnd, style):
"""
Calculate the minimum size (in pixels) of a scroll button.
:param `dc`: A device context to use when one is required for size calculations;
:param `wnd`: The window onto which the scroll button will eventually be drawn;
        :param `style`: A combination of flags from `RibbonScrollButtonStyle`, including
        a direction and a 'for' flag such as ``RIBBON_SCROLL_BTN_FOR_TABS`` (state flags may
        be given too, but should be ignored, as a button should retain a constant size,
        regardless of its state).
"""
return wx.Size(11, 11)
def DrawScrollButton(self, dc, wnd, rect, style):
"""
Draw a ribbon-style scroll button.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw. The size of this rectangle
will be at least the size returned by L{GetScrollButtonMinimumSize} for a
scroll button with the same style. For tab scroll buttons, this rectangle
will be entirely within a rectangle on the same device context previously
painted with L{DrawTabCtrlBackground}, but this is not guaranteed for other
types of button (for example, page scroll buttons will not be painted on
an area previously painted with L{DrawPageBackground});
        :param `style`: A combination of flags from `RibbonScrollButtonStyle`,
        including a direction, a 'for' flag, and one or more states.
"""
true_rect = wx.Rect(*rect)
arrow_points = [wx.Point() for i in xrange(3)]
if style & RIBBON_SCROLL_BTN_FOR_MASK == RIBBON_SCROLL_BTN_FOR_TABS:
true_rect.y += 2
true_rect.height -= 2
dc.SetPen(self._tab_border_pen)
else:
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
dc.SetPen(self._page_border_pen)
result = style & RIBBON_SCROLL_BTN_DIRECTION_MASK
if result == RIBBON_SCROLL_BTN_LEFT:
dc.DrawLine(true_rect.GetRight(), true_rect.y, true_rect.GetRight(), true_rect.y + true_rect.height)
arrow_points[0] = wx.Point(rect.width / 2 - 2, rect.height / 2)
arrow_points[1] = arrow_points[0] + wx.Point(5, -5)
arrow_points[2] = arrow_points[0] + wx.Point(5, 5)
elif result == RIBBON_SCROLL_BTN_RIGHT:
dc.DrawLine(true_rect.x, true_rect.y, true_rect.x, true_rect.y + true_rect.height)
arrow_points[0] = wx.Point(rect.width / 2 + 3, rect.height / 2)
arrow_points[1] = arrow_points[0] - wx.Point(5, -5)
arrow_points[2] = arrow_points[0] - wx.Point(5, 5)
elif result == RIBBON_SCROLL_BTN_DOWN:
dc.DrawLine(true_rect.x, true_rect.y, true_rect.x + true_rect.width, true_rect.y)
arrow_points[0] = wx.Point(rect.width / 2, rect.height / 2 + 3)
arrow_points[1] = arrow_points[0] - wx.Point( 5, 5)
arrow_points[2] = arrow_points[0] - wx.Point(-5, 5)
elif result == RIBBON_SCROLL_BTN_UP:
dc.DrawLine(true_rect.x, true_rect.GetBottom(), true_rect.x + true_rect.width, true_rect.GetBottom())
arrow_points[0] = wx.Point(rect.width / 2, rect.height / 2 - 2)
arrow_points[1] = arrow_points[0] + wx.Point( 5, 5)
arrow_points[2] = arrow_points[0] + wx.Point(-5, 5)
else:
return
x = rect.x
y = rect.y
if style & RIBBON_SCROLL_BTN_ACTIVE:
x += 1
y += 1
dc.SetPen(wx.TRANSPARENT_PEN)
B = wx.Brush(self._tab_label_colour)
dc.SetBrush(B)
dc.DrawPolygon(arrow_points, x, y)
def GetPanelSize(self, dc, wnd, client_size, client_offset=None):
"""
Calculate the size of a panel for a given client size.
This should increment the given size by enough to fit the panel label and other
chrome.
:param `dc`: A device context to use if one is required for size calculations;
:param `wnd`: The ribbon panel in question;
:param `client_size`: The client size;
:param `client_offset`: The offset where the client rectangle begins within
the panel (may be ``None``).
:see: L{GetPanelClientSize}
"""
dc.SetFont(self._panel_label_font)
label_size = wx.Size(*dc.GetTextExtent(wnd.GetLabel()))
label_height = label_size.GetHeight() + 5
if self._flags & RIBBON_BAR_FLOW_VERTICAL:
client_size.IncBy(4, label_height + 6)
if client_offset is not None:
client_offset = wx.Point(2, label_height + 3)
else:
client_size.IncBy(6, label_height + 4)
if client_offset is not None:
client_offset = wx.Point(3, label_height + 2)
return client_size
def GetPanelClientSize(self, dc, wnd, size, client_offset=None):
"""
Calculate the client size of a panel for a given overall size.
This should act as the inverse to L{GetPanelSize}, and decrement the given size
by enough to fit the panel label and other chrome.
:param `dc`: A device context to use if one is required for size calculations;
:param `wnd`: The ribbon panel in question;
:param `size`: The overall size to calculate client size for;
        :param `client_offset`: The offset where the returned client size begins within
        the given size (may be ``None``).
:see: L{GetPanelSize}
"""
dc.SetFont(self._panel_label_font)
label_size = wx.Size(*dc.GetTextExtent(wnd.GetLabel()))
label_height = label_size.GetHeight() + 5
if self._flags & RIBBON_BAR_FLOW_VERTICAL:
size.DecBy(4, label_height + 6)
if client_offset is not None:
client_offset = wx.Point(2, label_height + 3)
else:
size.DecBy(6, label_height + 4)
if client_offset is not None:
client_offset = wx.Point(3, label_height + 2)
return size, client_offset
def DrawPanelBackground(self, dc, wnd, rect):
"""
Draw the background and chrome for a ribbon panel.
This should draw the border, background, label, and any other items of a panel
which are outside the client area of a panel. Note that when a panel is
minimised, this function is not called - only L{DrawMinimisedPanel} is called,
so a background should be explicitly painted by that if required.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto, which is always the panel
whose background and chrome is being drawn. The panel label and other panel
attributes can be obtained by querying this;
:param `rect`: The rectangle within which to draw.
"""
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
true_rect = wx.Rect(*rect)
true_rect = self.RemovePanelPadding(true_rect)
dc.SetPen(self._panel_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(true_rect.x, true_rect.y, true_rect.width, true_rect.height)
true_rect.x += 1
true_rect.width -= 2
true_rect.y += 1
dc.SetFont(self._panel_label_font)
label_size = wx.Size(*dc.GetTextExtent(wnd.GetLabel()))
label_height = label_size.GetHeight() + 5
label_rect = wx.Rect(*true_rect)
label_rect.height = label_height - 1
dc.DrawLine(label_rect.x, label_rect.y + label_rect.height, label_rect.x + label_rect.width, label_rect.y + label_rect.height)
label_bg_colour = self._panel_label_background_colour
label_bg_grad_colour = self._panel_label_background_gradient_colour
if wnd.IsHovered():
label_bg_colour = self._panel_hover_label_background_colour
label_bg_grad_colour = self._panel_hover_label_background_gradient_colour
dc.SetTextForeground(self._panel_hover_label_colour)
else:
dc.SetTextForeground(self._panel_label_colour)
if wx.Platform == "__WXMAC__":
dc.GradientFillLinear(label_rect, label_bg_grad_colour, label_bg_colour, wx.SOUTH)
else:
dc.GradientFillLinear(label_rect, label_bg_colour, label_bg_grad_colour, wx.SOUTH)
dc.SetFont(self._panel_label_font)
dc.DrawText(wnd.GetLabel(), label_rect.x + 3, label_rect.y + 2)
if wnd.IsHovered():
gradient_rect = wx.Rect(*true_rect)
gradient_rect.y += label_rect.height + 1
gradient_rect.height = true_rect.height - label_rect.height - 3
if wx.Platform == "__WXMAC__":
colour = self._page_hover_background_gradient_colour
gradient = self._page_hover_background_colour
else:
colour = self._page_hover_background_colour
gradient = self._page_hover_background_gradient_colour
dc.GradientFillLinear(gradient_rect, colour, gradient, wx.SOUTH)
def DrawMinimisedPanel(self, dc, wnd, rect, bitmap):
"""
Draw a minimised ribbon panel.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto, which is always the panel
which is minimised. The panel label can be obtained from this window. The
minimised icon obtained from querying the window may not be the size requested
        by L{RibbonMSWArtProvider.GetMinimisedPanelMinimumSize} - the `bitmap` argument contains the icon in the
requested size;
:param `rect`: The rectangle within which to draw. The size of the rectangle
will be at least the size returned by L{RibbonMSWArtProvider.GetMinimisedPanelMinimumSize};
:param `bitmap`: A copy of the panel's minimised bitmap rescaled to the size
returned by L{RibbonMSWArtProvider.GetMinimisedPanelMinimumSize}.
"""
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
true_rect = wx.Rect(*rect)
true_rect = self.RemovePanelPadding(true_rect)
dc.SetPen(self._panel_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(true_rect.x, true_rect.y, true_rect.width, true_rect.height)
true_rect.Deflate(1, 1)
if wnd.IsHovered() or wnd.GetExpandedPanel():
colour = self._page_hover_background_colour
gradient = self._page_hover_background_gradient_colour
if (wx.Platform == "__WXMAC__" and not wnd.GetExpandedPanel()) or \
(wx.Platform != "__WXMAC__" and wnd.GetExpandedPanel()):
temp = colour
colour = gradient
gradient = temp
dc.GradientFillLinear(true_rect, colour, gradient, wx.SOUTH)
preview = self.DrawMinimisedPanelCommon(dc, wnd, true_rect)
dc.SetPen(self._panel_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(preview.x, preview.y, preview.width, preview.height)
preview.Deflate(1, 1)
preview_caption_rect = wx.Rect(*preview)
preview_caption_rect.height = 7
preview.y += preview_caption_rect.height
preview.height -= preview_caption_rect.height
if wx.Platform == "__WXMAC__":
dc.GradientFillLinear(preview_caption_rect, self._panel_hover_label_background_gradient_colour,
self._panel_hover_label_background_colour, wx.SOUTH)
dc.GradientFillLinear(preview, self._page_hover_background_gradient_colour,
self._page_hover_background_colour, wx.SOUTH)
else:
dc.GradientFillLinear(preview_caption_rect, self._panel_hover_label_background_colour,
self._panel_hover_label_background_gradient_colour, wx.SOUTH)
dc.GradientFillLinear(preview, self._page_hover_background_colour,
self._page_hover_background_gradient_colour, wx.SOUTH)
if bitmap.IsOk():
dc.DrawBitmap(bitmap, preview.x + (preview.width - bitmap.GetWidth()) / 2,
preview.y + (preview.height - bitmap.GetHeight()) / 2, True)
def DrawPartialPanelBackground(self, dc, wnd, rect):
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
offset = wx.Point(*wnd.GetPosition())
parent = wnd.GetParent()
panel = None
while 1:
panel = parent
if isinstance(panel, PANEL.RibbonPanel):
if not panel.IsHovered():
return
break
offset += parent.GetPosition()
parent = panel.GetParent()
if panel is None:
return
background = wx.Rect(0, 0, *panel.GetSize())
background = self.RemovePanelPadding(background)
background.x += 1
background.width -= 2
dc.SetFont(self._panel_label_font)
caption_height = dc.GetTextExtent(panel.GetLabel())[1] + 7
background.y += caption_height - 1
background.height -= caption_height
paint_rect = wx.Rect(*rect)
paint_rect.x += offset.x
paint_rect.y += offset.y
if wx.Platform == "__WXMAC__":
bg_grad_clr = self._page_hover_background_colour
bg_clr = self._page_hover_background_gradient_colour
else:
bg_clr = self._page_hover_background_colour
bg_grad_clr = self._page_hover_background_gradient_colour
paint_rect.Intersect(background)
if not paint_rect.IsEmpty():
starting_colour = RibbonInterpolateColour(bg_clr, bg_grad_clr, paint_rect.y, background.y, background.y + background.height)
ending_colour = RibbonInterpolateColour(bg_clr, bg_grad_clr, paint_rect.y + paint_rect.height, background.y, background.y + background.height)
paint_rect.x -= offset.x
paint_rect.y -= offset.y
dc.GradientFillLinear(paint_rect, starting_colour, ending_colour, wx.SOUTH)
def DrawGalleryBackground(self, dc, wnd, rect):
"""
Draw the background and chrome for a L{RibbonGallery} control.
        This should draw the border, background, scroll buttons, extension button, and
any other UI elements which are not attached to a specific gallery item.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto, which is always the gallery
whose background and chrome is being drawn. Attributes used during drawing like
the gallery hover state and individual button states can be queried from this
parameter by L{RibbonGallery.IsHovered}, L{RibbonGallery.GetExtensionButtonState},
L{RibbonGallery.GetUpButtonState}, and L{RibbonGallery.GetDownButtonState};
:param `rect`: The rectangle within which to draw. This rectangle is the entire
area of the gallery control, not just the client rectangle.
"""
self.DrawPartialPanelBackground(dc, wnd, rect)
if wnd.IsHovered():
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._gallery_hover_background_brush)
if self._flags & RIBBON_BAR_FLOW_VERTICAL:
dc.DrawRectangle(rect.x + 1, rect.y + 1, rect.width - 2, rect.height - 16)
else:
dc.DrawRectangle(rect.x + 1, rect.y + 1, rect.width - 16, rect.height - 2)
dc.SetPen(self._gallery_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
self.DrawGalleryBackgroundCommon(dc, wnd, rect)
def DrawGalleryButton(self, dc, rect, state, bitmaps):
extra_height = 0
extra_width = 0
reduced_rect = wx.Rect(*rect)
reduced_rect.Deflate(1, 1)
if self._flags & RIBBON_BAR_FLOW_VERTICAL:
reduced_rect.width += 1
extra_width = 1
else:
reduced_rect.height += 1
extra_height = 1
if state == RIBBON_GALLERY_BUTTON_NORMAL:
dc.GradientFillLinear(reduced_rect, self._gallery_button_background_colour, self._gallery_button_background_gradient_colour, wx.SOUTH)
btn_bitmap = bitmaps[0]
elif state == RIBBON_GALLERY_BUTTON_HOVERED:
dc.SetPen(self._gallery_item_border_pen)
dc.SetBrush(self._gallery_button_hover_background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width + extra_width, rect.height + extra_height)
btn_bitmap = bitmaps[1]
elif state == RIBBON_GALLERY_BUTTON_ACTIVE:
dc.SetPen(self._gallery_item_border_pen)
dc.SetBrush(self._gallery_button_active_background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width + extra_width, rect.height + extra_height)
btn_bitmap = bitmaps[2]
elif state == RIBBON_GALLERY_BUTTON_DISABLED:
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._gallery_button_disabled_background_brush)
dc.DrawRectangle(reduced_rect.x, reduced_rect.y, reduced_rect.width, reduced_rect.height)
btn_bitmap = bitmaps[3]
dc.DrawBitmap(btn_bitmap, reduced_rect.x + reduced_rect.width / 2 - 2, (rect.y + rect.height / 2) - 2, True)
def DrawGalleryItemBackground(self, dc, wnd, rect, item):
"""
Draw the background of a single item in a L{RibbonGallery} control.
        This is painted on top of a gallery background, and behind the item's bitmap.
Unlike L{DrawButtonBarButton} and L{DrawTool}, it is not expected to draw the
item bitmap - that is done by the gallery control itself.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto, which is always the gallery
which contains the item being drawn;
:param `rect`: The rectangle within which to draw. The size of this rectangle
will be the size of the item's bitmap, expanded by gallery item padding values
(``RIBBON_ART_GALLERY_BITMAP_PADDING_LEFT_SIZE``, ``RIBBON_ART_GALLERY_BITMAP_PADDING_RIGHT_SIZE``,
``RIBBON_ART_GALLERY_BITMAP_PADDING_TOP_SIZE``, and ``RIBBON_ART_GALLERY_BITMAP_PADDING_BOTTOM_SIZE``).
The drawing rectangle will be entirely within a rectangle on the same device
context previously painted with L{DrawGalleryBackground};
:param `item`: The item whose background is being painted. Typically the
background will vary if the item is hovered, active, or selected;
L{RibbonGallery.GetSelection}, L{RibbonGallery.GetActiveItem}, and
L{RibbonGallery.GetHoveredItem} can be called to test if the given item is in one of these states.
"""
if wnd.GetHoveredItem() != item and wnd.GetActiveItem() != item and wnd.GetSelection() != item:
return
dc.SetPen(self._gallery_item_border_pen)
if wnd.GetActiveItem() == item or wnd.GetSelection() == item:
dc.SetBrush(self._gallery_button_active_background_brush)
else:
dc.SetBrush(self._gallery_button_hover_background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
def DrawButtonBarBackground(self, dc, wnd, rect):
"""
Draw the background for a L{bar.RibbonButtonBar} control.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto (which will typically
be the button bar itself, though this is not guaranteed);
:param `rect`: The rectangle within which to draw.
"""
self.DrawPartialPanelBackground(dc, wnd, rect)
def DrawButtonBarButton(self, dc, wnd, rect, kind, state, label, bitmap_large, bitmap_small):
"""
Draw a single button for a L{bar.RibbonButtonBar} control.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw. The size of this rectangle
will be a size previously returned by L{RibbonMSWArtProvider.GetButtonBarButtonSize}, and the
rectangle will be entirely within a rectangle on the same device context
previously painted with L{DrawButtonBarBackground};
:param `kind`: The kind of button to draw (normal, dropdown or hybrid);
:param `state`: Combination of a size flag and state flags from the
`RibbonButtonBarButtonState` enumeration;
:param `label`: The label of the button;
        :param `bitmap_large`: The large bitmap of the button (or the large disabled
        bitmap when ``RIBBON_BUTTONBAR_BUTTON_DISABLED`` is set in `state`);
        :param `bitmap_small`: The small bitmap of the button (or the small disabled
        bitmap when ``RIBBON_BUTTONBAR_BUTTON_DISABLED`` is set in `state`).
"""
if state & (RIBBON_BUTTONBAR_BUTTON_HOVER_MASK | RIBBON_BUTTONBAR_BUTTON_ACTIVE_MASK):
dc.SetPen(self._button_bar_hover_border_pen)
bg_rect = wx.Rect(*rect)
bg_rect.Deflate(1, 1)
if kind == RIBBON_BUTTON_HYBRID:
result = state & RIBBON_BUTTONBAR_BUTTON_SIZE_MASK
if result == RIBBON_BUTTONBAR_BUTTON_LARGE:
iYBorder = rect.y + bitmap_large.GetHeight() + 4
partial_bg = wx.Rect(*rect)
if state & RIBBON_BUTTONBAR_BUTTON_NORMAL_HOVERED:
partial_bg.SetBottom(iYBorder - 1)
else:
partial_bg.height -= (iYBorder - partial_bg.y + 1)
partial_bg.y = iYBorder + 1
dc.DrawLine(rect.x, iYBorder, rect.x + rect.width, iYBorder)
bg_rect.Intersect(partial_bg)
elif result == RIBBON_BUTTONBAR_BUTTON_MEDIUM:
iArrowWidth = 9
if state & RIBBON_BUTTONBAR_BUTTON_NORMAL_HOVERED:
bg_rect.width -= iArrowWidth
dc.DrawLine(bg_rect.x + bg_rect.width, rect.y, bg_rect.x + bg_rect.width, rect.y + rect.height)
else:
iArrowWidth -= 1
bg_rect.x += bg_rect.width - iArrowWidth
bg_rect.width = iArrowWidth
dc.DrawLine(bg_rect.x - 1, rect.y, bg_rect.x - 1, rect.y + rect.height)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
dc.SetPen(wx.TRANSPARENT_PEN)
if state & RIBBON_BUTTONBAR_BUTTON_ACTIVE_MASK:
dc.SetBrush(self._button_bar_active_background_brush)
else:
dc.SetBrush(self._button_bar_hover_background_brush)
dc.DrawRectangle(bg_rect.x, bg_rect.y, bg_rect.width, bg_rect.height)
dc.SetFont(self._button_bar_label_font)
dc.SetTextForeground(self._button_bar_label_colour)
self.DrawButtonBarButtonForeground(dc, rect, kind, state, label, bitmap_large, bitmap_small)
def DrawToolBarBackground(self, dc, wnd, rect):
"""
Draw the background for a L{RibbonToolBar} control.
:param `dc`: The device context to draw onto;
        :param `wnd`: The window which is being drawn onto. In most cases this will be
a L{RibbonToolBar}, but it doesn't have to be;
:param `rect`: The rectangle within which to draw. Some of this rectangle
will later be drawn over using L{DrawToolGroupBackground} and L{DrawTool},
but not all of it will (unless there is only a single group of tools).
"""
self.DrawPartialPanelBackground(dc, wnd, rect)
def DrawToolGroupBackground(self, dc, wnd, rect):
"""
Draw the background for a group of tools on a L{RibbonToolBar} control.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto. In most cases this will
be a L{RibbonToolBar}, but it doesn't have to be;
:param `rect`: The rectangle within which to draw. This rectangle is a union
of the individual tools' rectangles. As there are no gaps between tools, this
rectangle will be painted over exactly once by calls to L{DrawTool}. The
group background could therefore be painted by L{DrawTool}, though it can be
conceptually easier and more efficient to draw it all at once here. The
rectangle will be entirely within a rectangle on the same device context
previously painted with L{DrawToolBarBackground}.
"""
dc.SetPen(self._toolbar_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
bg_rect = wx.Rect(*rect)
bg_rect.Deflate(1, 1)
dc.GradientFillLinear(bg_rect, self._tool_background_colour, self._tool_background_gradient_colour, wx.SOUTH)
def DrawTool(self, dc, wnd, rect, bitmap, kind, state):
"""
Draw a single tool (for a L{RibbonToolBar} control).
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto. In most cases this will
be a L{RibbonToolBar}, but it doesn't have to be;
:param `rect`: The rectangle within which to draw. The size of this rectangle
        will be at least the size returned by L{RibbonMSWArtProvider.GetToolSize}, and the height of it will
be equal for all tools within the same group. The rectangle will be entirely
within a rectangle on the same device context previously painted with
L{DrawToolGroupBackground};
:param `bitmap`: The bitmap to use as the tool's foreground. If the tool is a
hybrid or dropdown tool, then the foreground should also contain a standard
dropdown button;
:param `kind`: The kind of tool to draw (normal, dropdown, or hybrid);
:param `state`: A combination of wx.RibbonToolBarToolState flags giving the
        state of the tool and its relative position within a tool group.
"""
bg_rect = wx.Rect(*rect)
bg_rect.Deflate(1, 1)
if state & RIBBON_TOOLBAR_TOOL_LAST == 0:
bg_rect.width += 1
is_custom_bg = (state & (RIBBON_TOOLBAR_TOOL_HOVER_MASK | RIBBON_TOOLBAR_TOOL_ACTIVE_MASK)) != 0
is_split_hybrid = kind == RIBBON_BUTTON_HYBRID and is_custom_bg
# Background
if is_custom_bg:
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._tool_hover_background_brush)
dc.DrawRectangle(bg_rect.x, bg_rect.y, bg_rect.width, bg_rect.height)
if state & RIBBON_TOOLBAR_TOOL_ACTIVE_MASK:
active_rect = wx.Rect(*bg_rect)
if kind == RIBBON_BUTTON_HYBRID:
active_rect.width -= 8
if state & RIBBON_TOOLBAR_TOOL_DROPDOWN_ACTIVE:
active_rect.x += active_rect.width
active_rect.width = 8
dc.SetBrush(self._tool_active_background_brush)
dc.DrawRectangle(active_rect.x, active_rect.y, active_rect.width, active_rect.height)
# Border
if is_custom_bg:
dc.SetPen(self._toolbar_hover_borden_pen)
else:
dc.SetPen(self._toolbar_border_pen)
if state & RIBBON_TOOLBAR_TOOL_FIRST == 0:
existing = dc.GetPixel(rect.x, rect.y + 1)
if existing == wx.NullColour or existing != self._toolbar_hover_borden_pen.GetColour():
dc.DrawLine(rect.x, rect.y + 1, rect.x, rect.y + rect.height - 1)
if is_custom_bg:
border_rect = wx.Rect(*bg_rect)
border_rect.Inflate(1, 1)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(border_rect.x, border_rect.y, border_rect.width, border_rect.height)
# Foreground
avail_width = bg_rect.GetWidth()
if kind != RIBBON_BUTTON_NORMAL:
avail_width -= 8
if is_split_hybrid:
dc.DrawLine(rect.x + avail_width + 1, rect.y, rect.x + avail_width + 1, rect.y + rect.height)
dc.DrawBitmap(self._toolbar_drop_bitmap, bg_rect.x + avail_width + 2, bg_rect.y + (bg_rect.height / 2) - 2, True)
dc.DrawBitmap(bitmap, bg_rect.x + (avail_width - bitmap.GetWidth()) / 2, bg_rect.y + (bg_rect.height - bitmap.GetHeight()) / 2, True)
| agpl-3.0 |
iarroyof/distributionalSemanticStabilityThesis | mklObj.py | 2 | 55729 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
__author__ = 'Ignacio Arroyo-Fernandez'
from modshogun import *
from tools.load import LoadMatrix
from sklearn.metrics import r2_score
import random
from math import sqrt
import numpy
from os import getcwd
from sys import stderr
from pdb import set_trace as st
def open_configuration_file(fileName):
""" Loads the input data configuration file. Lines which start with '#' are ignored. No lines different from
configuration ones (even blank ones) at top are allowed. The amount of lines at top are exclusively either three or
five (see below for allowed contents).
The first line may be specifying the train data file in sparse market matrix format.
The second line may be specifying the test data file in sparse market matrix format.
The third line may be specifying the train labels file. An scalar by line must be associated as label of a vector
in training data.
The fourth line may be specifying the test labels file. An scalar by line must be associated as label of a vector
in test data.
The fifth line indicates options for the MKL object:
First character : Problem type : valid_options = {r: regression, b: binary, m: multiclass}
Second character: Machine mode : valid_options = {l: learning_mode, p: pattern_recognition_mode}
Any other characters and amount of they will be ignored or caught as errors.
For all configuration lines no other kind of content is allowed (e.g. comments in line ahead).
Training data (and its labels) is optional. Whenever no five configuration lines are detected in this file,
the first line will be considered as the test data file name, the second line as de test labels and third line as
the MKL options. An error exception will be raised otherwise (e.g. no three or no five configuration lines).
"""
with open(fileName) as f:
configuration_lines = f.read().splitlines()
problem_modes = {'r':'regression', 'b':'binary', 'm':'multiclass'}
machine_modes = {'l':'learning', 'p':'pattern_recognition'}
cls = 0 # Counted number of configuration lines from top.
ncls = 5 # Number of configuration lines allowed.
for line in configuration_lines:
if not line.startswith('#'):
cls += 1
else:
break
if cls == ncls:
mode = configuration_lines[4]
configuration = {}
if len(mode) == 2:
try:
configuration['problem_mode'] = problem_modes[mode[0]]
configuration['machine_mode'] = machine_modes[mode[1]]
except KeyError:
sys.stderr.write('\nERROR: Incorrect configuration file. Invalid machine mode. See help for mklObj.open_configuration_file().')
else:
sys.stderr.write('\nERROR: Incorrect configuration file. Invalid number of lines. See help for mklObj.open_configuration_file().')
exit()
Null = ncls # Null index
    if configuration['machine_mode'] == 'learning': # According to availability of training files, indexes are set.
trf = 0; tsf = 1; trlf = 2 # training_file, test_file, training_labels_file, test_labels_file, mode
tslf = 3; mf = Null
configuration_lines[ncls] = None
del(configuration_lines[ncls+1:]) # All from the first '#' onwards is ignored.
elif configuration['machine_mode'] == 'pattern_recognition':
trf = 0; tsf = 1; trlf = Null # training_file, test_file, test_labels_file, mode, model_file
tslf = 2; mf = 3
configuration_lines[ncls] = None
del(configuration_lines[ncls+1:])
configuration['training_file'] = configuration_lines[trf]
configuration['test_file'] = configuration_lines[tsf]
configuration['training_labels_file'] = configuration_lines[trlf]
configuration['test_labels_file'] = configuration_lines[tslf]
configuration['model_file'] = configuration_lines[mf]
return configuration
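# Example configuration file layout (illustrative only; file names are hypothetical),
# following the five-line learning-mode format described in the docstring above:
#
#     train_data.mtx
#     test_data.mtx
#     train_labels.txt
#     test_labels.txt
#     rl
#     # <- lines from the first '#' onwards are ignored ('r' = regression, 'l' = learning).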
# Loading toy multiclass data from files
def load_multiclassToy(dataRoute, fileTrain, fileLabels):
""" :returns: [RealFeatures(training_data), RealFeatures(test_data), MulticlassLabels(train_labels),
MulticlassLabels(test_labels)]. It is a set of Shogun training objects for raising a 10-class classification
problem. This function is a modified version from http://www.shogun-toolbox.org/static/notebook/current/MKL.html
    Pay attention to the input parameters because their documentation is valid for acquiring data for any multiclass
    problem with Shogun.
    :param dataRoute: The directory of the plain text file containing the train and test data.
    :param fileTrain: The name of the text file containing the train and test data. Each row of the file contains a
    sample vector and each column is a dimension of such a sample vector.
    :param fileLabels: The name of the text file containing the train and test labels. Each row must correspond to a
    sample in fileTrain. The file must be in the same directory specified by dataRoute.
"""
lm = LoadMatrix()
dataSet = lm.load_numbers(dataRoute + fileTrain)
labels = lm.load_labels(dataRoute + fileLabels)
return (RealFeatures(dataSet.T[0:3 * len(dataSet.T) / 4].T), # Return the training set, 3/4 * dataSet
RealFeatures(dataSet.T[(3 * len(dataSet.T) / 4):].T), # Return the test set, 1/4 * dataSet
MulticlassLabels(labels[0:3 * len(labels) / 4]), # Return corresponding train and test labels
MulticlassLabels(labels[(3 * len(labels) / 4):]))
# 2D Toy data generator
def generate_binToy(file_data = None, file_labels = None):
""":return: [RealFeatures(train_data),RealFeatures(train_data),BinaryLabels(train_labels),BinaryLabels(test_labels)]
This method generates random 2D training and test data for binary classification. The labels are {-1, 1} vectors.
"""
num = 30
num_components = 4
means = numpy.zeros((num_components, 2))
means[0] = [-1, 1]
means[1] = [2, -1.5]
means[2] = [-1, -3]
means[3] = [2, 1]
covs = numpy.array([[1.0, 0.0], [0.0, 1.0]])
gmm = GMM(num_components)
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
[gmm.set_nth_cov(covs, i) for i in range(num_components)]
gmm.set_coef(numpy.array([1.0, 0.0, 0.0, 0.0]))
xntr = numpy.array([gmm.sample() for i in xrange(num)]).T
xnte = numpy.array([gmm.sample() for i in xrange(5000)]).T
gmm.set_coef(numpy.array([0.0, 1.0, 0.0, 0.0]))
xntr1 = numpy.array([gmm.sample() for i in xrange(num)]).T
xnte1 = numpy.array([gmm.sample() for i in xrange(5000)]).T
gmm.set_coef(numpy.array([0.0, 0.0, 1.0, 0.0]))
xptr = numpy.array([gmm.sample() for i in xrange(num)]).T
xpte = numpy.array([gmm.sample() for i in xrange(5000)]).T
gmm.set_coef(numpy.array([0.0, 0.0, 0.0, 1.0]))
xptr1 = numpy.array([gmm.sample() for i in xrange(num)]).T
xpte1 = numpy.array([gmm.sample() for i in xrange(5000)]).T
if not file_data:
return (RealFeatures(numpy.concatenate((xntr, xntr1, xptr, xptr1), axis=1)), # Train Data
RealFeatures(numpy.concatenate((xnte, xnte1, xpte, xpte1), axis=1)), # Test Data
BinaryLabels(numpy.concatenate((-numpy.ones(2 * num), numpy.ones(2 * num)))), # Train Labels
BinaryLabels(numpy.concatenate((-numpy.ones(10000), numpy.ones(10000))))) # Test Labels
else:
data_set = numpy.concatenate((numpy.concatenate((xntr, xntr1, xptr, xptr1), axis=1),
numpy.concatenate((xnte, xnte1, xpte, xpte1), axis=1)), axis = 1).T
labels = numpy.concatenate((numpy.concatenate((-numpy.ones(2 * num), numpy.ones(2 * num))),
numpy.concatenate((-numpy.ones(10000), numpy.ones(10000)))), axis = 1).astype(int)
indexes = range(len(data_set))
numpy.random.shuffle(indexes)
fd = open(file_data, 'w')
fl = open(file_labels, 'w')
for i in indexes:
fd.write('%f %f\n' % (data_set[i][0],data_set[i][1]))
fl.write(str(labels[i])+'\n')
fd.close()
fl.close()
#numpy.savetxt(file_data, data_set, fmt='%f')
#numpy.savetxt(file_labels, labels, fmt='%d')
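# Usage sketch (illustrative, not part of the original module): calling generate_binToy()
# with no arguments returns the Shogun feature/label objects directly, while passing
# hypothetical file names dumps the shuffled data set and labels to disk instead:
#
#     feats_tr, feats_ts, labs_tr, labs_ts = generate_binToy()
#     generate_binToy(file_data='toy_data.txt', file_labels='toy_labels.txt')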
def load_binData(tr_ts_portion = None, fileTrain = None, fileLabels = None, dataRoute = None):
if not dataRoute:
dataRoute = getcwd()+'/'
assert fileTrain and fileLabels # One (or both) of the input files are not given.
assert (tr_ts_portion > 0.0 and tr_ts_portion <= 1.0) # The proportion of dividing the data set into train and test is in (0, 1]
lm = LoadMatrix()
dataSet = lm.load_numbers(dataRoute + fileTrain)
labels = lm.load_labels(dataRoute + fileLabels)
return (RealFeatures(dataSet.T[0:tr_ts_portion * len(dataSet.T)].T), # Return the training set, 3/4 * dataSet
RealFeatures(dataSet.T[tr_ts_portion * len(dataSet.T):].T), # Return the test set, 1/4 * dataSet
BinaryLabels(labels[0:tr_ts_portion * len(labels)]), # Return corresponding train and test labels
BinaryLabels(labels[tr_ts_portion * len(labels):]))
def load_regression_data(fileTrain = None, fileTest = None, fileLabelsTr = None, fileLabelsTs = None, sparse=False):
""" This method loads data from sparse mtx file format ('CSR' preferably. See Python sci.sparse matrix
format, also referred to as matrix market read and write methods). Label files should contain a column of
these labels, e.g. see the contents of a three labels file:
1.23
-102.45
2.2998438943
    Loading only test labels is allowed (training labels are optional). In pattern_recognition mode no
    training labels are required; None is returned for the corresponding Shogun label object. The feature list
    returned is:
    [features_tr, features_ts, labels_tr, labels_ts]
    Returned data is of float type (dtype='float64'). This is the smallest data type allowed by Shogun, given that
    the sparse distance functions do not accept other ones, e.g. short (float32).
"""
    assert fileTrain and fileTest and fileLabelsTs # Test labels, as well as the test and train data sets, must be specified.
from scipy.io import mmread
lm = LoadMatrix()
if sparse:
sci_data_tr = mmread(fileTrain).asformat('csr').astype('float64').T
        features_tr = SparseRealFeatures(sci_data_tr)                       # Reformatted as CSR and 'float64' type for
sci_data_ts = mmread(fileTest).asformat('csr').astype('float64').T # compatibility with SparseRealFeatures
features_ts = SparseRealFeatures(sci_data_ts)
else:
features_tr = RealFeatures(lm.load_numbers(fileTrain).astype('float64'))
features_ts = RealFeatures(lm.load_numbers(fileTest).astype('float64'))
labels_ts = RegressionLabels(lm.load_labels(fileLabelsTs))
if fileTrain and fileLabelsTr: # sci_data_x: Any sparse data type in the file.
labels_tr = RegressionLabels(lm.load_labels(fileLabelsTr))
else:
labels_tr = None
return features_tr, features_ts, labels_tr, labels_ts
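# Usage sketch (illustrative; file names are hypothetical, not part of the original module):
#
#     feats_tr, feats_ts, labs_tr, labs_ts = load_regression_data(
#         fileTrain='train_data.mtx', fileTest='test_data.mtx',
#         fileLabelsTr='train_labels.txt', fileLabelsTs='test_labels.txt', sparse=True)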
# Exception handling:
class customException(Exception):
""" This exception prevents training inconsistencies. It could be edited for accepting a complete
dictionary of exceptions if desired.
"""
def __init__(self, message):
self.parameter = message
def __str__(self):
return repr(self.parameter)
# Basis kernel parameter generation:
def sigmaGen(self, hyperDistribution, size, rango, parameters):
""" :return: list of float
This module generates the pseudorandom vector of widths for basis Gaussian kernels according to a distribution, i.e.
hyperDistribution =
{'linear',
'quadratic',
'loggauss'*,
'gaussian'*,
                          'triangular', # parameters[0] is the median of the distribution. parameters[1] has no effect.
'pareto',
'beta'*,
'gamma',
'weibull'}.
    Names marked with * require parameters, e.g. for 'gaussian', parameters = [mean, width]. The input 'size' is the
    number of segments into which the distribution domain will be discretized. The 'rango' input gives the minimum and
    maximum values the obtained distributed values may take. The 'parameters' of these weight vector distributions are
    set to common values of each distribution by default, but they can be modified.
    :param hyperDistribution: string
    :param size: It is the number of basis kernels for the MKL object.
    :param rango: It is the range to which the basis kernel parameters will pertain. For some basis kernel families
    this input parameter has no effect.
    :param parameters: It is a list of parameters of the distribution of the random weights, e.g. for a gaussian
    distribution with mean zero and variance 1, parameters = [0, 1]. For some basis kernel families this input parameter
    has no effect: {'linear', 'quadratic', 'triangular', 'pareto', 'gamma', 'weibull'}
.. seealso: fit_kernel() function documentation.
"""
    # Validating the inputs
    assert (isinstance(size, int) and size > 0)
    assert (rango[0] < rango[1] and len(rango) == 2)
    # .. todo: Revise the linspaces of the other distributions. They must be as consistent as the
    # .. todo: Gaussian one. Change 'is' when verifying equality between strings (PEP008 recommendation).
sig = []
if hyperDistribution == 'linear':
line = numpy.linspace(rango[0], rango[1], size*2)
sig = random.sample(line, size)
return sig
elif hyperDistribution == 'quadratic':
sig = numpy.square(random.sample(numpy.linspace(int(sqrt(rango[0])), int(sqrt(rango[1]))), size))
return sig
elif hyperDistribution == 'gaussian':
assert parameters[1] > 0 # The width is greater than zero?
i = 0
while i < size:
numero = random.gauss(parameters[0], parameters[1])
if rango[0] <= numero <= rango[1]: # Validate the initial point of
sig.append(numero) # 'range'. If not met, loop does
i += 1 # not end, but resets
# If met, the number is appended
return sig # to 'sig' width list.
elif hyperDistribution == 'triangular':
assert rango[0] <= parameters[0] <= rango[1] # The median is in the range?
sig = numpy.random.triangular(rango[0], parameters[0], rango[1], size)
return sig
elif hyperDistribution == 'beta':
assert (parameters[0] >= 0 and parameters[1] >= 0) # Alpha and Beta parameters are non-negative?
sig = numpy.random.beta(parameters[0], parameters[1], size) * (rango[1] - rango[0]) + rango[0]
return sig
elif hyperDistribution == 'pareto':
return numpy.random.pareto(5, size=size) * (rango[1] - rango[0]) + rango[0]
elif hyperDistribution == 'gamma':
return numpy.random.gamma(shape=1, size=size) * (rango[1] - rango[0]) + rango[0]
elif hyperDistribution == 'weibull':
return numpy.random.weibull(2, size=size) * (rango[1] - rango[0]) + rango[0]
elif hyperDistribution == 'loggauss':
assert parameters[1] > 0 # The width is greater than zero?
i = 0
while i < size:
numero = random.lognormvariate(parameters[0], parameters[1])
if numero > rango[0] and numero < rango[1]:
sig.append(numero)
i += 1
return sig
else:
print 'The entered hyperparameter distribution is not allowed: '+hyperDistribution
#pdb.set_trace()
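# A minimal usage sketch for sigmaGen (the values are illustrative, not tuned). Note that the function takes the
# calling object as its first argument; None is enough here because 'self' is not used inside the function body.
# Kept inside a helper that is never called automatically, so importing this module stays side-effect free.
def _example_sigmaGen():
    widths = sigmaGen(None, hyperDistribution='gaussian', size=5,
                      rango=[1, 50], parameters=[25.0, 10.0])   # mean 25, std 10, clipped to [1, 50]
    return sorted(widths)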
# Combining kernels
def genKer(self, featsL, featsR, basisFam, widths=[5.0, 4.0, 3.0, 2.0, 1.0], sparse = False):
""":return: Shogun CombinedKernel object.
This module generates a list of basis kernels. These kernels are tuned according to the vector ''widths''. Input
parameters ''featsL'' and ''featsR'' are Shogun feature objects. In the case of a learnt RKHS, these both objects
should be derived from the training SLM vectors, by means of the Shogun constructor realFeatures(). This module also
appends basis kernels to a Shogun combinedKernel object.
The kernels to be append are left in ''combKer'' object (see code), which is returned. We have analyzed some basis
families available in Shogun, so possible string values of 'basisFam' are:
basisFam = ['gaussian',
'inverseQuadratic',
'polynomial',
'power',
'rationalQuadratic',
'spherical',
'tstudent',
'wave',
'wavelet',
'cauchy',
'exponential']
"""
allowed_sparse = ['gaussian', 'polynomial'] # Change this assertion list and function if different kernels are needed.
assert not (featsL.get_feature_class() == featsR.get_feature_class() == 'C_SPARSE') or basisFam in allowed_sparse # Sparse type is not compatible with specified kernel or feature types are different.
kernels = []
if basisFam == 'gaussian':
for w in widths:
k=GaussianKernel()
#k.init(featsL, featsR)
#st()
kernels.append(k)
kernels[-1].set_width(w)
kernels[-1].init(featsL, featsR)
#st()
    elif basisFam == 'inverseQuadratic': # For this kernel (and the others below) it is necessary to fit the
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2) # distance matrix at this moment k = 2 is for l_2 norm
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(InverseMultiQuadricKernel(0, w, dst))
elif basisFam == 'polynomial':
for w in widths:
kernels.append(PolyKernel(0, w, False))
    elif basisFam == 'power': # At least for images, the used norm does not make a difference in performance
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(PowerKernel(0, w, dst))
    elif basisFam == 'rationalQuadratic': # At least for images, using the 3-norm makes a difference
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2) # in performance
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(RationalQuadraticKernel(0, w, dst))
    elif basisFam == 'spherical': # At least for images, the used norm does not make a difference in performance
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(SphericalKernel(0, w, dst))
    elif basisFam == 'tstudent': # At least for images, the used norm does not make a difference in performance
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(TStudentKernel(0, w, dst))
    elif basisFam == 'wave': # At least for images, the used norm does not make a difference in performance
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(WaveKernel(0, w, dst))
    elif basisFam == 'wavelet' and not sparse: # At least for images the performance with this kernel is very low.
        for w in widths:                       # Analysing its parameters remains pending for now.
kernels.append(WaveletKernel(0, w, 0))
elif basisFam == 'cauchy':
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(CauchyKernel(0, w, dst))
elif basisFam == 'exponential': # For this kernel it is necessary specifying features at the constructor
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(ExponentialKernel(featsL, featsR, w, dst, 0))
elif basisFam == 'anova' and not sparse: # This kernel presents a warning in training:
"""RuntimeWarning: [WARN] In file /home/iarroyof/shogun/src/shogun/classifier/mkl/MKLMulticlass.cpp line
198: CMKLMulticlass::evaluatefinishcriterion(...): deltanew<=0.Switching back to weight norsm
difference as criterion.
"""
for w in widths:
kernels.append(ANOVAKernel(0, w))
else:
raise NameError('Unknown Kernel family name!!!')
combKer = CombinedKernel()
#features_tr = CombinedFeatures()
for k in kernels:
combKer.append_kernel(k)
#features_tr.append_feature_obj(featsL)
#combKer.init(features_tr, features_tr)
#combKer.init(featsL,featsR)
return combKer#, features_tr
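# A minimal sketch chaining sigmaGen and genKer (it assumes feats_tr is a RealFeatures object built beforehand,
# e.g. with the loaders above). The returned CombinedKernel holds one GaussianKernel per width. Kept inside a
# helper that is never called automatically, so importing this module stays side-effect free.
def _example_genKer(feats_tr):
    widths = sorted(sigmaGen(None, hyperDistribution='linear', size=4, rango=[1, 50], parameters=[0, 0]))
    comb_kernel = genKer(None, feats_tr, feats_tr, basisFam='gaussian', widths=widths, sparse=False)
    return comb_kernel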
# Defining the compounding kernel object
class mklObj(object):
"""Default self definition of the Multiple Kernel Learning object. This object uses previously defined methods for
generating a linear combination of basis kernels that can be constituted from different families. See at
fit_kernel() function documentation for details. This function trains the kernel weights. The object has other
member functions offering utilities. See the next instantiation and using example:
import mklObj as mk
kernel = mk.mklObj(weightRegNorm = 2,
                           mklC = 2,            # This is the C parameter of the underlying SVM.
SVMepsilon = 1e-5,
threads = 2,
MKLepsilon = 0.001,
                           problem = 'multiclass',
verbose = False) # IMPORTANT: Don't use this feature (True) if you are working in pipe mode.
# The object will print undesired outputs to the stdout.
    The above values are the defaults, so if they are suitable for you it is possible to instantiate the object by simply
    stating: kernel = mk.mklObj(). It is even possible to modify a subset of the input parameters (keeping the others as
    default): kernel = mk.mklObj(weightRegNorm = 1, mklC = 10, SVMepsilon = 1e-2). See the documentation of each setter
    below for the parameters that can be set without new instantiations.
    Now, once the main parameters have been set, fit the kernel:
kernel.fit_kernel(featsTr = feats_train,
targetsTr = labelsTr,
featsTs = feats_test,
targetsTs = labelsTs,
kernelFamily = 'gaussian',
                      randomRange = [50, 200],   # For homogeneous poly kernels these two parameter
                      randomParams = [50, 20],   # sets have no effect. No basis kernel parameters
                      hyper = 'linear',          # Also without effect when the kernel family is polynomial
                      pKers = 3)                 # or some other powering form.
Once the MKL object has been fitted, you can get what you need from it. See getters documentation listed below.
"""
def __init__(self, weightRegNorm=2.0, mklC=2.0, SVMepsilon=0.01, model_file = None,
threads=4, MKLepsilon=0.01, problem='regression', verbose=False, mode = 'learning', sparse = False):
"""Object initialization. This procedure is regardless of the input data, basis kernels and corresponding
hyperparameters (kernel fitting).
"""
mkl_problem_object = {'regression':(MKLRegression, [mklC, mklC]),
'binary': (MKLClassification, [mklC, mklC]),
'multiclass': (MKLMulticlass, mklC)}
self.mode = mode
self.sparse = sparse
        assert not model_file and mode != 'pattern_recognition' or (
            model_file and mode == 'pattern_recognition')  # A model file must be given if and only if mode is 'pattern_recognition'.
self.__problem = problem
self.verbose = verbose # inner training process verbose flag
self.Matrx = False # Kind of returned learned kernel object. See getter documentation of these
self.expansion = False # object configuration parameters for details. Only modifiable by setter.
self.__testerr = 0
if mode == 'learning':
try:
self.mkl = mkl_problem_object[problem][0]()
self.mklC = mkl_problem_object[problem][1]
except KeyError:
sys.stderr.write('Error: Given problem type is not valid.')
exit()
#################<<<<<<<<<<<>>>>>>>>>>
self.mkl.set_C_mkl(5.0) # This is the regularization parameter for the MKL weights regularizer (NOT the SVM C)
self.weightRegNorm = weightRegNorm # Setting the basis' weight vector norm
self.SVMepsilon = SVMepsilon # setting the transducer stop (convergence) criterion
self.MKLepsilon = MKLepsilon # setting the MKL stop criterion. The value suggested by
# Shogun examples is 0.001. See setter docs for details
elif mode == 'pattern_recognition':
[self.mkl, self.mkl_model] = self.load_mkl_model(file_name = model_file, model_type = problem)
self.sigmas = self.mkl_model['widths']
self.threads = threads # setting number of training threads. Verify functionality!!
def fit_pretrained(self, featsTr, featsTs):
""" This method sets up a MKL machine by using parameters from self.mkl_model preloaded dictionary which
contains preptrained model paremeters, e.g. weights and widths.
"""
self.ker = genKer(self, featsTr, featsTs, sparse = self.sparse,
basisFam = self.family_translation[self.mkl_model['family']], widths = self.sigmas)
self.ker.set_subkernel_weights(self.mkl_model['weights']) # Setting up pretrained weights to the
self.ker.init(featsTr, featsTs) # new kernel
# Self Function for kernel generation
def fit_kernel(self, featsTr, targetsTr, featsTs, targetsTs, randomRange=[1, 50], randomParams=[1, 1],
hyper='linear', kernelFamily='gaussian', pKers=3):
""" :return: CombinedKernel Shogun object.
This method is used for training the desired compound kernel. See documentation of the 'mklObj'
object for using example. 'featsTr' and 'featsTs' are the training and test data respectively.
'targetsTr' and 'targetsTs' are the training and test labels, respectively. All they must be Shogun
'RealFeatures' and 'MulticlassLabels' objects respectively.
The 'randomRange' parameter defines the range of numbers from which the basis kernel parameters will be
drawn, e.g. Gaussian random widths between 1 and 50 (the default). The 'randomParams' input parameter
states the parameters of the pseudorandom distribution of the basis kernel parameters to be drawn, e.g.
Gaussian-pseudorandom-generated weights with std. deviation equal to 1 and mean equal to 1 (the default).
The 'hyper' input parameter defines the distribution of the pseudorandom-generated weights. See
documentation of the sigmaGen() method of the 'mklObj' object to see a list of possible basis kernel
parameter distributions. The 'kernelFamily' input parameter is the basis kernel family to be append to
the desired compound kernel if you select, e.g., the default 'gaussian' family, all elements of the
learned linear combination will be gaussians (each differently weighted and parametrized). See
documentation of the genKer() method of the 'mklObj' object to see a list of allowed basis kernel
families. The 'pKers' input parameter defines the size of the learned kernel linear combination, i.e.
        how many basis kernels will be weighted in training and therefore how many coefficients the Fourier series of
        the data will have (the default is 3).
        .. note:: In the cases of kernelFamily = {'polynomial' or 'power' or 'tstudent' or 'anova'}, the input
        parameters {'randomRange', 'randomParams', 'hyper'} have no effect, because these kernel families do not
        require basis kernel parameters.
:param featsTr: RealFeatures Shogun object conflating the training data.
:param targetsTr: MulticlassLabels Shogun object conflating the training labels.
        :param featsTs: RealFeatures Shogun object conflating the test data.
        :param targetsTs: MulticlassLabels Shogun object conflating the test labels.
        :param randomRange: It is the range to which the basis kernel parameters will pertain. For some basis
        kernel families this input parameter has no effect.
        :param randomParams: It is a list of parameters of the distribution of the random weights, e.g. for a
        gaussian distribution with mean zero and variance 1, parameters = [0, 1]. For some basis kernel
        families this input parameter has no effect.
:param hyper: string which specifies the name of the basis kernel parameter distribution. See
documentation for sigmaGen() function for viewing allowed strings (names).
:param kernelFamily: string which specifies the name of the basis kernel family. See documentation for
genKer() function for viewing allowed strings (names).
:param pKers: This is the number of basis kernels for the MKL object (linear combination).
"""
# Inner variable copying:
self._featsTr = featsTr
self._targetsTr = targetsTr
self._hyper = hyper
self._pkers = pKers
self.basisFamily = kernelFamily
if self.verbose: # Printing the training progress
print '\nNacho, multiple <' + kernelFamily + '> Kernels have been initialized...'
print "\nInput main parameters: "
print "\nHyperarameter distribution: ", self._hyper, "\nLinear combination size: ", pKers, \
'\nWeight regularization norm: ', self.weightRegNorm, \
'Weight regularization parameter: ',self.mklC
if self.__problem == 'multiclass':
print "Classes: ", targetsTr.get_num_classes()
elif self.__problem == 'binary':
print "Classes: Binary"
elif self.__problem == 'regression':
print 'Regression problem'
# Generating the list of subkernels. Creating the compound kernel. For monomial-nonhomogeneous (polynomial)
# kernels the hyperparameters are uniquely the degree of each monomial, in the form of a sequence. MKL finds the
# coefficient (weight) for each monomial in order to find a compound polynomial.
if kernelFamily == 'polynomial' or kernelFamily == 'power' or \
kernelFamily == 'tstudent' or kernelFamily == 'anova':
self.sigmas = range(1, pKers+1)
self.ker = genKer(self, self._featsTr, self._featsTr, basisFam=kernelFamily, widths=self.sigmas, sparse = self.sparse)
else:
# We have called 'sigmas' to any basis kernel parameter, regardless if the kernel is Gaussian or not. So
# let's generate the widths:
self.sigmas = sorted(sigmaGen(self, hyperDistribution=hyper, size=pKers,
rango=randomRange, parameters=randomParams))
try:
z = self.sigmas.index(0)
self.sigmas[z] = 0.1
except ValueError:
pass
try: # Verifying if number of kernels is greater or equal to 2
if pKers <= 1 or len(self.sigmas) < 2:
raise customException('Senseless MKLClassification use!!!')
except customException, (instance):
            print 'Caught: ' + instance.parameter
print "-----------------------------------------------------"
print """The multikernel learning object is meaningless for less than 2 basis
kernels, i.e. pKers <= 1, so 'mklObj' couldn't be instantiated."""
print "-----------------------------------------------------"
self.ker = genKer(self, self._featsTr, self._featsTr, basisFam=kernelFamily, widths=self.sigmas, sparse = self.sparse)
if self.verbose:
print 'Widths: ', self.sigmas
# Initializing the compound kernel
# combf_tr = CombinedFeatures()
# combf_tr.append_feature_obj(self._featsTr)
# self.ker.init(combf_tr, combf_tr)
try: # Verifying if number of kernels was greater or equal to 2 after training
if self.ker.get_num_subkernels() < 2:
raise customException(
'Multikernel coefficients were less than 2 after training. Revise object settings!!!')
except customException, (instance):
            print 'Caught: ' + instance.parameter
# Verbose for learning surveying
if self.verbose:
print '\nKernel fitted...'
# Initializing the transducer for multiclassification
features_tr = CombinedFeatures()
features_ts = CombinedFeatures()
for k in self.sigmas:
features_tr.append_feature_obj(self._featsTr)
features_ts.append_feature_obj(featsTs)
self.ker.init(features_tr, features_tr)
self.mkl.set_kernel(self.ker)
self.mkl.set_labels(self._targetsTr)
# Train to return the learnt kernel
if self.verbose:
print '\nLearning the machine coefficients...'
# ------------------ The most time consuming code segment --------------------------
self.crashed = False
try:
self.mkl.train()
except SystemError:
self.crashed = True
self.mkl_model = self.keep_mkl_model(self.mkl, self.ker, self.sigmas) # Let's keep the trained model
if self.verbose: # for future use.
print 'Kernel trained... Weights: ', self.weights
# Evaluate the learnt Kernel. Here it is assumed 'ker' is learnt, so we only need for initialize it again but
# with the test set object. Then, set the initialized kernel to the mkl object in order to 'apply'.
self.ker.init(features_tr, features_ts) # Now with test examples. The inner product between training
#st()
def pattern_recognition(self, targetsTs):
self.mkl.set_kernel(self.ker) # and test examples generates the corresponding Gram Matrix.
if not self.crashed:
out = self.mkl.apply() # Applying the obtained Gram Matrix
else:
out = RegressionLabels(-1.0*numpy.ones(targetsTs.get_num_labels()))
self.estimated_out = list(out.get_labels())
# ----------------------------------------------------------------------------------
if self.__problem == 'binary': # If the problem is either binary or multiclass, different
evalua = ErrorRateMeasure() # performance measures are computed.
self.__testerr = 100 - evalua.evaluate(out, targetsTs) * 100
elif self.__problem == 'multiclass':
evalua = MulticlassAccuracy()
self.__testerr = evalua.evaluate(out, targetsTs) * 100
elif self.__problem == 'regression': # Determination Coefficient was selected for measuring performance
#evalua = MeanSquaredError()
#self.__testerr = evalua.evaluate(out, targetsTs)
self.__testerr = r2_score(self.estimated_out, list(targetsTs.get_labels()))
# Verbose for learning surveying
if self.verbose:
print 'Kernel evaluation ready. The precision was: ', self.__testerr, '%'
def keep_mkl_model(self, mkl, kernel, widths, file_name = None):
""" Python reimplementated function for saving a pretrained MKL machine.
This method saves a trained MKL machine to the file 'file_name'. If not 'file_name' is given, a
dictionary 'mkl_machine' containing parameters of the given trained MKL object is returned.
Here we assumed all subkernels of the passed CombinedKernel are of the same family, so uniquely the
first kernel is used for verifying if the passed 'kernel' is a Gaussian mixture. If it is so, we insert
the 'widths' to the model dictionary 'mkl_machine'. An error is returned otherwise.
"""
mkl_machine = {}
support=[]
mkl_machine['num_support_vectors'] = mkl.get_num_support_vectors()
mkl_machine['bias']=mkl.get_bias()
for i in xrange(mkl_machine['num_support_vectors']):
support.append((mkl.get_alpha(i), mkl.get_support_vector(i)))
mkl_machine['support'] = support
mkl_machine['weights'] = list(kernel.get_subkernel_weights())
mkl_machine['family'] = kernel.get_first_kernel().get_name()
mkl_machine['widths'] = widths
if file_name:
f = open(file_name,'w')
f.write(str(mkl_machine)+'\n')
f.close()
else:
return mkl_machine
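    # Sketch (for reference only) of the dictionary produced by keep_mkl_model above; the numeric values are
    # hypothetical, but the keys mirror the assignments in the method:
    #   {'num_support_vectors': 3,
    #    'bias': 0.17,
    #    'support': [(alpha_0, sv_idx_0), (alpha_1, sv_idx_1), (alpha_2, sv_idx_2)],
    #    'weights': [0.6, 0.3, 0.1],
    #    'family': 'GaussianKernel',
    #    'widths': [2.0, 5.0, 11.0]}
    # load_mkl_model below expects this dictionary nested under the 'learned_model' key of the stored file.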
def load_mkl_model(self, file_name, model_type = 'regression'):
""" This method receives a file name (if it is not in pwd, full path must be given) and a model type to
be loaded {'regression', 'binary', 'multiclass'}. The loaded file must contain a t least a dictionary at
its top. This dictionary must contain a key called 'model' whose value must be a dictionary, from which
model parameters will be read. For example:
{'key_0':value, 'key_1':value,..., 'model':{'family':'PolyKernel', 'bias':1.001,...}, key_n:value}
Four objects are returned. The MKL model which is tuned to those parameters stored at the given file. A
numpy array containing learned weights of a CombinedKernel. The widths corresponding to returned kernel
weights and the kernel family. Be careful with the kernel family you are loading because widths no
necessarily are it, but probably 'degrees', e.g. for the PolyKernel family.
The Combined kernel must be instantiated outside this method, thereby loading to it corresponding
weights and widths.
"""
with open(file_name, 'r') as pointer:
mkl_machine = eval(pointer.read())['learned_model']
if model_type == 'regression':
mkl = MKLRegression() # A new two-class MKL object
elif model_type == 'binary':
mkl = MKLClassification()
elif model_type == 'multiclass':
mkl = MKLMulticlass()
else:
sys.stderr.write('ERROR: Unknown problem type in model loading.')
exit()
mkl.set_bias(mkl_machine['bias'])
mkl.create_new_model(mkl_machine['num_support_vectors']) # Initialize the inner SVM
for i in xrange(mkl_machine['num_support_vectors']):
mkl.set_alpha(i, mkl_machine['support'][i][0])
mkl.set_support_vector(i, mkl_machine['support'][i][1])
mkl_machine['weights'] = numpy.array(mkl_machine['weights'])
return mkl, mkl_machine
# Getters (properties):
@property
def family_translation(self):
"""
"""
self.__family_translation = {'PolyKernel':'polynomial', 'GaussianKernel':'gaussian',
'ExponentialKernel':'exponential'}
return self.__family_translation
@property
def mkl_model(self):
""" This property stores the MKL model parameters learned by the self-object. These parameters can be
stored into a file for future configuration of a non-trained MKL new MKL object. Also probably passed
onwards for showing results.
"""
return self.__mkl_model
@property
def estimated_out(self):
""" This property is the mkl result after applying.
"""
return self.__estimated_out
@property
def compoundKernel(self):
"""This method is used for getting the kernel object, i.e. the learned MKL object, which can be unwrapped
into its matrix form instead of getting a Shogun object. Use the input parameters Matrix = True,
expansion = False for getting the compound matrix of reals. For instance:
mklObj.Matrix = True
mklObj.expansion = False
kernelMatrix = mklObj.compoundKernel
Use Matrix = True, expansion = True for getting the expanded linear combination of matrices and weights
separately, for instance:
mklObj.Matrix = True
mklObj.expansion = True
basis, weights = mklObj.compoundKernel
Use Matrix = False, expansion = False for getting the learned kernel Shogun object, for instance:
mklObj.Matrix = False
mklObj.expansion = False
kernelObj = mklObj.compoundKernel
        .. warning:: Be careful with this latter variant of the method because of the large amount of needed
physical memory.
"""
if self.Matrx:
kernels = []
size = self.ker.get_num_subkernels()
for k in xrange(0, size - 1):
kernels.append(self.ker.get_kernel(k).get_kernel_matrix())
ws = self.weights
if self.expansion:
return kernels, ws # Returning the full expansion of the learned kernel.
else:
return sum(kernels * ws) # Returning the matrix linear combination, in other words,
else: # a projector matrix representation.
return self.ker # If matrix representation is not required, only the Shogun kernel
# object is returned.
@property
def sigmas(self):
"""This method is used for getting the current set of basis kernel parameters, i.e. widths, in the case
of the gaussian basis kernel.
:rtype : list of float
"""
return self.__sigmas
@property
def verbose(self):
"""This is the verbose flag, which is used for monitoring the object training procedure.
IMPORTANT: Don't use this feature (True) if you are working in pipe mode. The object will print undesired
outputs to the stdout.
:rtype : bool
"""
return self._verbose
@property
def Matrx(self):
"""This is a boolean property of the object. Its aim is getting and, mainly, setting the kind of object
we want to obtain as learned kernel, i.e. a Kernel Shogun object or a Kernel Matrix whose entries are
reals. The latter could require large amounts of physical memory. See the mklObj.compoundKernel property
documentation in this object for using details.
:rtype :bool
"""
return self.__Matrx
@property
def expansion(self):
"""This is a boolean property. Its aim is getting and, mainly, setting the mklObj object to return the
complete expansion of the learned kernel, i.e. a list of basis kernel matrices as well as their
corresponding coefficients. This configuration may require large amounts of physical memory. See the
mklObj.compoundKernel property documentation in this object for using details.
:rtype :bool
.. seealso:: the code and examples and documentation about :@property:`compoundKernel`
"""
return self.__expansion
@property
def weightRegNorm(self):
""" The value of this property is the basis' weight vector norm, e.g. :math:`||\beta||_p`, to be used as
regularizer. It controls the smoothing among basis kernel weights of the learned multiple kernel combination. On
one hand, If p=1 (the l_1 norm) the weight values B_i will be disproportionally between them, i.e. a few of them
will be >> 0,some other simply > 0 and many of them will be zero or very near to zero (the vector B will be
sparse). On the other hand, if p = 2 the weights B_i linearly distributed, i.e. their distribution shows an
uniform tilt in such a way the differences between pairs of them are not significant, but rather proportional to
the tilt of the distribution.
To our knowledge, such a tilt is certainly not explicitly taken into account as regularization hyperparameter,
although the parameter C \in [0, 1] is directly associated to it as scalar factor. Thus specifically for
C \in [0, 1], it operates the vector B by forcing to it to certain orientation which describes a tilt
m \in (0, 1)U(1, \infty) (with minima in the extremes of these subsets and maxima in their medians). Given that
C \n [0, 1], the scaling effect behaves such that linearly depresses low values of B_i, whilst highlights their
high values. The effect of C \in (1, \infty) is still not clearly studied, however it will be a bit different
than the above, but keeping its scalar effect.
Overall, as p tends to be >> 1 (or even p --> \\infty) the B_i values tend to be ever more uniformly
distributed. More specific and complex regularization operators are explained in .. seealso:: Schölkopf, B., & Smola, A. J.
(2002). Learning with kernels: Support vector machines, regularization, optimization, and beyond. MIT press.
:rtype : vector of float
"""
return self.__weightRegNorm
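    # Rough sketch of where this norm enters the optimization (notation only, not the exact Shogun formulation):
    #   min_{B >= 0, ||B||_p <= 1}  min_w  0.5 * sum_m ||w_m||^2 / B_m  +  C * sum_i loss_i
    # so p (weightRegNorm) trades off sparsity (p = 1) against uniformity (p -> infinity) of the kernel weights B.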
# function getters
@property
def weights(self):
"""This method is used for getting the learned weights of the MKL object. We first get the kernel weights into
a list object, before returning it. This is because 'get_subkernel_weights()' causes error while printing to an
output file by means of returning a nonlist object.
:rtype : list of float
"""
self.__weights = list(self.ker.get_subkernel_weights())
return self.__weights
@property
def SVMepsilon(self):
"""This method is used for getting the SVM convergence criterion (the minimum allowed error commited by
the transducer in training).
:rtype : float
.. seealso:: See at page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning.
.. seealso:: @SVMepsilon.setter
"""
return self.__SVMepsion
@property
def MKLepsilon(self):
"""This method is used for getting the MKL convergence criterion (the minimum allowed error committed by
the MKL object in test).
:rtype : float
.. seealso:: See at page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning.
.. seealso:: @MKLepsilon.setter
"""
return self.__MKLepsilon
@property
def mklC(self):
"""This method is used for setting regularization parameters. 'mklC' is a real value in multiclass problems,
while in binary problems it must be a list of two elements. These must be different when the two classes are
imbalanced, but must be equal for balanced densities in binary classification problems. For multiclass
problems, imbalanced densities are not considered.
:rtype : float
.. seealso:: See at page 4 of Bagchi, (2014) SVM Classifiers Based On Imperfect Training Data.
.. seealso:: @weightRegNorm property documentation for more details about C as regularization parameter.
"""
return self.__mklC
@property
def threads(self):
""" This property is used for getting and setting the number of threads in which the training procedure will be
will be segmented into a single machine processor core.
:rtype : int
.. seealso:: @threads.setter documentation.
"""
return self.__threads
# Readonly properties:
@property
def problem(self):
"""This method is used for getting the kind of problem the mklObj object will be trained for. If binary == True,
the you want to train the object for a two-class classification problem. Otherwise if binary == False, you want
to train the object for multiclass classification problems. This property can't be modified once the object has
been instantiated.
:rtype : bool
"""
return self.__problem
@property
def testerr(self):
"""This method is used for getting the test accuracy after training the MKL object. 'testerr' is a readonly
object property.
:rtype : float
"""
return self.__testerr
@property
def sparse(self):
"""This method is used for getting the sparse/dense mode of the MKL object.
        :rtype : bool
"""
return self.__sparse
@property
def crashed(self):
"""This method is used for getting the sparse/dense mode of the MKL object.
:rtype : float
"""
return self.__crashed
# mklObj (decorated) Setters: Binary configuration of the classifier cant be changed. It is needed to instantiate
# a new mklObj object.
@crashed.setter
def crashed(self, value):
        assert isinstance(value, bool) # The crashed flag must be a boolean
self.__crashed = value
@mkl_model.setter
def mkl_model(self, value):
assert isinstance(value, dict) # The model is not stored as a dictionary
self.__mkl_model = value
@estimated_out.setter
def estimated_out(self, value):
self.__estimated_out = value
@sparse.setter
def sparse(self, value):
self.__sparse = value
@Matrx.setter
def Matrx(self, value):
"""
:type value: bool
.. seealso:: @Matrx property documentation.
"""
assert isinstance(value, bool)
self.__Matrx = value
@expansion.setter
def expansion(self, value):
"""
.. seealso:: @expansion property documentation
:type value: bool
"""
assert isinstance(value, bool)
self.__expansion = value
@sigmas.setter
def sigmas(self, value):
""" This method is used for setting desired basis kernel parameters for the MKL object. 'value' is a list of
        real values of 'pKers' length. In 'learning' mode, be careful to avoid a mismatch between the number of basis kernels of the
        current compound kernel and the one you have in mind, otherwise a mismatch error may arise. In 'pattern_recognition'
        mode, this quantity is taken from the learned model, which is stored at disk.
@type value: list of float
.. seealso:: @sigmas property documentation
"""
try:
if self.mode == 'learning':
if len(value) == self._pkers:
self.__sigmas = value
else:
raise customException('Size of basis kernel parameter list mismatches the size of the combined\
kernel. You can use len(CMKLobj.sigmas) to revise the mismatching.')
elif self.mode == 'pattern_recognition':
self.__sigmas = value
except customException, (instance):
print "Caught: " + instance.parameter
@verbose.setter
def verbose(self, value):
"""This method sets to True of False the verbose flag, which is used in turn for monitoring the object training
procedure.
@type value: bool
"""
assert isinstance(value, bool)
self._verbose = value
@weightRegNorm.setter
def weightRegNorm(self, value):
"""This method is used for changing the norm of the weight regularizer of the MKL object. Typically this
changing is useful for retrain the model with other regularizer.
@type value: float
..seealso:: @weightRegNorm property documentation.
"""
assert (isinstance(value, float) and value >= 0.0)
self.mkl.set_mkl_norm(value)
self.__weightRegNorm = value
@SVMepsilon.setter
def SVMepsilon(self, value):
"""This method is used for setting the SVM convergence criterion (the minimum allowed error commited by
the transducer in training). In other words, the low level of the learning process. The current basis
kernel combination is tested as the SVM kernel. Regardless of each basis' weights.
@type value: float
.. seealso:: Page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning.
"""
assert (isinstance(value, float) and value >= 0.0)
self.mkl.set_epsilon(value)
self.__SVMepsion = value
@MKLepsilon.setter
def MKLepsilon(self, value):
"""This method is used for setting the MKL convergence criterion (the minimum allowed error committed by
the MKL object in test). In other words, the high level of the learning process. The current basis
kernel combination is tested as the SVM kernel. The basis' weights are tuned until 'MKLeps' is reached.
@type value: float
.. seealso:: Page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning.
"""
assert (isinstance(value, float) and value >= 0.0)
self.mkl.set_mkl_epsilon(value)
self.__MKLepsilon = value
@mklC.setter
def mklC(self, value):
"""This method is used for setting regularization parameters. These are different when the two classes
are imbalanced and Equal for balanced densities in binary classification problems. For multiclass
problems imbalanced densities are not considered, so uniquely the first argument is caught by the method.
If one or both arguments are misplaced the default values are one both them.
@type value: float (for multiclass problems), [float, float] for binary and regression problems.
.. seealso:: Page 4 of Bagchi,(2014) SVM Classifiers Based On Imperfect Training Data.
"""
if self.__problem == 'binary' or self.__problem == 'regression':
assert len(value) == 2
assert (isinstance(value, (list, float)) and value[0] > 0.0 and value[1] > 0.0)
self.mkl.set_C(value[0], value[1])
elif self.__problem == 'multiclass':
assert (isinstance(value, float) and value > 0.0)
self.mkl.set_C(value)
self.__mklC = value
@threads.setter
def threads(self, value):
"""This method is used for changing the number of threads we want to be running with a single machine core.
These threads are not different parallel processes running in different machine cores.
"""
assert (isinstance(value, int) and value > 0)
self.mkl.parallel.set_num_threads(value) # setting number of training threads
self.__threads = value
| gpl-2.0 |
dharmabumstead/ansible | lib/ansible/modules/clustering/k8s/k8s_raw.py | 18 | 4093 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_raw
short_description: Manage Kubernetes (K8s) objects
version_added: "2.5"
author: "Chris Houseknecht (@chouseknecht)"
description:
- Use the OpenShift Python client to perform CRUD operations on K8s objects.
- Pass the object definition from a source file or inline. See examples for reading
files and using Jinja templates.
- Access to the full range of K8s APIs.
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- k8s_state_options
- k8s_name_options
- k8s_resource_options
- k8s_auth_options
requirements:
- "python >= 2.7"
- "openshift == 0.4.3"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Create a k8s namespace
k8s_raw:
name: testing
api_version: v1
kind: Namespace
state: present
- name: Create a Service object from an inline definition
k8s_raw:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
- name: Create a Service object by reading the definition from a file
k8s_raw:
state: present
src: /testing/service.yml
- name: Get an existing Service object
k8s_raw:
api_version: v1
kind: Service
name: web
namespace: testing
register: web_service
- name: Get a list of all service objects
k8s_raw:
api_version: v1
kind: ServiceList
namespace: testing
register: service_list
- name: Remove an existing Service object
k8s_raw:
state: absent
api_version: v1
kind: Service
namespace: testing
name: web
# Passing the object definition from a file
- name: Create a Deployment by reading the definition from a local file
k8s_raw:
state: present
src: /testing/deployment.yml
- name: Read definition file from the Ansible controller file system
k8s_raw:
state: present
definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
- name: Read definition file from the Ansible controller file system after Jinja templating
k8s_raw:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
items:
description: Returned only when the I(kind) is a List type resource. Contains a set of objects.
returned: when resource is a List
type: list
'''
from ansible.module_utils.k8s.raw import KubernetesRawModule
def main():
KubernetesRawModule().execute_module()
if __name__ == '__main__':
main()
| gpl-3.0 |
EnTeQuAk/django-allauth | allauth/socialaccount/providers/facebook/south_migrations/0003_tosocialaccount.py | 82 | 8978 | # encoding: utf-8
from south.v2 import DataMigration
class Migration(DataMigration):
depends_on = (('socialaccount', '0002_genericmodels'),)
def forwards(self, orm):
# Migrate FB apps
app_id_to_sapp = {}
for app in orm.FacebookApp.objects.all():
sapp = orm['socialaccount.SocialApp'].objects \
.create(site=app.site,
provider='facebook',
name=app.name,
key=app.application_id,
secret=app.application_secret)
app_id_to_sapp[app.id] = sapp
# Migrate FB accounts
acc_id_to_sacc = {}
for acc in orm.FacebookAccount.objects.all():
sacc = acc.socialaccount_ptr
sacc.uid = acc.social_id
sacc.extra_data = { 'link': acc.link,
'name': acc.name }
sacc.provider = 'facebook'
sacc.save()
acc_id_to_sacc[acc.id] = sacc
# Migrate tokens
for token in orm.FacebookAccessToken.objects.all():
sapp = app_id_to_sapp[token.app.id]
sacc = acc_id_to_sacc[token.account.id]
orm['socialaccount.SocialToken'].objects \
.create(app=sapp,
account=sacc,
token=token.access_token,
token_secret='')
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'facebook.facebookaccesstoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'FacebookAccessToken'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook.FacebookAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook.FacebookApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'facebook.facebookaccount': {
'Meta': {'object_name': 'FacebookAccount', '_ormbases': ['socialaccount.SocialAccount']},
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'social_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'socialaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['socialaccount.SocialAccount']", 'unique': 'True', 'primary_key': 'True'})
},
'facebook.facebookapp': {
'Meta': {'object_name': 'FacebookApp'},
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'application_id': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'application_secret': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'socialaccount.socialaccount': {
'Meta': {'object_name': 'SocialAccount'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'socialaccount.socialapp': {
'Meta': {'object_name': 'SocialApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
},
'socialaccount.socialtoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['socialaccount', 'facebook']
| mit |
asimonov-im/boinc | sched/assimilator.py | 28 | 10954 | #!/usr/bin/env python
'''
Generic Assimilator framework
'''
import os, re, signal, sys, time, hashlib
import boinc_path_config
from Boinc import database, boinc_db, boinc_project_path, configxml, sched_messages
# Peter Norvig's Abstract base class hack
def abstract():
"""
This function is not necessary, but provides
a nice error message when Abstract methods are not
overridden by child classes.
See: http://norvig.com/python-iaq.html for details.
"""
import inspect
# get the name of the calling function off the stack
caller = inspect.getouterframes(inspect.currentframe())[1][3]
raise NotImplementedError(caller + ' must be implemented in subclass')
class Assimilator():
'''
Use this class to create new pure-Python Assimilators.
To create a new assimilator:
1) call __init__ from the new child class' __init__ method
2) override the assimilate_handler method
3) add the standard if __name__ == "__main__" bootstrap (see end of this file)
'''
def __init__(self):
# Be sure to call Assimilator.__init__(self) from child classes
# HACK: this belongs in boinc_db.py!
boinc_db.WU_ERROR_NO_CANONICAL_RESULT = 32
# initialize member vars
self.config = None
self.STOP_TRIGGER_FILENAME = boinc_project_path.project_path('stop_daemons')
self.caught_sig_int = False
self.log=sched_messages.SchedMessages()
self.pass_count = 0
self.update_db = True
self.noinsert = False
self.wu_id_mod = 0
self.wu_id_remainder = 0
self.one_pass = False
self.one_pass_N_WU = 0
self.appname = ''
self.sleep_interval = 10
def check_stop_trigger(self):
"""
Stops the daemon when not running in one_pass mode
There are two cases when the daemon will stop:
1) if the SIGINT signal is received
2) if the stop trigger file is present
"""
try:
junk = open(self.STOP_TRIGGER_FILENAME, 'r')
except IOError:
if self.caught_sig_int:
self.logCritical("Caught SIGINT\n")
sys.exit(1)
else:
self.logCritical("Found stop trigger\n")
sys.exit(1)
def sigint_handler(self, sig, stack):
"""
This method handles the SIGINT signal. It sets a flag
but waits to exit until check_stop_trigger is called
"""
self.logDebug("Handled SIGINT\n")
self.caught_sig_int = True
def filename_hash(self, name, hash_fanout):
"""
Accepts a filename (without path) and the hash fanout.
Returns the directory bucket where the file will reside.
The hash fanout is typically provided by the project config file.
"""
h = hex(int(hashlib.md5(name).hexdigest()[:8], 16) % hash_fanout)[2:]
# check for the long L suffix. It seems like it should
# never be present but that isn't the case
if h.endswith('L'):
h = h[:-1]
return h
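    # Illustrative (hypothetical names/values) use of filename_hash: with the project's uldl_dir_fanout of,
    # say, 1024, a result file name maps to a short hex bucket, e.g.
    #   self.filename_hash('wu_123_0_r1', 1024)  ->  something like '2f1'
    # get_file_path below joins upload_dir, this bucket and the file name to locate the uploaded file.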
def get_file_path(self, result):
"""
Accepts a result object and returns the relative path to the file.
This method accounts for file hashing and includes the directory
bucket in the path returned.
"""
name = re.search('<file_name>(.*)</file_name>',result.xml_doc_in).group(1)
fanout = int(self.config.uldl_dir_fanout)
hashed = self.filename_hash(name, fanout)
updir = self.config.upload_dir
result = os.path.join(updir,hashed,name)
return result
def assimilate_handler(self, wu, results, canonical_result):
"""
This method is called for each workunit (wu) that needs to be
        processed. A canonical result is not guaranteed and several error
conditions may be present on the wu. Call report_errors(wu) when
overriding this method.
Note that the -noinsert flag (self.noinsert) must be accounted for when
overriding this method.
"""
abstract()
def report_errors(self, wu):
"""
Writes error logs based on the workunit (wu) error_mask field.
Returns True if errors were present, False otherwise.
"""
if wu.error_mask&boinc_db.WU_ERROR_COULDNT_SEND_RESULT:
self.logCritical("[%s] Error: couldn't send a result\n", wu.name)
return True
if wu.error_mask&boinc_db.WU_ERROR_TOO_MANY_ERROR_RESULTS:
self.logCritical("[%s] Error: too many error results\n", wu.name)
return True
if wu.error_mask&boinc_db.WU_ERROR_TOO_MANY_TOTAL_RESULTS:
self.logCritical("[%s] Error: too many total results\n", wu.name)
return True
if wu.error_mask&boinc_db.WU_ERROR_TOO_MANY_SUCCESS_RESULTS:
self.logCritical("[%s] Error: too many success results\n", wu.name)
return True
return False
def do_pass(self, app):
"""
This method scans the database for workunits that need to be
assimilated. It handles all processing rules passed in on the command
line, except for -noinsert, which must be handled in assimilate_handler.
Calls check_stop_trigger before doing any work.
"""
did_something=False
# check for stop trigger
self.check_stop_trigger()
self.pass_count += 1
n = 0
units = database.Workunits.find(app=app,assimilate_state=boinc_db.ASSIMILATE_READY)
self.logDebug("pass %d, units %d\n", self.pass_count, len(units))
# look for workunits with correct appid and
# assimilate_state==ASSIMILATE_READY
for wu in units:
# if the user has turned on the WU mod flag, adhere to it
if self.wu_id_mod > 0 and self.wu_id_remainder > 0:
if wu.id % self.wu_id_mod != self.wu_id_remainder:
continue
# track how many jobs have been processed
# stop if the limit is reached
n += 1
if self.one_pass_N_WU > 0 and n > self.one_pass_N_WU:
return did_something
# only mark as dirty if the database is modified
if self.update_db:
did_something=True
canonical_result = None
results = None
self.logDebug("[%s] assimilating: state=%d\n", wu.name, wu.assimilate_state)
results = database.Results.find(workunit=wu)
# look for canonical result for workunit in results
for result in results:
if result == wu.canonical_result:
canonical_result=result
if canonical_result == None and wu.error_mask == 0:
# If no canonical result found and WU had no other errors,
# something is wrong, e.g. result records got deleted prematurely.
# This is probably unrecoverable, so mark the WU as having
# an assimilation error and keep going.
wu.error_mask = boinc_db.WU_ERROR_NO_CANONICAL_RESULT
wu.commit()
# assimilate handler
self.assimilate_handler(wu, results, canonical_result)
# TODO: check for DEFER_ASSIMILATION as a return value from assimilate_handler
if self.update_db:
# tag wu as ASSIMILATE_DONE
wu.assimilate_state = boinc_db.ASSIMILATE_DONE
wu.transition_time = int(time.time())
wu.commit()
# return did something result
return did_something
def parse_args(self, args):
"""
Parses arguments provided on the command line and sets
those argument values as member variables. Arguments
are parsed as their true types, so integers will be ints,
not strings.
"""
args.reverse()
while(len(args)):
arg = args.pop()
if arg == '-sleep_interval':
arg = args.pop()
self.sleep_interval = float(arg)
elif arg == '-one_pass':
self.one_pass = True
elif arg == '-one_pass_N_WU':
arg = args.pop()
self.one_pass_N_WU = int(arg)
elif arg == '-noinsert':
self.noinsert = True
elif arg == '-dont_update_db':
self.update_db = False
elif arg == '-mod':
self.wu_id_mod = int(args.pop())
self.wu_id_remainder = int(args.pop())
elif arg == '-d':
arg = args.pop()
self.log.set_debug_level(arg)
elif arg == '-app':
arg = args.pop()
self.appname = arg
else:
self.logCritical("Unrecognized arg: %s\n", arg)
def run(self):
"""
This function runs the class in a loop unless the
one_pass or one_pass_WU_N flags are set. Before execution
parse_args() is called, the xml config file is loaded and
the SIGINT signal is hooked to the sigint_handler method.
"""
self.parse_args(sys.argv[1:])
self.config = configxml.default_config().config
# retrieve app where name = app.name
database.connect()
app=database.Apps.find1(name=self.appname)
database.close()
signal.signal(signal.SIGINT, self.sigint_handler)
# do one pass or execute main loop
if self.one_pass:
self.do_pass(app)
else:
# main loop
while(1):
database.connect()
workdone = self.do_pass(app)
database.close()
if not workdone:
time.sleep(self.sleep_interval)
def _writeLog(self, mode, *args):
"""
A private helper function for writeing to the log
"""
self.log.printf(mode, *args)
def logCritical(self, *messageArgs):
"""
A helper function for logging critical messages
"""
self._writeLog(sched_messages.CRITICAL, *messageArgs)
def logNormal(self, *messageArgs):
"""
A helper function for logging normal messages
"""
self._writeLog(sched_messages.NORMAL, *messageArgs)
def logDebug(self, *messageArgs):
"""
A helper function for logging debug messages
"""
self._writeLog(sched_messages.DEBUG, *messageArgs)
# --------------------------------------------
# Add the following to your assimilator file:
#if __name__ == '__main__':
# asm = YourAssimilator()
# asm.run()
| gpl-3.0 |
ligaturee/ansible-modules-extras | cloud/centurylink/clc_server.py | 49 | 50314 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
DOCUMENTATION = '''
module: clc_server
short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
description:
- An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
version_added: "2.0"
options:
additional_disks:
description:
- The list of additional disks for the server
required: False
default: []
add_public_ip:
description:
- Whether to add a public ip to the server
required: False
default: False
choices: [False, True]
alias:
description:
- The account alias to provision the servers under.
required: False
default: None
anti_affinity_policy_id:
description:
- The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
required: False
default: None
anti_affinity_policy_name:
description:
- The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
required: False
default: None
alert_policy_id:
description:
- The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
required: False
default: None
alert_policy_name:
description:
- The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
required: False
default: None
count:
description:
- The number of servers to build (mutually exclusive with exact_count)
required: False
default: 1
count_group:
description:
            - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
required: False
default: None
cpu:
description:
- How many CPUs to provision on the server
default: 1
required: False
cpu_autoscale_policy_id:
description:
- The autoscale policy to assign to the server.
default: None
required: False
custom_fields:
description:
- The list of custom fields to set on the server.
default: []
required: False
description:
description:
- The description to set for the server.
default: None
required: False
exact_count:
description:
            - Run in idempotent mode. Will ensure that this exact number of servers are running in the provided group,
creating and deleting them to reach that count. Requires count_group to be set.
default: None
required: False
group:
description:
- The Server Group to create servers under.
default: 'Default Group'
required: False
ip_address:
description:
- The IP Address for the server. One is assigned if not provided.
default: None
required: False
location:
description:
- The Datacenter to create servers in.
default: None
required: False
managed_os:
description:
- Whether to create the server as 'Managed' or not.
default: False
required: False
choices: [True, False]
memory:
description:
- Memory in GB.
default: 1
required: False
name:
description:
- A 1 to 6 character identifier to use for the server. This is required when state is 'present'
default: None
required: False
network_id:
description:
- The network UUID on which to create servers.
default: None
required: False
packages:
description:
            - The list of blueprint packages to run on the server after it is created.
default: []
required: False
password:
description:
- Password for the administrator / root user
default: None
required: False
primary_dns:
description:
- Primary DNS used by the server.
default: None
required: False
public_ip_protocol:
description:
- The protocol to use for the public ip if add_public_ip is set to True.
default: 'TCP'
choices: ['TCP', 'UDP', 'ICMP']
required: False
public_ip_ports:
description:
            - A list of ports to allow on the firewall to the server's public ip, if add_public_ip is set to True.
default: []
required: False
secondary_dns:
description:
- Secondary DNS used by the server.
default: None
required: False
server_ids:
description:
- Required for started, stopped, and absent states.
              A list of server Ids to ensure are started, stopped, or absent.
default: []
required: False
source_server_password:
description:
- The password for the source server if a clone is specified.
default: None
required: False
state:
description:
            - The state to ensure that the provided resources are in.
default: 'present'
required: False
choices: ['present', 'absent', 'started', 'stopped']
storage_type:
description:
- The type of storage to attach to the server.
default: 'standard'
required: False
choices: ['standard', 'hyperscale']
template:
description:
- The template to use for server creation. Will search for a template if a partial string is provided.
This is required when state is 'present'
default: None
required: false
ttl:
description:
- The time to live for the server in seconds. The server will be deleted when this time expires.
default: None
required: False
type:
description:
- The type of server to create.
default: 'standard'
required: False
choices: ['standard', 'hyperscale']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
- name: Provision a single Ubuntu Server
clc_server:
name: test
template: ubuntu-14-64
count: 1
group: 'Default Group'
state: present
- name: Ensure 'Default Group' has exactly 5 servers
clc_server:
name: test
template: ubuntu-14-64
exact_count: 5
count_group: 'Default Group'
group: 'Default Group'
- name: Stop a Server
clc_server:
server_ids: ['UC1ACCTTEST01']
state: stopped
- name: Start a Server
clc_server:
server_ids: ['UC1ACCTTEST01']
state: started
- name: Delete a Server
clc_server:
server_ids: ['UC1ACCTTEST01']
state: absent
'''
__version__ = '${version}'
from time import sleep
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
class ClcServer:
clc = clc_sdk
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
changed = False
new_server_ids = []
server_dict_array = []
self._set_clc_credentials_from_env()
self.module.params = self._validate_module_params(
self.clc,
self.module)
p = self.module.params
state = p.get('state')
#
# Handle each state
#
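        # 'absent' deletes the listed server_ids, 'started'/'stopped' toggle the
        # power state, and 'present' either creates 'count' new servers or, when
        # 'exact_count' is given, converges the count_group to that size.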
partial_servers_ids = []
if state == 'absent':
server_ids = p['server_ids']
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of instances to delete: %s' %
server_ids)
(changed,
server_dict_array,
new_server_ids) = self._delete_servers(module=self.module,
clc=self.clc,
server_ids=server_ids)
elif state in ('started', 'stopped'):
server_ids = p.get('server_ids')
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of servers to run: %s' %
server_ids)
(changed,
server_dict_array,
new_server_ids) = self._start_stop_servers(self.module,
self.clc,
server_ids)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not p.get('template'):
return self.module.fail_json(
msg='template parameter is required for new instance')
if p.get('exact_count') is None:
(server_dict_array,
new_server_ids,
partial_servers_ids,
changed) = self._create_servers(self.module,
self.clc)
else:
(server_dict_array,
new_server_ids,
partial_servers_ids,
changed) = self._enforce_count(self.module,
self.clc)
self.module.exit_json(
changed=changed,
server_ids=new_server_ids,
partially_created_server_ids=partial_servers_ids,
servers=server_dict_array)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(),
template=dict(),
group=dict(default='Default Group'),
network_id=dict(),
location=dict(default=None),
cpu=dict(default=1),
memory=dict(default=1),
alias=dict(default=None),
password=dict(default=None, no_log=True),
ip_address=dict(default=None),
storage_type=dict(
default='standard',
choices=[
'standard',
'hyperscale']),
type=dict(default='standard', choices=['standard', 'hyperscale']),
primary_dns=dict(default=None),
secondary_dns=dict(default=None),
additional_disks=dict(type='list', default=[]),
custom_fields=dict(type='list', default=[]),
ttl=dict(default=None),
managed_os=dict(type='bool', default=False),
description=dict(default=None),
source_server_password=dict(default=None),
cpu_autoscale_policy_id=dict(default=None),
anti_affinity_policy_id=dict(default=None),
anti_affinity_policy_name=dict(default=None),
alert_policy_id=dict(default=None),
alert_policy_name=dict(default=None),
packages=dict(type='list', default=[]),
state=dict(
default='present',
choices=[
'present',
'absent',
'started',
'stopped']),
count=dict(type='int', default=1),
exact_count=dict(type='int', default=None),
count_group=dict(),
server_ids=dict(type='list', default=[]),
add_public_ip=dict(type='bool', default=False),
public_ip_protocol=dict(
default='TCP',
choices=[
'TCP',
'UDP',
'ICMP']),
public_ip_ports=dict(type='list', default=[]),
wait=dict(type='bool', default=True))
mutually_exclusive = [
['exact_count', 'count'],
['exact_count', 'state'],
['anti_affinity_policy_id', 'anti_affinity_policy_name'],
['alert_policy_id', 'alert_policy_name'],
]
return {"argument_spec": argument_spec,
"mutually_exclusive": mutually_exclusive}
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _validate_module_params(clc, module):
"""
Validate the module params, and lookup default values.
:param clc: clc-sdk instance to use
:param module: module to validate
:return: dictionary of validated params
"""
params = module.params
datacenter = ClcServer._find_datacenter(clc, module)
ClcServer._validate_types(module)
ClcServer._validate_name(module)
params['alias'] = ClcServer._find_alias(clc, module)
params['cpu'] = ClcServer._find_cpu(clc, module)
params['memory'] = ClcServer._find_memory(clc, module)
params['description'] = ClcServer._find_description(module)
params['ttl'] = ClcServer._find_ttl(clc, module)
params['template'] = ClcServer._find_template_id(module, datacenter)
params['group'] = ClcServer._find_group(module, datacenter).id
params['network_id'] = ClcServer._find_network_id(module, datacenter)
params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
clc,
module)
params['alert_policy_id'] = ClcServer._find_alert_policy_id(
clc,
module)
return params
@staticmethod
def _find_datacenter(clc, module):
"""
Find the datacenter by calling the CLC API.
:param clc: clc-sdk instance to use
:param module: module to validate
:return: clc-sdk.Datacenter instance
"""
location = module.params.get('location')
try:
datacenter = clc.v2.Datacenter(location)
return datacenter
except CLCException:
module.fail_json(
msg=str(
"Unable to find location: {0}".format(location)))
@staticmethod
def _find_alias(clc, module):
"""
Find or Validate the Account Alias by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: clc-sdk.Account instance
"""
alias = module.params.get('alias')
if not alias:
try:
alias = clc.v2.Account.GetAlias()
except CLCException as ex:
module.fail_json(msg='Unable to find account alias. {0}'.format(
ex.message
))
return alias
@staticmethod
def _find_cpu(clc, module):
"""
Find or validate the CPU value by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: Int value for CPU
"""
cpu = module.params.get('cpu')
group_id = module.params.get('group_id')
alias = module.params.get('alias')
state = module.params.get('state')
if not cpu and state == 'present':
group = clc.v2.Group(id=group_id,
alias=alias)
if group.Defaults("cpu"):
cpu = group.Defaults("cpu")
else:
module.fail_json(
msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
return cpu
@staticmethod
def _find_memory(clc, module):
"""
Find or validate the Memory value by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: Int value for Memory
"""
memory = module.params.get('memory')
group_id = module.params.get('group_id')
alias = module.params.get('alias')
state = module.params.get('state')
if not memory and state == 'present':
group = clc.v2.Group(id=group_id,
alias=alias)
if group.Defaults("memory"):
memory = group.Defaults("memory")
else:
module.fail_json(msg=str(
"Can\'t determine a default memory value. Please provide a value for memory."))
return memory
@staticmethod
def _find_description(module):
"""
Set the description module param to name if description is blank
:param module: the module to validate
:return: string description
"""
description = module.params.get('description')
if not description:
description = module.params.get('name')
return description
@staticmethod
def _validate_types(module):
"""
Validate that type and storage_type are set appropriately, and fail if not
:param module: the module to validate
:return: none
"""
state = module.params.get('state')
server_type = module.params.get(
'type').lower() if module.params.get('type') else None
storage_type = module.params.get(
'storage_type').lower() if module.params.get('storage_type') else None
if state == "present":
if server_type == "standard" and storage_type not in (
"standard", "premium"):
module.fail_json(
msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
if server_type == "hyperscale" and storage_type != "hyperscale":
module.fail_json(
msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
@staticmethod
def _validate_name(module):
"""
Validate that name is the correct length if provided, fail if it's not
:param module: the module to validate
:return: none
"""
server_name = module.params.get('name')
state = module.params.get('state')
if state == 'present' and (
len(server_name) < 1 or len(server_name) > 6):
module.fail_json(msg=str(
"When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
@staticmethod
def _find_ttl(clc, module):
"""
Validate that TTL is > 3600 if set, and fail if not
:param clc: clc-sdk instance to use
:param module: module to validate
:return: validated ttl
"""
ttl = module.params.get('ttl')
if ttl:
if ttl <= 3600:
return module.fail_json(msg=str("Ttl cannot be <= 3600"))
else:
ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
return ttl
@staticmethod
def _find_template_id(module, datacenter):
"""
Find the template id by calling the CLC API.
:param module: the module to validate
:param datacenter: the datacenter to search for the template
:return: a valid clc template id
"""
lookup_template = module.params.get('template')
state = module.params.get('state')
result = None
if state == 'present':
try:
result = datacenter.Templates().Search(lookup_template)[0].id
except CLCException:
module.fail_json(
msg=str(
"Unable to find a template: " +
lookup_template +
" in location: " +
datacenter.id))
return result
@staticmethod
def _find_network_id(module, datacenter):
"""
Validate the provided network id or return a default.
:param module: the module to validate
:param datacenter: the datacenter to search for a network id
:return: a valid network id
"""
network_id = module.params.get('network_id')
if not network_id:
try:
network_id = datacenter.Networks().networks[0].id
# -- added for clc-sdk 2.23 compatibility
# datacenter_networks = clc_sdk.v2.Networks(
# networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
# network_id = datacenter_networks.networks[0].id
# -- end
except CLCException:
module.fail_json(
msg=str(
"Unable to find a network in location: " +
datacenter.id))
return network_id
@staticmethod
def _find_aa_policy_id(clc, module):
"""
        Validate if the anti affinity policy exists for the given name and fail if not
:param clc: the clc-sdk instance
:param module: the module to validate
:return: aa_policy_id: the anti affinity policy id of the given name.
"""
aa_policy_id = module.params.get('anti_affinity_policy_id')
aa_policy_name = module.params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
alias = module.params.get('alias')
aa_policy_id = ClcServer._get_anti_affinity_policy_id(
clc,
module,
alias,
aa_policy_name)
if not aa_policy_id:
module.fail_json(
msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
return aa_policy_id
@staticmethod
def _find_alert_policy_id(clc, module):
"""
        Validate if the alert policy exists for the given name and fail if not
:param clc: the clc-sdk instance
:param module: the module to validate
:return: alert_policy_id: the alert policy id of the given name.
"""
alert_policy_id = module.params.get('alert_policy_id')
alert_policy_name = module.params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alias = module.params.get('alias')
alert_policy_id = ClcServer._get_alert_policy_id_by_name(
clc=clc,
module=module,
alias=alias,
alert_policy_name=alert_policy_name
)
if not alert_policy_id:
module.fail_json(
                    msg='No alert policy exists with name : %s' % alert_policy_name)
return alert_policy_id
def _create_servers(self, module, clc, override_count=None):
"""
Create New Servers in CLC cloud
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:return: a list of dictionaries with server information about the servers that were created
"""
p = module.params
request_list = []
servers = []
server_dict_array = []
created_server_ids = []
partial_created_servers_ids = []
add_public_ip = p.get('add_public_ip')
public_ip_protocol = p.get('public_ip_protocol')
public_ip_ports = p.get('public_ip_ports')
params = {
'name': p.get('name'),
'template': p.get('template'),
'group_id': p.get('group'),
'network_id': p.get('network_id'),
'cpu': p.get('cpu'),
'memory': p.get('memory'),
'alias': p.get('alias'),
'password': p.get('password'),
'ip_address': p.get('ip_address'),
'storage_type': p.get('storage_type'),
'type': p.get('type'),
'primary_dns': p.get('primary_dns'),
'secondary_dns': p.get('secondary_dns'),
'additional_disks': p.get('additional_disks'),
'custom_fields': p.get('custom_fields'),
'ttl': p.get('ttl'),
'managed_os': p.get('managed_os'),
'description': p.get('description'),
'source_server_password': p.get('source_server_password'),
'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
'packages': p.get('packages')
}
count = override_count if override_count else p.get('count')
changed = False if count == 0 else True
if not changed:
return server_dict_array, created_server_ids, partial_created_servers_ids, changed
for i in range(0, count):
if not module.check_mode:
req = self._create_clc_server(clc=clc,
module=module,
server_params=params)
server = req.requests[0].Server()
request_list.append(req)
servers.append(server)
self._wait_for_requests(module, request_list)
self._refresh_servers(module, servers)
ip_failed_servers = self._add_public_ip_to_servers(
module=module,
should_add_public_ip=add_public_ip,
servers=servers,
public_ip_protocol=public_ip_protocol,
public_ip_ports=public_ip_ports)
ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
module=module,
servers=servers)
for server in servers:
if server in ip_failed_servers or server in ap_failed_servers:
partial_created_servers_ids.append(server.id)
else:
# reload server details
server = clc.v2.Server(server.id)
server.data['ipaddress'] = server.details[
'ipAddresses'][0]['internal']
if add_public_ip and len(server.PublicIPs().public_ips) > 0:
server.data['publicip'] = str(
server.PublicIPs().public_ips[0])
created_server_ids.append(server.id)
server_dict_array.append(server.data)
return server_dict_array, created_server_ids, partial_created_servers_ids, changed
def _enforce_count(self, module, clc):
"""
Enforce that there is the right number of servers in the provided group.
        Creates or deletes servers as necessary.
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:return: a list of dictionaries with server information about the servers that were created or deleted
"""
p = module.params
changed = False
count_group = p.get('count_group')
datacenter = ClcServer._find_datacenter(clc, module)
exact_count = p.get('exact_count')
server_dict_array = []
partial_servers_ids = []
changed_server_ids = []
# fail here if the exact count was specified without filtering
        # on a group, as this may lead to an undesired removal of instances
if exact_count and count_group is None:
return module.fail_json(
msg="you must use the 'count_group' option with exact_count")
servers, running_servers = ClcServer._find_running_servers_by_group(
module, datacenter, count_group)
if len(running_servers) == exact_count:
changed = False
elif len(running_servers) < exact_count:
to_create = exact_count - len(running_servers)
server_dict_array, changed_server_ids, partial_servers_ids, changed \
= self._create_servers(module, clc, override_count=to_create)
for server in server_dict_array:
running_servers.append(server)
elif len(running_servers) > exact_count:
to_remove = len(running_servers) - exact_count
all_server_ids = sorted([x.id for x in running_servers])
remove_ids = all_server_ids[0:to_remove]
(changed, server_dict_array, changed_server_ids) \
= ClcServer._delete_servers(module, clc, remove_ids)
return server_dict_array, changed_server_ids, partial_servers_ids, changed
@staticmethod
def _wait_for_requests(module, request_list):
"""
Block until server provisioning requests are completed.
:param module: the AnsibleModule object
:param request_list: a list of clc-sdk.Request instances
:return: none
"""
wait = module.params.get('wait')
if wait:
# Requests.WaitUntilComplete() returns the count of failed requests
failed_requests_count = sum(
[request.WaitUntilComplete() for request in request_list])
if failed_requests_count > 0:
module.fail_json(
msg='Unable to process server request')
@staticmethod
def _refresh_servers(module, servers):
"""
Loop through a list of servers and refresh them.
:param module: the AnsibleModule object
:param servers: list of clc-sdk.Server instances to refresh
:return: none
"""
for server in servers:
try:
server.Refresh()
except CLCException as ex:
module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
server.id, ex.message
))
@staticmethod
def _add_public_ip_to_servers(
module,
should_add_public_ip,
servers,
public_ip_protocol,
public_ip_ports):
"""
Create a public IP for servers
:param module: the AnsibleModule object
:param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
:param servers: List of servers to add public ips to
:param public_ip_protocol: a protocol to allow for the public ips
:param public_ip_ports: list of ports to allow for the public ips
:return: none
"""
failed_servers = []
if not should_add_public_ip:
return failed_servers
ports_lst = []
request_list = []
server = None
for port in public_ip_ports:
ports_lst.append(
{'protocol': public_ip_protocol, 'port': port})
try:
if not module.check_mode:
for server in servers:
request = server.PublicIPs().Add(ports_lst)
request_list.append(request)
except APIFailedResponse:
failed_servers.append(server)
ClcServer._wait_for_requests(module, request_list)
return failed_servers
@staticmethod
def _add_alert_policy_to_servers(clc, module, servers):
"""
Associate the alert policy to servers
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param servers: List of servers to add alert policy to
:return: failed_servers: the list of servers which failed while associating alert policy
"""
failed_servers = []
p = module.params
alert_policy_id = p.get('alert_policy_id')
alias = p.get('alias')
if alert_policy_id and not module.check_mode:
for server in servers:
try:
ClcServer._add_alert_policy_to_server(
clc=clc,
alias=alias,
server_id=server.id,
alert_policy_id=alert_policy_id)
except CLCException:
failed_servers.append(server)
return failed_servers
@staticmethod
def _add_alert_policy_to_server(
clc, alias, server_id, alert_policy_id):
"""
Associate an alert policy to a clc server
:param clc: the clc-sdk instance to use
:param alias: the clc account alias
:param server_id: The clc server id
:param alert_policy_id: the alert policy id to be associated to the server
:return: none
"""
try:
clc.v2.API.Call(
method='POST',
url='servers/%s/%s/alertPolicies' % (alias, server_id),
payload=json.dumps(
{
'id': alert_policy_id
}))
except APIFailedResponse as e:
raise CLCException(
'Failed to associate alert policy to the server : {0} with Error {1}'.format(
server_id, str(e.response_text)))
@staticmethod
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
"""
Returns the alert policy id for the given alert policy name
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the clc account alias
:param alert_policy_name: the name of the alert policy
:return: alert_policy_id: the alert policy id
"""
alert_policy_id = None
policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
if not policies:
return alert_policy_id
for policy in policies.get('items'):
if policy.get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = policy.get('id')
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _delete_servers(module, clc, server_ids):
"""
Delete the servers on the provided list
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:param server_ids: list of servers to delete
:return: a list of dictionaries with server information about the servers that were deleted
"""
terminated_server_ids = []
server_dict_array = []
request_list = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = clc.v2.Servers(server_ids).Servers()
for server in servers:
if not module.check_mode:
request_list.append(server.Delete())
ClcServer._wait_for_requests(module, request_list)
for server in servers:
terminated_server_ids.append(server.id)
return True, server_dict_array, terminated_server_ids
@staticmethod
def _start_stop_servers(module, clc, server_ids):
"""
Start or Stop the servers on the provided list
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:param server_ids: list of servers to start or stop
:return: a list of dictionaries with server information about the servers that were started or stopped
"""
p = module.params
state = p.get('state')
changed = False
changed_servers = []
server_dict_array = []
result_server_ids = []
request_list = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = clc.v2.Servers(server_ids).Servers()
for server in servers:
if server.powerState != state:
changed_servers.append(server)
if not module.check_mode:
request_list.append(
ClcServer._change_server_power_state(
module,
server,
state))
changed = True
ClcServer._wait_for_requests(module, request_list)
ClcServer._refresh_servers(module, changed_servers)
for server in set(changed_servers + servers):
try:
server.data['ipaddress'] = server.details[
'ipAddresses'][0]['internal']
server.data['publicip'] = str(
server.PublicIPs().public_ips[0])
except (KeyError, IndexError):
pass
server_dict_array.append(server.data)
result_server_ids.append(server.id)
return changed, server_dict_array, result_server_ids
@staticmethod
def _change_server_power_state(module, server, state):
"""
Change the server powerState
:param module: the module to check for intended state
:param server: the server to start or stop
:param state: the intended powerState for the server
:return: the request object from clc-sdk call
"""
result = None
try:
if state == 'started':
result = server.PowerOn()
else:
result = server.PowerOff()
except CLCException:
module.fail_json(
msg='Unable to change power state for server {0}'.format(
server.id))
return result
@staticmethod
def _find_running_servers_by_group(module, datacenter, count_group):
"""
Find a list of running servers in the provided group
:param module: the AnsibleModule object
:param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
:param count_group: the group to count the servers
:return: list of servers, and list of running servers
"""
group = ClcServer._find_group(
module=module,
datacenter=datacenter,
lookup_group=count_group)
servers = group.Servers().Servers()
running_servers = []
for server in servers:
if server.status == 'active' and server.powerState == 'started':
running_servers.append(server)
return servers, running_servers
@staticmethod
def _find_group(module, datacenter, lookup_group=None):
"""
Find a server group in a datacenter by calling the CLC API
:param module: the AnsibleModule instance
:param datacenter: clc-sdk.Datacenter instance to search for the group
:param lookup_group: string name of the group to search for
:return: clc-sdk.Group instance
"""
if not lookup_group:
lookup_group = module.params.get('group')
try:
return datacenter.Groups().Get(lookup_group)
except CLCException:
pass
        # The search above only acts on the top-level groups, so fall back to a
        # recursive search of the subgroups
result = ClcServer._find_group_recursive(
module,
datacenter.Groups(),
lookup_group)
if result is None:
module.fail_json(
msg=str(
"Unable to find group: " +
lookup_group +
" in location: " +
datacenter.id))
return result
@staticmethod
def _find_group_recursive(module, group_list, lookup_group):
"""
Find a server group by recursively walking the tree
:param module: the AnsibleModule instance to use
:param group_list: a list of groups to search
:param lookup_group: the group to look for
:return: list of groups
"""
result = None
for group in group_list.groups:
subgroups = group.Subgroups()
try:
return subgroups.Get(lookup_group)
except CLCException:
result = ClcServer._find_group_recursive(
module,
subgroups,
lookup_group)
if result is not None:
break
return result
@staticmethod
def _create_clc_server(
clc,
module,
server_params):
"""
Call the CLC Rest API to Create a Server
:param clc: the clc-python-sdk instance to use
:param module: the AnsibleModule instance to use
:param server_params: a dictionary of params to use to create the servers
:return: clc-sdk.Request object linked to the queued server request
"""
try:
res = clc.v2.API.Call(
method='POST',
url='servers/%s' %
(server_params.get('alias')),
payload=json.dumps(
{
'name': server_params.get('name'),
'description': server_params.get('description'),
'groupId': server_params.get('group_id'),
'sourceServerId': server_params.get('template'),
'isManagedOS': server_params.get('managed_os'),
'primaryDNS': server_params.get('primary_dns'),
'secondaryDNS': server_params.get('secondary_dns'),
'networkId': server_params.get('network_id'),
'ipAddress': server_params.get('ip_address'),
'password': server_params.get('password'),
'sourceServerPassword': server_params.get('source_server_password'),
'cpu': server_params.get('cpu'),
'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
'memoryGB': server_params.get('memory'),
'type': server_params.get('type'),
'storageType': server_params.get('storage_type'),
'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
'customFields': server_params.get('custom_fields'),
'additionalDisks': server_params.get('additional_disks'),
'ttl': server_params.get('ttl'),
'packages': server_params.get('packages')}))
result = clc.v2.Requests(res)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
server_params.get('name'),
ex.response_text
))
#
# Patch the Request object so that it returns a valid server
# Find the server's UUID from the API response
server_uuid = [obj['id']
for obj in res['links'] if obj['rel'] == 'self'][0]
# Change the request server method to a _find_server_by_uuid closure so
# that it will work
result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
clc,
module,
server_uuid,
server_params.get('alias'))
return result
@staticmethod
def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
"""
retrieves the anti affinity policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param aa_policy_name: the anti affinity policy name
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
aa_policies = clc.v2.API.Call(method='GET',
url='antiAffinityPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
alias, ex.response_text))
for aa_policy in aa_policies.get('items'):
if aa_policy.get('name') == aa_policy_name:
if not aa_policy_id:
aa_policy_id = aa_policy.get('id')
else:
return module.fail_json(
msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
return aa_policy_id
#
    # This is the function that gets patched to the Request.server object using a lambda closure
#
@staticmethod
def _find_server_by_uuid_w_retry(
clc, module, svr_uuid, alias=None, retries=5, back_out=2):
"""
Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param svr_uuid: UUID of the server
        :param retries: the number of retry attempts to make before failing. default is 5
:param alias: the Account Alias to search
:return: a clc-sdk.Server instance
"""
if not alias:
alias = clc.v2.Account.GetAlias()
# Wait and retry if the api returns a 404
while True:
retries -= 1
try:
server_obj = clc.v2.API.Call(
method='GET', url='servers/%s/%s?uuid=true' %
(alias, svr_uuid))
server_id = server_obj['id']
server = clc.v2.Server(
id=server_id,
alias=alias,
server_obj=server_obj)
return server
except APIFailedResponse as e:
if e.response_status_code != 404:
return module.fail_json(
msg='A failure response was received from CLC API when '
'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
(svr_uuid, e.response_status_code, e.message))
if retries == 0:
return module.fail_json(
msg='Unable to reach the CLC API after 5 attempts')
sleep(back_out)
back_out *= 2
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
argument_dict = ClcServer._define_module_argument_spec()
module = AnsibleModule(supports_check_mode=True, **argument_dict)
clc_server = ClcServer(module)
clc_server.process_request()
from ansible.module_utils.basic import * # pylint: disable=W0614
if __name__ == '__main__':
main()
| gpl-3.0 |
jyotikamboj/container | django/contrib/gis/db/backends/postgis/creation.py | 87 | 3505 | from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
class PostGISCreation(DatabaseCreation):
geom_index_type = 'GIST'
geom_index_ops = 'GIST_GEOMETRY_OPS'
geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
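        # For a hypothetical model "city" with a 2D POINT column "point"
        # (SRID 4326) on pre-2.0 PostGIS, the statements built below would look
        # roughly like:
        #   SELECT AddGeometryColumn('city', 'point', 4326, 'POINT', 2);
        #   ALTER TABLE "city" ALTER "point" SET NOT NULL;
        #   CREATE INDEX "city_point_id" ON "city" USING GIST ( "point" GIST_GEOMETRY_OPS );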
output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
if f.geography or self.connection.ops.geometry:
# Geography and Geometry (PostGIS 2.0+) columns are
# created normally.
pass
else:
# Geometry columns are created by `AddGeometryColumn`
# stored procedure.
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ');')
if not f.null:
# Add a NOT NULL constraint to the field
output.append(style.SQL_KEYWORD('ALTER TABLE ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' ALTER ') +
style.SQL_FIELD(qn(f.column)) +
style.SQL_KEYWORD(' SET NOT NULL') + ';')
if f.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns.
# PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
# we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
# which are fast on multidimensional cases, or just plain
# gist index for the 2d case.
if f.geography:
index_ops = ''
elif self.connection.ops.geometry:
if f.dim > 2:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
else:
index_ops = ''
else:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' USING ') +
style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
style.SQL_FIELD(qn(f.column)) + index_ops + ' );')
return output
def sql_table_creation_suffix(self):
if self.connection.template_postgis is not None:
return ' TEMPLATE %s' % (
self.connection.ops.quote_name(self.connection.template_postgis),)
return ''
| mit |
pli3/enigma2-pli | lib/python/Plugins/Extensions/DVDPlayer/plugin.py | 9 | 3144 | import os
from Components.config import config
from Tools.Directories import pathExists, fileExists
from Plugins.Plugin import PluginDescriptor
from Components.Harddisk import harddiskmanager
detected_DVD = None
def main(session, **kwargs):
from Screens import DVD
session.open(DVD.DVDPlayer)
def play(session, **kwargs):
from Screens import DVD
session.open(DVD.DVDPlayer, dvd_device=harddiskmanager.getAutofsMountpoint(harddiskmanager.getCD()))
def DVDPlayer(*args, **kwargs):
# for backward compatibility with plugins that do "from DVDPlayer.plugin import DVDPlayer"
from Screens import DVD
return DVD.DVDPlayer(*args, **kwargs)
def DVDOverlay(*args, **kwargs):
# for backward compatibility with plugins that do "from DVDPlayer.plugin import DVDOverlay"
from Screens import DVD
return DVD.DVDOverlay(*args, **kwargs)
def filescan_open(list, session, **kwargs):
from Screens import DVD
if len(list) == 1 and list[0].mimetype == "video/x-dvd":
splitted = list[0].path.split('/')
print "splitted", splitted
if len(splitted) > 2:
if splitted[1] == 'autofs':
session.open(DVD.DVDPlayer, dvd_device="/dev/%s" %(splitted[2]))
return
else:
print "splitted[0]", splitted[1]
else:
dvd_filelist = []
for x in list:
if x.mimetype == "video/x-dvd-iso":
dvd_filelist.append(x.path)
if x.mimetype == "video/x-dvd":
dvd_filelist.append(x.path.rsplit('/',1)[0])
session.open(DVD.DVDPlayer, dvd_filelist=dvd_filelist)
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
# Overwrite checkFile to only detect local
class LocalScanner(Scanner):
def checkFile(self, file):
return fileExists(file.path)
return [
LocalScanner(mimetypes = ["video/x-dvd","video/x-dvd-iso"],
paths_to_scan =
[
ScanPath(path = "video_ts", with_subdirs = False),
ScanPath(path = "VIDEO_TS", with_subdirs = False),
ScanPath(path = "", with_subdirs = False),
],
name = "DVD",
description = _("Play DVD"),
openfnc = filescan_open,
)]
def onPartitionChange(action, partition):
print "[@] onPartitionChange", action, partition
if partition != harddiskmanager.getCD():
global detected_DVD
if action == 'remove':
print "[@] DVD removed"
detected_DVD = False
elif action == 'add':
print "[@] DVD Inserted"
detected_DVD = None
def menu(menuid, **kwargs):
if menuid == "mainmenu":
global detected_DVD
if detected_DVD is None:
cd = harddiskmanager.getCD()
if cd and os.path.exists(os.path.join(harddiskmanager.getAutofsMountpoint(harddiskmanager.getCD()), "VIDEO_TS")):
detected_DVD = True
else:
detected_DVD = False
if onPartitionChange not in harddiskmanager.on_partition_list_change:
harddiskmanager.on_partition_list_change.append(onPartitionChange)
if detected_DVD:
return [(_("DVD player"), play, "dvd_player", 46)]
return []
def Plugins(**kwargs):
return [PluginDescriptor(where = PluginDescriptor.WHERE_FILESCAN, needsRestart = False, fnc = filescan),
PluginDescriptor(name = "DVDPlayer", description = "Play DVDs", where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu)]
| gpl-2.0 |
mandeepdhami/horizon | openstack_dashboard/dashboards/admin/metering/views.py | 51 | 6870 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse # noqa
from django.utils.translation import ugettext_lazy as _
import django.views
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import csvbase
from openstack_dashboard.api import ceilometer
from openstack_dashboard.dashboards.admin.metering import forms as \
metering_forms
from openstack_dashboard.dashboards.admin.metering import tabs as \
metering_tabs
from openstack_dashboard.utils import metering as metering_utils
LOG = logging.getLogger(__name__)
class IndexView(tabs.TabbedTableView):
tab_group_class = metering_tabs.CeilometerOverviewTabs
template_name = 'admin/metering/index.html'
page_title = _("Resources Usage Overview")
class CreateUsageReport(forms.ModalFormView):
form_class = metering_forms.UsageReportForm
template_name = 'admin/metering/daily.html'
success_url = reverse_lazy('horizon:admin:metering:index')
page_title = _("Modify Usage Report Parameters")
class SamplesView(django.views.generic.TemplateView):
def get(self, request, *args, **kwargs):
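        # AJAX endpoint for the usage charts: expects a 'meter' query parameter
        # plus optional 'date_options'/'date_from'/'date_to', 'stats_attr' and
        # 'group_by', and returns the normalized series as JSON.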
meter = request.GET.get('meter', None)
if not meter:
return HttpResponse(json.dumps({}),
content_type='application/json')
meter_name = meter.replace(".", "_")
date_options = request.GET.get('date_options', None)
date_from = request.GET.get('date_from', None)
date_to = request.GET.get('date_to', None)
stats_attr = request.GET.get('stats_attr', 'avg')
group_by = request.GET.get('group_by', None)
try:
date_from, date_to = metering_utils.calc_date_args(date_from,
date_to,
date_options)
except Exception:
exceptions.handle(self.request, _('Dates cannot be recognized.'))
if group_by == 'project':
query = metering_utils.ProjectAggregatesQuery(request,
date_from,
date_to,
3600 * 24)
else:
query = metering_utils.MeterQuery(request, date_from,
date_to, 3600 * 24)
resources, unit = query.query(meter)
series = metering_utils.series_for_meter(request, resources,
group_by, meter,
meter_name, stats_attr, unit)
series = metering_utils.normalize_series_by_unit(series)
ret = {'series': series, 'settings': {}}
return HttpResponse(json.dumps(ret), content_type='application/json')
class CsvReportView(django.views.generic.View):
def get(self, request, **response_kwargs):
render_class = ReportCsvRenderer
response_kwargs.setdefault("filename", "usage.csv")
context = {'usage': load_report_data(request)}
resp = render_class(request=request,
template=None,
context=context,
content_type='csv',
**response_kwargs)
return resp
class ReportCsvRenderer(csvbase.BaseCsvResponse):
columns = [_("Project Name"), _("Meter"), _("Description"),
_("Service"), _("Time"), _("Value (Avg)"), _("Unit")]
def get_row_data(self):
for p in self.context['usage'].values():
for u in p:
yield (u["project"],
u["meter"],
u["description"],
u["service"],
u["time"],
u["value"],
u["unit"])
def load_report_data(request):
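    # Builds the per-project rows for the CSV usage report: every known meter is
    # aggregated per project over the requested date range and tagged with the
    # OpenStack service it belongs to.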
meters = ceilometer.Meters(request)
services = {
_('Nova'): meters.list_nova(),
_('Neutron'): meters.list_neutron(),
_('Glance'): meters.list_glance(),
_('Cinder'): meters.list_cinder(),
_('Swift_meters'): meters.list_swift(),
_('Kwapi'): meters.list_kwapi(),
_('IPMI'): meters.list_ipmi(),
}
project_rows = {}
date_options = request.GET.get('date_options', 7)
date_from = request.GET.get('date_from')
date_to = request.GET.get('date_to')
try:
date_from, date_to = metering_utils.calc_date_args(date_from,
date_to,
date_options)
except Exception:
exceptions.handle(request, _('Dates cannot be recognized.'))
try:
project_aggregates = metering_utils.ProjectAggregatesQuery(request,
date_from,
date_to,
3600 * 24)
except Exception:
exceptions.handle(request,
_('Unable to retrieve project list.'))
for meter in meters._cached_meters.values():
service = None
for name, m_list in services.items():
if meter in m_list:
service = name
break
res, unit = project_aggregates.query(meter.name)
for r in res:
values = r.get_meter(meter.name.replace(".", "_"))
if values:
for value in values:
row = {"name": 'none',
"project": r.id,
"meter": meter.name,
"description": meter.description,
"service": service,
"time": value._apiresource.period_end,
"value": value._apiresource.avg,
"unit": meter.unit}
if r.id not in project_rows:
project_rows[r.id] = [row]
else:
project_rows[r.id].append(row)
return project_rows
| apache-2.0 |
robertodr/autocmake | autocmake/generate.py | 5 | 8504 | def gen_cmake_command(config):
"""
Generate CMake command.
"""
from autocmake.extract import extract_list
s = []
s.append("\n\ndef gen_cmake_command(options, arguments):")
s.append(' """')
s.append(" Generate CMake command based on options and arguments.")
s.append(' """')
s.append(" command = []")
for env in config['export']:
s.append(' command.append({0})'.format(env))
s.append(" command.append(arguments['--cmake-executable'])")
for definition in config['define']:
s.append(' command.append({0})'.format(definition))
s.append(" command.append('-DCMAKE_BUILD_TYPE={0}'.format(arguments['--type']))")
s.append(" command.append('-G\"{0}\"'.format(arguments['--generator']))")
s.append(" if arguments['--cmake-options'] != \"''\":")
s.append(" command.append(arguments['--cmake-options'])")
s.append(" if arguments['--prefix']:")
s.append(" command.append('-DCMAKE_INSTALL_PREFIX=\"{0}\"'.format(arguments['--prefix']))")
s.append("\n return ' '.join(command)")
return '\n'.join(s)
def autogenerated_notice():
from datetime import date
from . import __version__
current_year = date.today().year
year_range = '2015-{0}'.format(current_year)
s = []
s.append('# This file is autogenerated by Autocmake v{0} http://autocmake.org'.format(__version__))
s.append('# Copyright (c) {0} by Radovan Bast, Roberto Di Remigio, Jonas Juselius, and contributors.'.format(year_range))
return '\n'.join(s)
def gen_cmake_options_wrappers():
s = """\n# Options handling utilities
include(CMakeDependentOption)
# Macro for printing an option in a consistent manner
# Written by Lori A. Burns (@loriab) and Ryan M. Richard (@ryanmrichard)
# Syntax: print_option(<option to print> <was specified>)
macro(print_option variable default)
if(NOT DEFINED ${variable} OR "${${variable}}" STREQUAL "")
message(STATUS "Setting (unspecified) option ${variable}: ${default}")
else()
message(STATUS "Setting option ${variable}: ${${variable}}")
endif()
endmacro()
# Wraps an option with default ON/OFF. Adds nice messaging to option()
# Written by Lori A. Burns (@loriab) and Ryan M. Richard (@ryanmrichard)
# Syntax: option_with_print(<option name> <description> <default value>)
macro(option_with_print variable msge default)
print_option(${variable} ${default})
option(${variable} ${msge} ${default})
endmacro()
# Wraps an option with a default other than ON/OFF and prints it
# Written by Lori A. Burns (@loriab) and Ryan M. Richard (@ryanmrichard)
# NOTE: Can't combine with above b/c CMake handles ON/OFF options specially
# NOTE2: CMake variables are always defined so need to further check for if
# they are the NULL string. This is also why we need the force
# Syntax: option_with_default(<option name> <description> <default value>)
macro(option_with_default variable msge default)
print_option(${variable} "${default}")
if(NOT DEFINED ${variable} OR "${${variable}}" STREQUAL "")
set(${variable} "${default}" CACHE STRING ${msge} FORCE)
endif()
endmacro()"""
return s
def gen_setup(config, default_build_type, relative_path, setup_script_name):
"""
Generate setup script.
"""
from autocmake.extract import extract_list
s = []
s.append('#!/usr/bin/env python')
s.append('\n{0}'.format(autogenerated_notice()))
s.append('\nimport os')
s.append('import sys')
s.append('assert sys.version_info >= (2, 6), \'Python >= 2.6 is required\'')
s.append("\nsys.path.insert(0, '{0}')".format(relative_path))
s.append('from autocmake import configure')
s.append('from autocmake.external import docopt')
s.append('\n\noptions = """')
s.append('Usage:')
s.append(' ./{0} [options] [<builddir>]'.format(setup_script_name))
s.append(' ./{0} (-h | --help)'.format(setup_script_name))
s.append('\nOptions:')
options = []
for opt in config['docopt']:
first = opt.split()[0].strip()
rest = ' '.join(opt.split()[1:]).strip()
options.append([first, rest])
options.append(['--type=<TYPE>', 'Set the CMake build type (debug, release, relwithdebinfo, minsizerel) [default: {0}].'.format(default_build_type)])
options.append(['--generator=<STRING>', 'Set the CMake build system generator [default: Unix Makefiles].'])
options.append(['--show', 'Show CMake command and exit.'])
options.append(['--cmake-executable=<CMAKE_EXECUTABLE>', 'Set the CMake executable [default: cmake].'])
options.append(['--cmake-options=<STRING>', "Define options to CMake [default: '']."])
options.append(['--prefix=<PATH>', 'Set the install path for make install.'])
options.append(['<builddir>', 'Build directory.'])
options.append(['-h --help', 'Show this screen.'])
s.append(align_options(options))
s.append('"""')
s.append(gen_cmake_command(config))
s.append("\n")
s.append("# parse command line args")
s.append("try:")
s.append(" arguments = docopt.docopt(options, argv=None)")
s.append("except docopt.DocoptExit:")
s.append(r" sys.stderr.write('ERROR: bad input to {0}\n'.format(sys.argv[0]))")
s.append(" sys.stderr.write(options)")
s.append(" sys.exit(-1)")
s.append("\n")
s.append("# use extensions to validate/post-process args")
s.append("if configure.module_exists('extensions'):")
s.append(" import extensions")
s.append(" arguments = extensions.postprocess_args(sys.argv, arguments)")
s.append("\n")
s.append("root_directory = os.path.dirname(os.path.realpath(__file__))")
s.append("\n")
s.append("build_path = arguments['<builddir>']")
s.append("\n")
s.append("# create cmake command")
s.append("cmake_command = '{0} -H{1}'.format(gen_cmake_command(options, arguments), root_directory)")
s.append("\n")
s.append("# run cmake")
s.append("configure.configure(root_directory, build_path, cmake_command, arguments['--show'])")
return s
def gen_cmakelists(project_name, project_language, min_cmake_version, default_build_type, relative_path, modules):
"""
Generate CMakeLists.txt.
"""
import os
s = []
s.append(autogenerated_notice())
s.append('\n# set minimum cmake version')
s.append('cmake_minimum_required(VERSION {0} FATAL_ERROR)'.format(min_cmake_version))
s.append('\n# project name')
s.append('project({0} LANGUAGES {1})'.format(project_name, project_language))
s.append('\n# do not rebuild if rules (compiler flags) change')
s.append('set(CMAKE_SKIP_RULE_DEPENDENCY TRUE)')
build_type_capitalized = {'debug': 'Debug',
'release': 'Release',
'relwithdebinfo': 'RelWithDebInfo',
'minsizerel': 'MinSizeRel'}
_build_type = build_type_capitalized[default_build_type]
s.append('\n# if CMAKE_BUILD_TYPE undefined, we set it to {0}'.format(_build_type))
s.append('if(NOT CMAKE_BUILD_TYPE)')
s.append(' set(CMAKE_BUILD_TYPE "{0}")'.format(_build_type))
s.append('endif()')
s.append(gen_cmake_options_wrappers())
if len(modules) > 0:
s.append('\n# directories which hold included cmake modules')
module_paths = [module.path for module in modules]
module_paths.append('downloaded') # this is done to be able to find fetched modules when testing
module_paths = list(set(module_paths))
module_paths.sort() # we do this to always get the same order and to minimize diffs
for directory in module_paths:
rel_cmake_module_path = os.path.join(relative_path, directory)
# on windows cmake corrects this so we have to make it wrong again
rel_cmake_module_path = rel_cmake_module_path.replace('\\', '/')
s.append('list(APPEND CMAKE_MODULE_PATH ${{PROJECT_SOURCE_DIR}}/{0})'.format(rel_cmake_module_path))
if len(modules) > 0:
s.append('\n# included cmake modules')
for module in modules:
s.append('include({0})'.format(os.path.splitext(module.name)[0]))
return s
def align_options(options):
"""
Indents flags and aligns help texts.
"""
l = 0
for opt in options:
if len(opt[0]) > l:
l = len(opt[0])
s = []
for opt in options:
s.append(' {0}{1} {2}'.format(opt[0], ' ' * (l - len(opt[0])), opt[1]))
return '\n'.join(s)
| bsd-3-clause |
devanlai/pyOCD | pyOCD/target/target_kinetis.py | 5 | 6593 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cortex_m import CortexM, DHCSR, DBGKEY, C_DEBUGEN, C_HALT
from pyOCD.target.target import TARGET_RUNNING
import logging
from time import sleep
MDM_STATUS = 0x01000000
MDM_CTRL = 0x01000004
MDM_IDR = 0x010000fc
MDM_STATUS_FLASH_MASS_ERASE_ACKNOWLEDGE = (1 << 0)
MDM_STATUS_FLASH_READY = (1 << 1)
MDM_STATUS_SYSTEM_SECURITY = (1 << 2)
MDM_STATUS_MASS_ERASE_ENABLE = (1 << 5)
MDM_STATUS_CORE_HALTED = (1 << 16)
MDM_CTRL_FLASH_MASS_ERASE_IN_PROGRESS = (1 << 0)
MDM_CTRL_DEBUG_REQUEST = (1 << 2)
MDM_CTRL_CORE_HOLD_RESET = (1 << 4)
# Kinetis FCF byte array to disable flash security.
fcf = [0xff] * 12
fcf += [0xfe, 0xff, 0xff, 0xff]
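# The 0xfe at offset 12 corresponds to the FSEC byte of the flash configuration
# field; writing 0xfe there leaves the device unsecured while the remaining
# bytes keep their erased (0xff) value. This reading of the layout is based on
# the Kinetis reference manuals rather than anything in this file.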
# Location of FCF in the memory map.
FCF_ADDR = 0x400
class Kinetis(CortexM):
def __init__(self, transport, memoryMap=None):
super(Kinetis, self).__init__(transport, memoryMap)
self.mdm_idr = 0
self.do_auto_unlock = True
def setAutoUnlock(self, doAutoUnlock):
self.do_auto_unlock = doAutoUnlock
def init(self):
CortexM.init(self, initial_setup=True, bus_accessible=False)
# check MDM-AP ID
val = self.transport.readAP(MDM_IDR)
if val != self.mdm_idr:
logging.error("%s: bad MDM-AP IDR (is 0x%08x, expected 0x%08x)", self.part_number, val, self.mdm_idr)
# check for flash security
isLocked = self.isLocked()
if isLocked:
if self.do_auto_unlock:
logging.warning("%s in secure state: will try to unlock via mass erase", self.part_number)
                # keep the target in reset until it has been erased and halted
self.transport.assertReset(True)
if not self.massErase():
self.transport.assertReset(False)
logging.error("%s: mass erase failed", self.part_number)
raise Exception("unable to unlock device")
# Use the MDM to keep the target halted after reset has been released
self.transport.writeAP(MDM_CTRL, MDM_CTRL_DEBUG_REQUEST)
# Enable debug
self.writeMemory(DHCSR, DBGKEY | C_DEBUGEN)
self.transport.assertReset(False)
while self.transport.readAP(MDM_STATUS) & MDM_STATUS_CORE_HALTED != MDM_STATUS_CORE_HALTED:
logging.debug("Waiting for mdm halt (erase)")
sleep(0.01)
# release MDM halt once it has taken effect in the DHCSR
self.transport.writeAP(MDM_CTRL, 0)
isLocked = False
else:
logging.warning("%s in secure state: not automatically unlocking", self.part_number)
else:
logging.info("%s not in secure state", self.part_number)
# Can't do anything more if the target is secure
if isLocked:
return
if self.halt_on_connect:
# Prevent the target from resetting if it has invalid code
self.transport.writeAP(MDM_CTRL, MDM_CTRL_DEBUG_REQUEST | MDM_CTRL_CORE_HOLD_RESET)
while self.transport.readAP(MDM_CTRL) & (MDM_CTRL_DEBUG_REQUEST | MDM_CTRL_CORE_HOLD_RESET) != (MDM_CTRL_DEBUG_REQUEST | MDM_CTRL_CORE_HOLD_RESET):
self.transport.writeAP(MDM_CTRL, MDM_CTRL_DEBUG_REQUEST | MDM_CTRL_CORE_HOLD_RESET)
# Enable debug
self.writeMemory(DHCSR, DBGKEY | C_DEBUGEN)
# Disable holding the core in reset, leave MDM halt on
self.transport.writeAP(MDM_CTRL, MDM_CTRL_DEBUG_REQUEST)
# Wait until the target is halted
while self.transport.readAP(MDM_STATUS) & MDM_STATUS_CORE_HALTED != MDM_STATUS_CORE_HALTED:
logging.debug("Waiting for mdm halt")
sleep(0.01)
# release MDM halt once it has taken effect in the DHCSR
self.transport.writeAP(MDM_CTRL, 0)
# sanity check that the target is still halted
if self.getState() == TARGET_RUNNING:
raise Exception("Target failed to stay halted during init sequence")
CortexM.init(self, initial_setup=False, bus_accessible=True)
def isLocked(self):
val = self.transport.readAP(MDM_STATUS)
if val & MDM_STATUS_SYSTEM_SECURITY:
return True
else:
return False
## @brief Returns True if mass erase succeeded, False if it failed or is disabled.
# Note: reset should be held for the duration of this function
def massErase(self):
        # Wait until the flash controller reports ready.
while True:
status = self.transport.readAP(MDM_STATUS)
if status & MDM_STATUS_FLASH_READY:
break
sleep(0.01)
# Check if mass erase is enabled.
status = self.transport.readAP(MDM_STATUS)
if not (status & MDM_STATUS_MASS_ERASE_ENABLE):
logging.error("Mass erase disabled. MDM status: 0x%x", status)
return False
# Set Flash Mass Erase in Progress bit to start erase.
self.transport.writeAP(MDM_CTRL, MDM_CTRL_FLASH_MASS_ERASE_IN_PROGRESS)
# Wait for Flash Mass Erase Acknowledge to be set.
while True:
val = self.transport.readAP(MDM_STATUS)
if val & MDM_STATUS_FLASH_MASS_ERASE_ACKNOWLEDGE:
break
sleep(0.01)
# Wait for Flash Mass Erase in Progress bit to clear when erase is completed.
while True:
val = self.transport.readAP(MDM_CTRL)
if (val == 0):
break
sleep(0.01)
# Confirm the part was unlocked
val = self.transport.readAP(MDM_STATUS)
if (val & MDM_STATUS_SYSTEM_SECURITY) == 0:
logging.warning("%s secure state: unlocked successfully", self.part_number)
return True
else:
logging.error("Failed to unlock. MDM status: 0x%x", val)
return False
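
# Minimal usage sketch (illustrative; in practice pyOCD constructs a
# family-specific subclass that sets mdm_idr rather than this base class
# directly). 'transport' is assumed to provide the readAP/writeAP/assertReset
# interface used throughout the class.
#
#     target = Kinetis(transport)
#     target.setAutoUnlock(False)      # opt out of automatic mass erase
#     target.init()                    # connect; logs if the part is secured
#     if target.isLocked():
#         transport.assertReset(True)  # massErase() expects reset to be held
#         unlocked = target.massErase()
#         transport.assertReset(False)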
| apache-2.0 |