repo_name | path | copies | size | content | license
---|---|---|---|---|---|
jorgeslima/files_copier | dependencies/tinydb/tests/test_tinydb.py | 2 | 12809 | # coding=utf-8
import sys
import pytest
from tinydb import TinyDB, where
from tinydb.storages import MemoryStorage
from tinydb.middlewares import Middleware
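# NOTE: the `db` fixture used throughout is presumably provided by a pytest
# conftest.py (not shown here) and pre-populated with three documents:
# {'int': 1, 'char': 'a'}, {'int': 1, 'char': 'b'}, {'int': 1, 'char': 'c'}.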
def test_purge(db):
db.purge()
db.insert({})
db.purge()
assert len(db) == 0
def test_all(db):
db.purge()
for i in range(10):
db.insert({})
assert len(db.all()) == 10
def test_insert(db):
db.purge()
db.insert({'int': 1, 'char': 'a'})
assert db.count(where('int') == 1) == 1
db.purge()
db.insert({'int': 1, 'char': 'a'})
db.insert({'int': 1, 'char': 'b'})
db.insert({'int': 1, 'char': 'c'})
assert db.count(where('int') == 1) == 3
assert db.count(where('char') == 'a') == 1
def test_insert_ids(db):
db.purge()
assert db.insert({'int': 1, 'char': 'a'}) == 1
assert db.insert({'int': 1, 'char': 'a'}) == 2
def test_insert_multiple(db):
db.purge()
assert not db.contains(where('int') == 1)
# Insert multiple from list
db.insert_multiple([{'int': 1, 'char': 'a'},
{'int': 1, 'char': 'b'},
{'int': 1, 'char': 'c'}])
assert db.count(where('int') == 1) == 3
assert db.count(where('char') == 'a') == 1
# Insert multiple from generator function
def generator():
for j in range(10):
yield {'int': j}
db.purge()
db.insert_multiple(generator())
for i in range(10):
assert db.count(where('int') == i) == 1
assert db.count(where('int').exists()) == 10
# Insert multiple from inline generator
db.purge()
db.insert_multiple({'int': i} for i in range(10))
for i in range(10):
assert db.count(where('int') == i) == 1
def test_insert_multiple_with_ids(db):
db.purge()
# Insert multiple from list
assert db.insert_multiple([{'int': 1, 'char': 'a'},
{'int': 1, 'char': 'b'},
{'int': 1, 'char': 'c'}]) == [1, 2, 3]
def test_remove(db):
db.remove(where('char') == 'b')
assert len(db) == 2
assert db.count(where('int') == 1) == 2
def test_remove_all_fails(db):
with pytest.raises(RuntimeError):
db.remove()
def test_remove_multiple(db):
db.remove(where('int') == 1)
assert len(db) == 0
def test_remove_ids(db):
db.remove(doc_ids=[1, 2])
assert len(db) == 1
def test_remove_returns_ids(db):
assert db.remove(where('char') == 'b') == [2]
def test_update(db):
assert len(db) == 3
db.update({'int': 2}, where('char') == 'a')
assert db.count(where('int') == 2) == 1
assert db.count(where('int') == 1) == 2
def test_update_all(db):
assert db.count(where('int') == 1) == 3
db.update({'newField': True})
assert db.count(where('newField') == True) == 3
def test_update_returns_ids(db):
db.purge()
assert db.insert({'int': 1, 'char': 'a'}) == 1
assert db.insert({'int': 1, 'char': 'a'}) == 2
assert db.update({'char': 'b'}, where('int') == 1) == [1, 2]
def test_update_transform(db):
def increment(field):
def transform(el):
el[field] += 1
return transform
def delete(field):
def transform(el):
del el[field]
return transform
assert db.count(where('int') == 1) == 3
db.update(increment('int'), where('char') == 'a')
db.update(delete('char'), where('char') == 'a')
assert db.count(where('int') == 2) == 1
assert db.count(where('char') == 'a') == 0
assert db.count(where('int') == 1) == 2
def test_update_ids(db):
db.update({'int': 2}, doc_ids=[1, 2])
assert db.count(where('int') == 2) == 2
def test_search(db):
assert not db._query_cache
assert len(db.search(where('int') == 1)) == 3
assert len(db._query_cache) == 1
assert len(db.search(where('int') == 1)) == 3 # Query result from cache
def test_get(db):
item = db.get(where('char') == 'b')
assert item['char'] == 'b'
def test_get_ids(db):
el = db.all()[0]
assert db.get(doc_id=el.doc_id) == el
assert db.get(doc_id=float('NaN')) is None
def test_count(db):
assert db.count(where('int') == 1) == 3
assert db.count(where('char') == 'd') == 0
def test_contains(db):
assert db.contains(where('int') == 1)
assert not db.contains(where('int') == 0)
def test_contains_ids(db):
assert db.contains(doc_ids=[1, 2])
assert not db.contains(doc_ids=[88])
def test_get_idempotent(db):
u = db.get(where('int') == 1)
z = db.get(where('int') == 1)
assert u == z
def test_multiple_dbs():
"""
Regression test for issue #3
"""
db1 = TinyDB(storage=MemoryStorage)
db2 = TinyDB(storage=MemoryStorage)
db1.insert({'int': 1, 'char': 'a'})
db1.insert({'int': 1, 'char': 'b'})
db1.insert({'int': 1, 'value': 5.0})
db2.insert({'color': 'blue', 'animal': 'turtle'})
assert len(db1) == 3
assert len(db2) == 1
def test_storage_closed_once():
class Storage(object):
def __init__(self):
self.closed = False
def read(self):
return {}
def write(self, data):
pass
def close(self):
assert not self.closed
self.closed = True
with TinyDB(storage=Storage) as db:
db.close()
del db
# If db.close() is called again during cleanup, the assertion in
# Storage.close() will fail and raise an exception
def test_unique_ids(tmpdir):
"""
:type tmpdir: py._path.local.LocalPath
"""
path = str(tmpdir.join('db.json'))
# Verify ids are unique when reopening the DB and inserting
with TinyDB(path) as _db:
_db.insert({'x': 1})
with TinyDB(path) as _db:
_db.insert({'x': 1})
with TinyDB(path) as _db:
data = _db.all()
assert data[0].doc_id != data[1].doc_id
# Verify ids stay unique when inserting/removing
with TinyDB(path) as _db:
_db.purge()
_db.insert_multiple({'x': i} for i in range(5))
_db.remove(where('x') == 2)
assert len(_db) == 4
ids = [e.doc_id for e in _db.all()]
assert len(ids) == len(set(ids))
def test_lastid_after_open(tmpdir):
"""
Regression test for issue #34
:type tmpdir: py._path.local.LocalPath
"""
NUM = 100
path = str(tmpdir.join('db.json'))
with TinyDB(path) as _db:
_db.insert_multiple({'i': i} for i in range(NUM))
with TinyDB(path) as _db:
assert _db._last_id == NUM
@pytest.mark.skipif(sys.version_info >= (3, 0),
reason="requires python2")
def test_unicode_memory(db):
"""
Regression test for issue #28
"""
unic_str = 'ß'.decode('utf-8')
byte_str = 'ß'
db.insert({'value': unic_str})
assert db.contains(where('value') == byte_str)
assert db.contains(where('value') == unic_str)
db.purge()
db.insert({'value': byte_str})
assert db.contains(where('value') == byte_str)
assert db.contains(where('value') == unic_str)
@pytest.mark.skipif(sys.version_info >= (3, 0),
reason="requires python2")
def test_unicode_json(tmpdir):
"""
Regression test for issue #28
"""
unic_str1 = 'a'.decode('utf-8')
byte_str1 = 'a'
unic_str2 = 'ß'.decode('utf-8')
byte_str2 = 'ß'
path = str(tmpdir.join('db.json'))
with TinyDB(path) as _db:
_db.purge()
_db.insert({'value': byte_str1})
_db.insert({'value': byte_str2})
assert _db.contains(where('value') == byte_str1)
assert _db.contains(where('value') == unic_str1)
assert _db.contains(where('value') == byte_str2)
assert _db.contains(where('value') == unic_str2)
with TinyDB(path) as _db:
_db.purge()
_db.insert({'value': unic_str1})
_db.insert({'value': unic_str2})
assert _db.contains(where('value') == byte_str1)
assert _db.contains(where('value') == unic_str1)
assert _db.contains(where('value') == byte_str2)
assert _db.contains(where('value') == unic_str2)
def test_doc_ids_json(tmpdir):
"""
Regression test for issue #45
"""
path = str(tmpdir.join('db.json'))
with TinyDB(path) as _db:
_db.purge()
assert _db.insert({'int': 1, 'char': 'a'}) == 1
assert _db.insert({'int': 1, 'char': 'a'}) == 2
_db.purge()
assert _db.insert_multiple([{'int': 1, 'char': 'a'},
{'int': 1, 'char': 'b'},
{'int': 1, 'char': 'c'}]) == [1, 2, 3]
assert _db.contains(doc_ids=[1, 2])
assert not _db.contains(doc_ids=[88])
_db.update({'int': 2}, doc_ids=[1, 2])
assert _db.count(where('int') == 2) == 2
el = _db.all()[0]
assert _db.get(doc_id=el.doc_id) == el
assert _db.get(doc_id=float('NaN')) is None
_db.remove(doc_ids=[1, 2])
assert len(_db) == 1
def test_insert_string(tmpdir):
path = str(tmpdir.join('db.json'))
with TinyDB(path) as _db:
data = [{'int': 1}, {'int': 2}]
_db.insert_multiple(data)
with pytest.raises(ValueError):
_db.insert([1, 2, 3]) # Fails
with pytest.raises(ValueError):
_db.insert(set(['bark'])) # Fails
assert data == _db.all()
_db.insert({'int': 3}) # Does not fail
def test_insert_invalid_dict(tmpdir):
path = str(tmpdir.join('db.json'))
with TinyDB(path) as _db:
data = [{'int': 1}, {'int': 2}]
_db.insert_multiple(data)
with pytest.raises(TypeError):
_db.insert({'int': set(['bark'])}) # Fails
assert data == _db.all()
_db.insert({'int': 3}) # Does not fail
def test_gc(tmpdir):
# See https://github.com/msiemens/tinydb/issues/92
path = str(tmpdir.join('db.json'))
db = TinyDB(path)
table = db.table('foo')
table.insert({'something': 'else'})
table.insert({'int': 13})
assert len(table.search(where('int') == 13)) == 1
assert table.all() == [{'something': 'else'}, {'int': 13}]
db.close()
def test_non_default_table():
db = TinyDB(storage=MemoryStorage)
assert [TinyDB.DEFAULT_TABLE] == list(db.tables())
db = TinyDB(storage=MemoryStorage, default_table='non-default')
assert set(['non-default']) == db.tables()
db.purge_tables()
default_table = TinyDB.DEFAULT_TABLE
TinyDB.DEFAULT_TABLE = 'non-default'
db = TinyDB(storage=MemoryStorage)
assert set(['non-default']) == db.tables()
TinyDB.DEFAULT_TABLE = default_table
def test_purge_table():
db = TinyDB(storage=MemoryStorage)
assert [TinyDB.DEFAULT_TABLE] == list(db.tables())
db.purge_table(TinyDB.DEFAULT_TABLE)
assert [] == list(db.tables())
table_name = 'some-other-table'
db = TinyDB(storage=MemoryStorage)
db.table(table_name)
assert set([TinyDB.DEFAULT_TABLE, table_name]) == db.tables()
db.purge_table(table_name)
assert set([TinyDB.DEFAULT_TABLE]) == db.tables()
assert table_name not in db._table_cache
db.purge_table('non-existent-table-name')
assert set([TinyDB.DEFAULT_TABLE]) == db.tables()
def test_empty_write(tmpdir):
path = str(tmpdir.join('db.json'))
class ReadOnlyMiddleware(Middleware):
def write(self, data):
raise AssertionError('No write for unchanged db')
TinyDB(path).close()
TinyDB(path, storage=ReadOnlyMiddleware()).close()
def test_query_cache():
db = TinyDB(storage=MemoryStorage)
db.insert_multiple([
{'name': 'foo', 'value': 42},
{'name': 'bar', 'value': -1337}
])
query = where('value') > 0
results = db.search(query)
assert len(results) == 1
# Modify the db instance to not return any results when
# bypassing the query cache
db._table_cache[TinyDB.DEFAULT_TABLE]._read = lambda: {}
# Make sure we got an independent copy of the result list
results.extend([1])
assert db.search(query) == [{'name': 'foo', 'value': 42}]
def test_tinydb_is_iterable(db):
assert [r for r in db] == db.all()
def test_eids(db):
with pytest.warns(DeprecationWarning):
assert db.contains(eids=[1]) is True
with pytest.warns(DeprecationWarning):
db.update({'field': 'value'}, eids=[1])
assert db.contains(where('field') == 'value')
with pytest.warns(DeprecationWarning):
doc = db.get(eid=1)
with pytest.warns(DeprecationWarning):
assert doc.eid == 1
with pytest.warns(DeprecationWarning):
db.remove(eids=[1])
assert not db.contains(where('field') == 'value')
with pytest.raises(TypeError):
db.remove(eids=[1], doc_ids=[1])
with pytest.raises(TypeError):
db.get(eid=[1], doc_id=[1])
| mit |
gnmiller/craig-bot | craig-bot/lib/python3.6/site-packages/discord/http.py | 1 | 32593 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import json
import logging
import sys
from urllib.parse import quote as _uriquote
import weakref
import aiohttp
from .errors import HTTPException, Forbidden, NotFound, LoginFailure, GatewayNotFound
from . import __version__, utils
log = logging.getLogger(__name__)
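# Discord responses are JSON for almost every endpoint; fall back to the
# raw text when the content-type header says otherwise.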
async def json_or_text(response):
text = await response.text(encoding='utf-8')
if response.headers['content-type'] == 'application/json':
return json.loads(text)
return text
class Route:
BASE = 'https://discordapp.com/api/v7'
def __init__(self, method, path, **parameters):
self.path = path
self.method = method
url = (self.BASE + self.path)
if parameters:
self.url = url.format(**{k: _uriquote(v) if isinstance(v, str) else v for k, v in parameters.items()})
else:
self.url = url
# major parameters:
self.channel_id = parameters.get('channel_id')
self.guild_id = parameters.get('guild_id')
@property
def bucket(self):
# the bucket is just method + path w/ major parameters
return '{0.method}:{0.channel_id}:{0.guild_id}:{0.path}'.format(self)
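# MaybeUnlock releases the per-bucket lock on exit unless defer() was
# called, in which case the lock stays held and is released later (e.g. by
# a timer once the rate-limit window has passed).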
class MaybeUnlock:
def __init__(self, lock):
self.lock = lock
self._unlock = True
def __enter__(self):
return self
def defer(self):
self._unlock = False
def __exit__(self, type, value, traceback):
if self._unlock:
self.lock.release()
class HTTPClient:
"""Represents an HTTP client sending HTTP requests to the Discord API."""
SUCCESS_LOG = '{method} {url} has received {text}'
REQUEST_LOG = '{method} {url} with {json} has returned {status}'
def __init__(self, connector=None, *, proxy=None, proxy_auth=None, loop=None):
self.loop = asyncio.get_event_loop() if loop is None else loop
self.connector = connector
self.__session = None # filled in static_login
self._locks = weakref.WeakValueDictionary()
self._global_over = asyncio.Event(loop=self.loop)
self._global_over.set()
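# _global_over is set while no global rate limit is in effect; request()
# waits on this event before issuing any HTTP call.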
self.token = None
self.bot_token = False
self.proxy = proxy
self.proxy_auth = proxy_auth
user_agent = 'DiscordBot (https://github.com/Rapptz/discord.py {0}) Python/{1[0]}.{1[1]} aiohttp/{2}'
self.user_agent = user_agent.format(__version__, sys.version_info, aiohttp.__version__)
def recreate(self):
if self.__session.closed:
self.__session = aiohttp.ClientSession(connector=self.connector, loop=self.loop)
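# request() serializes calls per rate-limit bucket and retries a request
# up to five times on rate limits (429) and transient server errors.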
async def request(self, route, *, files=None, header_bypass_delay=None, **kwargs):
bucket = route.bucket
method = route.method
url = route.url
lock = self._locks.get(bucket)
if lock is None:
lock = asyncio.Lock(loop=self.loop)
if bucket is not None:
self._locks[bucket] = lock
# header creation
headers = {
'User-Agent': self.user_agent,
}
if self.token is not None:
headers['Authorization'] = 'Bot ' + self.token if self.bot_token else self.token
# some checking if it's a JSON request
if 'json' in kwargs:
headers['Content-Type'] = 'application/json'
kwargs['data'] = utils.to_json(kwargs.pop('json'))
try:
reason = kwargs.pop('reason')
except KeyError:
pass
else:
if reason:
headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
kwargs['headers'] = headers
# Proxy support
if self.proxy is not None:
kwargs['proxy'] = self.proxy
if self.proxy_auth is not None:
kwargs['proxy_auth'] = self.proxy_auth
if not self._global_over.is_set():
# wait until the global lock is complete
await self._global_over.wait()
await lock.acquire()
with MaybeUnlock(lock) as maybe_lock:
for tries in range(5):
if files:
for f in files:
f.reset(seek=tries)
async with self.__session.request(method, url, **kwargs) as r:
log.debug('%s %s with %s has returned %s', method, url, kwargs.get('data'), r.status)
# even errors have text involved in them so this is safe to call
data = await json_or_text(r)
# check if we have rate limit header information
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429:
# we've depleted our current bucket
if header_bypass_delay is None:
delta = utils._parse_ratelimit_header(r)
else:
delta = header_bypass_delay
log.debug('A rate limit bucket has been exhausted (bucket: %s, retry: %s).', bucket, delta)
maybe_lock.defer()
self.loop.call_later(delta, lock.release)
# the request was successful so just return the text/json
if 300 > r.status >= 200:
log.debug('%s %s has received %s', method, url, data)
return data
# we are being rate limited
if r.status == 429:
if not isinstance(data, dict):
# Banned by Cloudflare more than likely.
raise HTTPException(r, data)
fmt = 'We are being rate limited. Retrying in %.2f seconds. Handled under the bucket "%s"'
# sleep a bit
retry_after = data['retry_after'] / 1000.0
log.warning(fmt, retry_after, bucket)
# check if it's a global rate limit
is_global = data.get('global', False)
if is_global:
log.warning('Global rate limit has been hit. Retrying in %.2f seconds.', retry_after)
self._global_over.clear()
await asyncio.sleep(retry_after, loop=self.loop)
log.debug('Done sleeping for the rate limit. Retrying...')
# release the global lock now that the
# global rate limit has passed
if is_global:
self._global_over.set()
log.debug('Global rate limit is now over.')
continue
# we've received a 500 or 502, unconditional retry
if r.status in {500, 502}:
await asyncio.sleep(1 + tries * 2, loop=self.loop)
continue
# the usual error cases
if r.status == 403:
raise Forbidden(r, data)
elif r.status == 404:
raise NotFound(r, data)
else:
raise HTTPException(r, data)
# We've run out of retries, raise.
raise HTTPException(r, data)
async def get_from_cdn(self, url):
async with self.__session.get(url) as resp:
if resp.status == 200:
return await resp.read()
elif resp.status == 404:
raise NotFound(resp, 'asset not found')
elif resp.status == 403:
raise Forbidden(resp, 'cannot retrieve asset')
else:
raise HTTPException(resp, 'failed to get asset')
# state management
async def close(self):
if self.__session:
await self.__session.close()
def _token(self, token, *, bot=True):
self.token = token
self.bot_token = bot
self._ack_token = None
# login management
async def static_login(self, token, *, bot):
# Necessary to get aiohttp to stop complaining about session creation
self.__session = aiohttp.ClientSession(connector=self.connector, loop=self.loop)
old_token, old_bot = self.token, self.bot_token
self._token(token, bot=bot)
try:
data = await self.request(Route('GET', '/users/@me'))
except HTTPException as exc:
self._token(old_token, bot=old_bot)
if exc.response.status == 401:
raise LoginFailure('Improper token has been passed.') from exc
raise
return data
def logout(self):
return self.request(Route('POST', '/auth/logout'))
# Group functionality
def start_group(self, user_id, recipients):
payload = {
'recipients': recipients
}
return self.request(Route('POST', '/users/{user_id}/channels', user_id=user_id), json=payload)
def leave_group(self, channel_id):
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id))
def add_group_recipient(self, channel_id, user_id):
r = Route('PUT', '/channels/{channel_id}/recipients/{user_id}', channel_id=channel_id, user_id=user_id)
return self.request(r)
def remove_group_recipient(self, channel_id, user_id):
r = Route('DELETE', '/channels/{channel_id}/recipients/{user_id}', channel_id=channel_id, user_id=user_id)
return self.request(r)
def edit_group(self, channel_id, **options):
valid_keys = ('name', 'icon')
payload = {
k: v for k, v in options.items() if k in valid_keys
}
return self.request(Route('PATCH', '/channels/{channel_id}', channel_id=channel_id), json=payload)
def convert_group(self, channel_id):
return self.request(Route('POST', '/channels/{channel_id}/convert', channel_id=channel_id))
# Message management
def start_private_message(self, user_id):
payload = {
'recipient_id': user_id
}
return self.request(Route('POST', '/users/@me/channels'), json=payload)
def send_message(self, channel_id, content, *, tts=False, embed=None, nonce=None):
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
payload = {}
if content:
payload['content'] = content
if tts:
payload['tts'] = True
if embed:
payload['embed'] = embed
if nonce:
payload['nonce'] = nonce
return self.request(r, json=payload)
def send_typing(self, channel_id):
return self.request(Route('POST', '/channels/{channel_id}/typing', channel_id=channel_id))
def send_files(self, channel_id, *, files, content=None, tts=False, embed=None, nonce=None):
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
form = aiohttp.FormData()
payload = {'tts': tts}
if content:
payload['content'] = content
if embed:
payload['embed'] = embed
if nonce:
payload['nonce'] = nonce
form.add_field('payload_json', utils.to_json(payload))
if len(files) == 1:
file = files[0]
form.add_field('file', file.fp, filename=file.filename, content_type='application/octet-stream')
else:
for index, file in enumerate(files):
form.add_field('file%s' % index, file.fp, filename=file.filename, content_type='application/octet-stream')
return self.request(r, data=form, files=files)
async def ack_message(self, channel_id, message_id):
r = Route('POST', '/channels/{channel_id}/messages/{message_id}/ack', channel_id=channel_id, message_id=message_id)
data = await self.request(r, json={'token': self._ack_token})
self._ack_token = data['token']
def ack_guild(self, guild_id):
return self.request(Route('POST', '/guilds/{guild_id}/ack', guild_id=guild_id))
def delete_message(self, channel_id, message_id, *, reason=None):
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r, reason=reason)
def delete_messages(self, channel_id, message_ids, *, reason=None):
r = Route('POST', '/channels/{channel_id}/messages/bulk_delete', channel_id=channel_id)
payload = {
'messages': message_ids
}
return self.request(r, json=payload, reason=reason)
def edit_message(self, channel_id, message_id, **fields):
r = Route('PATCH', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r, json=fields)
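# The reaction endpoints share a tight rate limit; header_bypass_delay=0.25
# makes request() wait a fixed 0.25s instead of parsing the ratelimit
# headers when the bucket is exhausted.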
def add_reaction(self, channel_id, message_id, emoji):
r = Route('PUT', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
channel_id=channel_id, message_id=message_id, emoji=emoji)
return self.request(r, header_bypass_delay=0.25)
def remove_reaction(self, channel_id, message_id, emoji, member_id):
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{member_id}',
channel_id=channel_id, message_id=message_id, member_id=member_id, emoji=emoji)
return self.request(r, header_bypass_delay=0.25)
def remove_own_reaction(self, channel_id, message_id, emoji):
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
channel_id=channel_id, message_id=message_id, emoji=emoji)
return self.request(r, header_bypass_delay=0.25)
def get_reaction_users(self, channel_id, message_id, emoji, limit, after=None):
r = Route('GET', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
channel_id=channel_id, message_id=message_id, emoji=emoji)
params = {'limit': limit}
if after:
params['after'] = after
return self.request(r, params=params)
def clear_reactions(self, channel_id, message_id):
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}/reactions',
channel_id=channel_id, message_id=message_id)
return self.request(r)
def get_message(self, channel_id, message_id):
r = Route('GET', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r)
def logs_from(self, channel_id, limit, before=None, after=None, around=None):
params = {
'limit': limit
}
if before is not None:
params['before'] = before
if after is not None:
params['after'] = after
if around is not None:
params['around'] = around
return self.request(Route('GET', '/channels/{channel_id}/messages', channel_id=channel_id), params=params)
def pin_message(self, channel_id, message_id):
return self.request(Route('PUT', '/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id, message_id=message_id))
def unpin_message(self, channel_id, message_id):
return self.request(Route('DELETE', '/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id, message_id=message_id))
def pins_from(self, channel_id):
return self.request(Route('GET', '/channels/{channel_id}/pins', channel_id=channel_id))
# Member management
def kick(self, user_id, guild_id, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
if reason:
# thanks aiohttp
r.url = '{0.url}?reason={1}'.format(r, _uriquote(reason))
return self.request(r)
def ban(self, user_id, guild_id, delete_message_days=1, reason=None):
r = Route('PUT', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id)
params = {
'delete-message-days': delete_message_days,
}
if reason:
# thanks aiohttp
r.url = '{0.url}?reason={1}'.format(r, _uriquote(reason))
return self.request(r, params=params)
def unban(self, user_id, guild_id, *, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, reason=reason)
def guild_voice_state(self, user_id, guild_id, *, mute=None, deafen=None, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
payload = {}
if mute is not None:
payload['mute'] = mute
if deafen is not None:
payload['deaf'] = deafen
return self.request(r, json=payload, reason=reason)
def edit_profile(self, password, username, avatar, **fields):
payload = {
'password': password,
'username': username,
'avatar': avatar
}
if 'email' in fields:
payload['email'] = fields['email']
if 'new_password' in fields:
payload['new_password'] = fields['new_password']
return self.request(Route('PATCH', '/users/@me'), json=payload)
def change_my_nickname(self, guild_id, nickname, *, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/members/@me/nick', guild_id=guild_id)
payload = {
'nick': nickname
}
return self.request(r, json=payload, reason=reason)
def change_nickname(self, guild_id, user_id, nickname, *, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
payload = {
'nick': nickname
}
return self.request(r, json=payload, reason=reason)
def edit_member(self, guild_id, user_id, *, reason=None, **fields):
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, json=fields, reason=reason)
# Channel management
def edit_channel(self, channel_id, *, reason=None, **options):
r = Route('PATCH', '/channels/{channel_id}', channel_id=channel_id)
valid_keys = ('name', 'parent_id', 'topic', 'bitrate', 'nsfw',
'user_limit', 'position', 'permission_overwrites', 'rate_limit_per_user')
payload = {
k: v for k, v in options.items() if k in valid_keys
}
return self.request(r, reason=reason, json=payload)
def bulk_channel_update(self, guild_id, data, *, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/channels', guild_id=guild_id)
return self.request(r, json=data, reason=reason)
def create_channel(self, guild_id, channel_type, *, reason=None, **options):
payload = {
'type': channel_type
}
valid_keys = ('name', 'parent_id', 'topic', 'bitrate', 'nsfw',
'user_limit', 'position', 'permission_overwrites', 'rate_limit_per_user')
payload.update({
k: v for k, v in options.items() if k in valid_keys and v is not None
})
return self.request(Route('POST', '/guilds/{guild_id}/channels', guild_id=guild_id), json=payload, reason=reason)
def delete_channel(self, channel_id, *, reason=None):
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id), reason=reason)
# Webhook management
def create_webhook(self, channel_id, *, name, avatar=None, reason=None):
payload = {
'name': name
}
if avatar is not None:
payload['avatar'] = avatar
r = Route('POST', '/channels/{channel_id}/webhooks', channel_id=channel_id)
return self.request(r, json=payload, reason=reason)
def channel_webhooks(self, channel_id):
return self.request(Route('GET', '/channels/{channel_id}/webhooks', channel_id=channel_id))
def guild_webhooks(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/webhooks', guild_id=guild_id))
def get_webhook(self, webhook_id):
return self.request(Route('GET', '/webhooks/{webhook_id}', webhook_id=webhook_id))
# Guild management
def get_guilds(self, limit, before=None, after=None):
params = {
'limit': limit
}
if before:
params['before'] = before
if after:
params['after'] = after
return self.request(Route('GET', '/users/@me/guilds'), params=params)
def leave_guild(self, guild_id):
return self.request(Route('DELETE', '/users/@me/guilds/{guild_id}', guild_id=guild_id))
def get_guild(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}', guild_id=guild_id))
def delete_guild(self, guild_id):
return self.request(Route('DELETE', '/guilds/{guild_id}', guild_id=guild_id))
def create_guild(self, name, region, icon):
payload = {
'name': name,
'icon': icon,
'region': region
}
return self.request(Route('POST', '/guilds'), json=payload)
def edit_guild(self, guild_id, *, reason=None, **fields):
valid_keys = ('name', 'region', 'icon', 'afk_timeout', 'owner_id',
'afk_channel_id', 'splash', 'verification_level',
'system_channel_id', 'default_message_notifications',
'description', 'explicit_content_filter', 'banner')
payload = {
k: v for k, v in fields.items() if k in valid_keys
}
return self.request(Route('PATCH', '/guilds/{guild_id}', guild_id=guild_id), json=payload, reason=reason)
def get_bans(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/bans', guild_id=guild_id))
def get_ban(self, user_id, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id))
def get_vanity_code(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/vanity-url', guild_id=guild_id))
def change_vanity_code(self, guild_id, code, *, reason=None):
payload = {'code': code}
return self.request(Route('PATCH', '/guilds/{guild_id}/vanity-url', guild_id=guild_id), json=payload, reason=reason)
def get_member(self, guild_id, member_id):
return self.request(Route('GET', '/guilds/{guild_id}/members/{member_id}', guild_id=guild_id, member_id=member_id))
def prune_members(self, guild_id, days, compute_prune_count, *, reason=None):
params = {
'days': days,
'compute_prune_count': compute_prune_count
}
return self.request(Route('POST', '/guilds/{guild_id}/prune', guild_id=guild_id), params=params, reason=reason)
def estimate_pruned_members(self, guild_id, days):
params = {
'days': days
}
return self.request(Route('GET', '/guilds/{guild_id}/prune', guild_id=guild_id), params=params)
def get_all_custom_emojis(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/emojis', guild_id=guild_id))
def get_custom_emoji(self, guild_id, emoji_id):
return self.request(Route('GET', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id))
def create_custom_emoji(self, guild_id, name, image, *, roles=None, reason=None):
payload = {
'name': name,
'image': image,
'roles': roles or []
}
r = Route('POST', '/guilds/{guild_id}/emojis', guild_id=guild_id)
return self.request(r, json=payload, reason=reason)
def delete_custom_emoji(self, guild_id, emoji_id, *, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id)
return self.request(r, reason=reason)
def edit_custom_emoji(self, guild_id, emoji_id, *, name, roles=None, reason=None):
payload = {
'name': name,
'roles': roles or []
}
r = Route('PATCH', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id)
return self.request(r, json=payload, reason=reason)
def get_audit_logs(self, guild_id, limit=100, before=None, after=None, user_id=None, action_type=None):
params = {'limit': limit}
if before:
params['before'] = before
if after:
params['after'] = after
if user_id:
params['user_id'] = user_id
if action_type:
params['action_type'] = action_type
r = Route('GET', '/guilds/{guild_id}/audit-logs', guild_id=guild_id)
return self.request(r, params=params)
def get_widget(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/widget.json', guild_id=guild_id))
# Invite management
def create_invite(self, channel_id, *, reason=None, **options):
r = Route('POST', '/channels/{channel_id}/invites', channel_id=channel_id)
payload = {
'max_age': options.get('max_age', 0),
'max_uses': options.get('max_uses', 0),
'temporary': options.get('temporary', False),
'unique': options.get('unique', True)
}
return self.request(r, reason=reason, json=payload)
def get_invite(self, invite_id, *, with_counts=True):
params = {
'with_counts': int(with_counts)
}
return self.request(Route('GET', '/invite/{invite_id}', invite_id=invite_id), params=params)
def invites_from(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/invites', guild_id=guild_id))
def invites_from_channel(self, channel_id):
return self.request(Route('GET', '/channels/{channel_id}/invites', channel_id=channel_id))
def delete_invite(self, invite_id, *, reason=None):
return self.request(Route('DELETE', '/invite/{invite_id}', invite_id=invite_id), reason=reason)
# Role management
def edit_role(self, guild_id, role_id, *, reason=None, **fields):
r = Route('PATCH', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
valid_keys = ('name', 'permissions', 'color', 'hoist', 'mentionable')
payload = {
k: v for k, v in fields.items() if k in valid_keys
}
return self.request(r, json=payload, reason=reason)
def delete_role(self, guild_id, role_id, *, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
return self.request(r, reason=reason)
def replace_roles(self, user_id, guild_id, role_ids, *, reason=None):
return self.edit_member(guild_id=guild_id, user_id=user_id, roles=role_ids, reason=reason)
def create_role(self, guild_id, *, reason=None, **fields):
r = Route('POST', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=fields, reason=reason)
def move_role_position(self, guild_id, positions, *, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=positions, reason=reason)
def add_role(self, guild_id, user_id, role_id, *, reason=None):
r = Route('PUT', '/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id, user_id=user_id, role_id=role_id)
return self.request(r, reason=reason)
def remove_role(self, guild_id, user_id, role_id, *, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id, user_id=user_id, role_id=role_id)
return self.request(r, reason=reason)
def edit_channel_permissions(self, channel_id, target, allow, deny, type, *, reason=None):
payload = {
'id': target,
'allow': allow,
'deny': deny,
'type': type
}
r = Route('PUT', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target)
return self.request(r, json=payload, reason=reason)
def delete_channel_permissions(self, channel_id, target, *, reason=None):
r = Route('DELETE', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target)
return self.request(r, reason=reason)
# Voice management
def move_member(self, user_id, guild_id, channel_id, *, reason=None):
return self.edit_member(guild_id=guild_id, user_id=user_id, channel_id=channel_id, reason=reason)
# Relationship related
def remove_relationship(self, user_id):
r = Route('DELETE', '/users/@me/relationships/{user_id}', user_id=user_id)
return self.request(r)
def add_relationship(self, user_id, type=None):
r = Route('PUT', '/users/@me/relationships/{user_id}', user_id=user_id)
payload = {}
if type is not None:
payload['type'] = type
return self.request(r, json=payload)
def send_friend_request(self, username, discriminator):
r = Route('POST', '/users/@me/relationships')
payload = {
'username': username,
'discriminator': int(discriminator)
}
return self.request(r, json=payload)
# Misc
def application_info(self):
return self.request(Route('GET', '/oauth2/applications/@me'))
async def get_gateway(self, *, encoding='json', v=6, zlib=True):
try:
data = await self.request(Route('GET', '/gateway'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v={2}&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v={2}'
return value.format(data['url'], encoding, v)
async def get_bot_gateway(self, *, encoding='json', v=6, zlib=True):
try:
data = await self.request(Route('GET', '/gateway/bot'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v={2}&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v={2}'
return data['shards'], value.format(data['url'], encoding, v)
def get_user(self, user_id):
return self.request(Route('GET', '/users/{user_id}', user_id=user_id))
def get_user_profile(self, user_id):
return self.request(Route('GET', '/users/{user_id}/profile', user_id=user_id))
def get_mutual_friends(self, user_id):
return self.request(Route('GET', '/users/{user_id}/relationships', user_id=user_id))
def change_hypesquad_house(self, house_id):
payload = {'house_id': house_id}
return self.request(Route('POST', '/hypesquad/online'), json=payload)
def leave_hypesquad_house(self):
return self.request(Route('DELETE', '/hypesquad/online'))
def edit_settings(self, **payload):
return self.request(Route('PATCH', '/users/@me/settings'), json=payload)
| mit |
nealtodd/django | tests/contenttypes_tests/models.py | 172 | 2882 | from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.http import urlquote
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
def get_absolute_url(self):
return '/authors/%s/' % self.id
@python_2_unicode_compatible
class Article(models.Model):
title = models.CharField(max_length=100)
slug = models.SlugField()
author = models.ForeignKey(Author, models.CASCADE)
date_created = models.DateTimeField()
def __str__(self):
return self.title
@python_2_unicode_compatible
class SchemeIncludedURL(models.Model):
url = models.URLField(max_length=100)
def __str__(self):
return self.url
def get_absolute_url(self):
return self.url
class ConcreteModel(models.Model):
name = models.CharField(max_length=10)
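# ProxyModel reuses ConcreteModel's database table (Meta.proxy = True);
# it presumably exists to exercise ContentType lookups for proxy models.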
class ProxyModel(ConcreteModel):
class Meta:
proxy = True
@python_2_unicode_compatible
class FooWithoutUrl(models.Model):
"""
Fake model not defining ``get_absolute_url`` for
ContentTypesTests.test_shortcut_view_without_get_absolute_url()
"""
name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.name
class FooWithUrl(FooWithoutUrl):
"""
Fake model defining ``get_absolute_url`` for
ContentTypesTests.test_shortcut_view().
"""
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.name)
class FooWithBrokenAbsoluteUrl(FooWithoutUrl):
"""
Fake model defining a ``get_absolute_url`` method containing an error
"""
def get_absolute_url(self):
return "/users/%s/" % self.unknown_field
class Question(models.Model):
text = models.CharField(max_length=200)
answer_set = GenericRelation('Answer')
@python_2_unicode_compatible
class Answer(models.Model):
text = models.CharField(max_length=200)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
question = GenericForeignKey()
class Meta:
order_with_respect_to = 'question'
def __str__(self):
return self.text
@python_2_unicode_compatible
class Post(models.Model):
"""An ordered tag on an item."""
title = models.CharField(max_length=200)
content_type = models.ForeignKey(ContentType, models.CASCADE, null=True)
object_id = models.PositiveIntegerField(null=True)
parent = GenericForeignKey()
children = GenericRelation('Post')
class Meta:
order_with_respect_to = 'parent'
def __str__(self):
return self.title
| bsd-3-clause |
charbeljc/OCB | addons/hr_payroll_account/hr_payroll_account.py | 240 | 10840 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import date, datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools import float_compare, float_is_zero
from openerp.tools.translate import _
class hr_payslip(osv.osv):
'''
Pay Slip
'''
_inherit = 'hr.payslip'
_description = 'Pay Slip'
_columns = {
'period_id': fields.many2one('account.period', 'Force Period',states={'draft': [('readonly', False)]}, readonly=True, domain=[('state','<>','done')], help="Keep empty to use the period of the validation(Payslip) date."),
'journal_id': fields.many2one('account.journal', 'Salary Journal',states={'draft': [('readonly', False)]}, readonly=True, required=True),
'move_id': fields.many2one('account.move', 'Accounting Entry', readonly=True, copy=False),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'journal_id' in context:
vals.update({'journal_id': context.get('journal_id')})
return super(hr_payslip, self).create(cr, uid, vals, context=context)
def onchange_contract_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
contract_obj = self.pool.get('hr.contract')
res = super(hr_payslip, self).onchange_contract_id(cr, uid, ids, date_from=date_from, date_to=date_to, employee_id=employee_id, contract_id=contract_id, context=context)
journal_id = contract_id and contract_obj.browse(cr, uid, contract_id, context=context).journal_id.id or False
res['value'].update({'journal_id': journal_id})
return res
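# Posted journal entries cannot be unlinked directly, so cancel any posted
# move first before deleting the payslip's accounting entry.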
def cancel_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
move_ids = []
move_to_cancel = []
for slip in self.browse(cr, uid, ids, context=context):
if slip.move_id:
move_ids.append(slip.move_id.id)
if slip.move_id.state == 'posted':
move_to_cancel.append(slip.move_id.id)
move_pool.button_cancel(cr, uid, move_to_cancel, context=context)
move_pool.unlink(cr, uid, move_ids, context=context)
return super(hr_payslip, self).cancel_sheet(cr, uid, ids, context=context)
def process_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
period_pool = self.pool.get('account.period')
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Payroll')
timenow = time.strftime('%Y-%m-%d')
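# Build one account.move per payslip: a debit and/or credit line for every
# salary-rule result, plus a single adjustment line if the move would not
# balance.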
for slip in self.browse(cr, uid, ids, context=context):
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
if not slip.period_id:
search_periods = period_pool.find(cr, uid, slip.date_to, context=context)
period_id = search_periods[0]
else:
period_id = slip.period_id.id
default_partner_id = slip.employee_id.address_home_id.id
name = _('Payslip of %s') % (slip.employee_id.name)
move = {
'narration': name,
'date': timenow,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'period_id': period_id,
}
for line in slip.details_by_salary_rule_category:
amt = slip.credit_note and -line.total or line.total
if float_is_zero(amt, precision_digits=precision):
continue
partner_id = line.salary_rule_id.register_id.partner_id and line.salary_rule_id.register_id.partner_id.id or default_partner_id
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_debit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt > 0.0 and amt or 0.0,
'credit': amt < 0.0 and -amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_credit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt < 0.0 and -amt or 0.0,
'credit': amt > 0.0 and amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if float_compare(credit_sum, debit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Credit Account!')%(slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': 0.0,
'credit': debit_sum - credit_sum,
})
line_ids.append(adjust_credit)
elif float_compare(debit_sum, credit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Debit Account!')%(slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': credit_sum - debit_sum,
'credit': 0.0,
})
line_ids.append(adjust_debit)
move.update({'line_id': line_ids})
move_id = move_pool.create(cr, uid, move, context=context)
self.write(cr, uid, [slip.id], {'move_id': move_id, 'period_id' : period_id}, context=context)
if slip.journal_id.entry_posted:
move_pool.post(cr, uid, [move_id], context=context)
return super(hr_payslip, self).process_sheet(cr, uid, [slip.id], context=context)
class hr_salary_rule(osv.osv):
_inherit = 'hr.salary.rule'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'account_tax_id':fields.many2one('account.tax.code', 'Tax Code'),
'account_debit': fields.many2one('account.account', 'Debit Account'),
'account_credit': fields.many2one('account.account', 'Credit Account'),
}
class hr_contract(osv.osv):
_inherit = 'hr.contract'
_description = 'Employee Contract'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'journal_id': fields.many2one('account.journal', 'Salary Journal'),
}
class hr_payslip_run(osv.osv):
_inherit = 'hr.payslip.run'
_description = 'Payslip Run'
_columns = {
'journal_id': fields.many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True, required=True),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
maciekcc/tensorflow | tensorflow/contrib/copy_graph/python/util/copy_test.py | 112 | 3739 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.copy_graph.python.util.copy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.copy_graph.python.util import copy_elements
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
graph1 = ops.Graph()
graph2 = ops.Graph()
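# Two independent graphs: the tests copy variables and ops from graph1
# into graph2 and check that evaluation results match across both.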
class CopyVariablesTest(test.TestCase):
def testVariableCopy(self):
with graph1.as_default():
#Define a Variable in graph1
some_var = variables.Variable(2)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
#Make a copy of some_var in the default scope in graph2
copy1 = copy_elements.copy_variable_to_graph(some_var, graph2)
#Make another copy with different scope
copy2 = copy_elements.copy_variable_to_graph(some_var, graph2, "test_scope")
#Initialize both the copies
with graph2.as_default():
#Initialize Session
sess2 = session_lib.Session()
#Initialize the Variables
variables.global_variables_initializer().run(session=sess2)
#Ensure values in all three variables are the same
v1 = some_var.eval(session=sess1)
v2 = copy1.eval(session=sess2)
v3 = copy2.eval(session=sess2)
assert isinstance(copy1, variables.Variable)
assert isinstance(copy2, variables.Variable)
assert v1 == v2 == v3 == 2
class CopyOpsTest(test.TestCase):
def testOpsCopy(self):
with graph1.as_default():
#Initialize a basic expression y = ax + b
x = array_ops.placeholder("float")
a = variables.Variable(3.0)
b = constant_op.constant(4.0)
ax = math_ops.multiply(x, a)
y = math_ops.add(ax, b)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
#First, initialize a as a Variable in graph2
a1 = copy_elements.copy_variable_to_graph(a, graph2)
#Initialize a1 in graph2
with graph2.as_default():
#Initialize session
sess2 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess2)
#Initialize a copy of y in graph2
y1 = copy_elements.copy_op_to_graph(y, graph2, [a1])
#Now that y has been copied, x must be copied too.
#Get that instance
x1 = copy_elements.get_copied_op(x, graph2)
#Compare values of y & y1 for a sample input
#and check if they match
v1 = y.eval({x: 5}, session=sess1)
v2 = y1.eval({x1: 5}, session=sess2)
assert v1 == v2
if __name__ == "__main__":
test.main()
| apache-2.0 |
DNFcode/edx-platform | cms/djangoapps/contentstore/management/commands/check_course.py | 160 | 2722 | from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import check_module_metadata_editability
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class Command(BaseCommand):
help = '''Enumerates through the course and find common errors'''
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("check_course requires one argument: <course_id>")
try:
course_key = CourseKey.from_string(args[0])
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
store = modulestore()
course = store.get_course(course_key, depth=3)
err_cnt = 0
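# Recursively walk the course tree and count metadata editability errors.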
def _xlint_metadata(module):
err_cnt = check_module_metadata_editability(module)
for child in module.get_children():
err_cnt = err_cnt + _xlint_metadata(child)
return err_cnt
err_cnt = err_cnt + _xlint_metadata(course)
# we've had a bug where the xml_attributes field can be rewritten as a string rather than a dict
def _check_xml_attributes_field(module):
err_cnt = 0
if hasattr(module, 'xml_attributes') and isinstance(module.xml_attributes, basestring):
print 'module = {0} has xml_attributes as a string. It should be a dict'.format(module.location)
err_cnt = err_cnt + 1
for child in module.get_children():
err_cnt = err_cnt + _check_xml_attributes_field(child)
return err_cnt
err_cnt = err_cnt + _check_xml_attributes_field(course)
# check for dangling discussion items, this can cause errors in the forums
def _get_discussion_items(module):
discussion_items = []
if module.location.category == 'discussion':
discussion_items = discussion_items + [module.location]
for child in module.get_children():
discussion_items = discussion_items + _get_discussion_items(child)
return discussion_items
discussion_items = _get_discussion_items(course)
# now query all discussion items via get_items() and compare with the tree-traversal
queried_discussion_items = store.get_items(course_key=course_key, qualifiers={'category': 'discussion'})
for item in queried_discussion_items:
if item.location not in discussion_items:
print 'Found dangling discussion module = {0}'.format(item.location)
| agpl-3.0 |
cdegroc/scikit-learn | sklearn/linear_model/setup.py | 1 | 1289 | from os.path import join
import numpy
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('linear_model', parent_package, top_path)
# cd fast needs CBLAS
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
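# cd_fast is linked against CBLAS; when no optimized BLAS (or a broken
# ATLAS) is detected, the bundled reference headers under ../src/cblas
# are used instead.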
config.add_extension('cd_fast',
sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[numpy.get_include()]
)
# add other directories
config.add_subpackage('tests')
config.add_subpackage('sparse')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
TheTypoMaster/chromium-crosswalk | tools/perf/benchmarks/dromaeo.py | 4 | 9390 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import os
from core import perf_benchmark
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import page_test
from telemetry import story
from telemetry.value import scalar
from metrics import power
class _DromaeoMeasurement(page_test.PageTest):
def __init__(self):
super(_DromaeoMeasurement, self).__init__()
self._power_metric = None
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def WillStartBrowser(self, platform):
self._power_metric = power.PowerMetric(platform)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression(
'window.document.getElementById("pause") &&' +
'window.document.getElementById("pause").value == "Run"',
120)
# Start spying on POST request that will report benchmark results, and
# intercept result data.
tab.ExecuteJavaScript('(function() {' +
' var real_jquery_ajax_ = window.jQuery.ajax;' +
' window.results_ = "";' +
' window.jQuery.ajax = function(request) {' +
' if (request.url == "store.php") {' +
' window.results_ =' +
' decodeURIComponent(request.data);' +
' window.results_ = window.results_.substring(' +
' window.results_.indexOf("=") + 1, ' +
' window.results_.lastIndexOf("&"));' +
' real_jquery_ajax_(request);' +
' }' +
' };' +
'})();')
# Starts benchmark.
tab.ExecuteJavaScript('window.document.getElementById("pause").click();')
tab.WaitForJavaScriptExpression('!!window.results_', 600)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
score = eval(tab.EvaluateJavaScript('window.results_ || "[]"'))
def Escape(k):
chars = [' ', '.', '-', '/', '(', ')', '*']
for c in chars:
k = k.replace(c, '_')
return k
def AggregateData(container, key, value):
if key not in container:
container[key] = {'count': 0, 'sum': 0}
container[key]['count'] += 1
container[key]['sum'] += math.log(value)
suffix = page.url[page.url.index('?') + 1 :]
def AddResult(name, value):
important = False
if name == suffix:
important = True
results.AddValue(scalar.ScalarValue(
results.current_page, Escape(name), 'runs/s', value, important))
aggregated = {}
for data in score:
AddResult('%s/%s' % (data['collection'], data['name']),
data['mean'])
top_name = data['collection'].split('-', 1)[0]
AggregateData(aggregated, top_name, data['mean'])
collection_name = data['collection']
AggregateData(aggregated, collection_name, data['mean'])
for key, value in aggregated.iteritems():
AddResult(key, math.exp(value['sum'] / value['count']))
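# Note (added comment, not in the original file): AggregateData accumulates
# log-values, so the exp(mean of logs) computed just above is the geometric
# mean of the per-test scores. For example, runs/s scores of 10 and 1000
# aggregate to exp((ln 10 + ln 1000) / 2) = 100 rather than the arithmetic
# mean 505.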
class _DromaeoBenchmark(perf_benchmark.PerfBenchmark):
"""A base class for Dromaeo benchmarks."""
test = _DromaeoMeasurement
@classmethod
def Name(cls):
return 'dromaeo'
def CreateStorySet(self, options):
"""Makes a PageSet for Dromaeo benchmarks."""
# Subclasses are expected to define class members called query_param and
# tag.
if not hasattr(self, 'query_param') or not hasattr(self, 'tag'):
raise NotImplementedError('query_param or tag not in Dromaeo benchmark.')
archive_data_file = '../page_sets/data/dromaeo.%s.json' % self.tag
ps = story.StorySet(
archive_data_file=archive_data_file,
base_dir=os.path.dirname(os.path.abspath(__file__)),
cloud_storage_bucket=story.PUBLIC_BUCKET)
url = 'http://dromaeo.com?%s' % self.query_param
ps.AddStory(page_module.Page(
url, ps, ps.base_dir, make_javascript_deterministic=False))
return ps
class DromaeoDomCoreAttr(_DromaeoBenchmark):
"""Dromaeo DOMCore attr JavaScript benchmark.
Tests setting and getting DOM node attributes.
"""
tag = 'domcoreattr'
query_param = 'dom-attr'
@classmethod
def Name(cls):
return 'dromaeo.domcoreattr'
@benchmark.Disabled('xp') # crbug.com/501625
class DromaeoDomCoreModify(_DromaeoBenchmark):
"""Dromaeo DOMCore modify JavaScript benchmark.
Tests creating and injecting DOM nodes.
"""
tag = 'domcoremodify'
query_param = 'dom-modify'
@classmethod
def Name(cls):
return 'dromaeo.domcoremodify'
class DromaeoDomCoreQuery(_DromaeoBenchmark):
"""Dromaeo DOMCore query JavaScript benchmark.
Tests querying DOM elements in a document.
"""
tag = 'domcorequery'
query_param = 'dom-query'
@classmethod
def Name(cls):
return 'dromaeo.domcorequery'
class DromaeoDomCoreTraverse(_DromaeoBenchmark):
"""Dromaeo DOMCore traverse JavaScript benchmark.
Tests traversing a DOM structure.
"""
tag = 'domcoretraverse'
query_param = 'dom-traverse'
@classmethod
def Name(cls):
return 'dromaeo.domcoretraverse'
class DromaeoJslibAttrJquery(_DromaeoBenchmark):
"""Dromaeo JSLib attr jquery JavaScript benchmark.
Tests setting and getting DOM node attributes using the jQuery JavaScript
Library.
"""
tag = 'jslibattrjquery'
query_param = 'jslib-attr-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibattrjquery'
class DromaeoJslibAttrPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib attr prototype JavaScript benchmark.
Tests setting and getting DOM node attributes using the Prototype JavaScript
Library.
"""
tag = 'jslibattrprototype'
query_param = 'jslib-attr-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibattrprototype'
class DromaeoJslibEventJquery(_DromaeoBenchmark):
"""Dromaeo JSLib event jquery JavaScript benchmark.
Tests binding, removing, and triggering DOM events using the jQuery JavaScript
Library.
"""
tag = 'jslibeventjquery'
query_param = 'jslib-event-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibeventjquery'
class DromaeoJslibEventPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib event prototype JavaScript benchmark.
Tests binding, removing, and triggering DOM events using the Prototype
JavaScript Library.
"""
tag = 'jslibeventprototype'
query_param = 'jslib-event-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibeventprototype'
# xp: crbug.com/389731
# win7: http://crbug.com/479796
@benchmark.Disabled('xp', 'win7')
class DromaeoJslibModifyJquery(_DromaeoBenchmark):
"""Dromaeo JSLib modify jquery JavaScript benchmark.
Tests creating and injecting DOM nodes into a document using the jQuery
JavaScript Library.
"""
tag = 'jslibmodifyjquery'
query_param = 'jslib-modify-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibmodifyjquery'
class DromaeoJslibModifyPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib modify prototype JavaScript benchmark.
Tests creating and injecting DOM nodes into a document using the Prototype
JavaScript Library.
"""
tag = 'jslibmodifyprototype'
query_param = 'jslib-modify-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibmodifyprototype'
class DromaeoJslibStyleJquery(_DromaeoBenchmark):
"""Dromaeo JSLib style jquery JavaScript benchmark.
Tests getting and setting CSS information on DOM elements using the jQuery
JavaScript Library.
"""
tag = 'jslibstylejquery'
query_param = 'jslib-style-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibstylejquery'
class DromaeoJslibStylePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib style prototype JavaScript benchmark.
Tests getting and setting CSS information on DOM elements using the Prototype
JavaScript Library.
"""
tag = 'jslibstyleprototype'
query_param = 'jslib-style-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibstyleprototype'
class DromaeoJslibTraverseJquery(_DromaeoBenchmark):
"""Dromaeo JSLib traverse jquery JavaScript benchmark.
Tests traversing a DOM structure using the jQuery JavaScript Library.
"""
tag = 'jslibtraversejquery'
query_param = 'jslib-traverse-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibtraversejquery'
class DromaeoJslibTraversePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib traverse prototype JavaScript benchmark.
Tests traversing a DOM structure using the Prototype JavaScript Library.
"""
tag = 'jslibtraverseprototype'
query_param = 'jslib-traverse-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibtraverseprototype'
class DromaeoCSSQueryJquery(_DromaeoBenchmark):
"""Dromaeo CSS Query jquery JavaScript benchmark.
Tests querying DOM elements with CSS selectors using the jQuery JavaScript
Library.
"""
tag = 'cssqueryjquery'
query_param = 'cssquery-jquery'
@classmethod
def Name(cls):
return 'dromaeo.cssqueryjquery'
| bsd-3-clause |
evensonbryan/yocto-autobuilder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/locks.py | 4 | 12354 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.python import log
from twisted.internet import defer
from buildbot import util
from buildbot.util import subscription
from buildbot.util.eventual import eventually
if False: # for debugging
debuglog = log.msg
else:
debuglog = lambda m: None
class BaseLock:
"""
Class handling claiming and releasing of L{self}, and keeping track of
current and waiting owners.
We maintain the wait queue in FIFO order, and ensure that counting waiters
in the queue behind exclusive waiters cannot acquire the lock. This ensures
that exclusive waiters are not starved.
"""
description = "<BaseLock>"
def __init__(self, name, maxCount=1):
self.name = name # Name of the lock
self.waiting = [] # Current queue, tuples (waiter, LockAccess,
# deferred)
self.owners = [] # Current owners, tuples (owner, LockAccess)
self.maxCount = maxCount # maximal number of counting owners
# subscriptions to this lock being released
self.release_subs = subscription.SubscriptionPoint("%r releases"
% (self,))
def __repr__(self):
return self.description
def _getOwnersCount(self):
""" Return the number of current exclusive and counting owners.
@return: Tuple (number exclusive owners, number counting owners)
"""
num_excl, num_counting = 0, 0
for owner in self.owners:
if owner[1].mode == 'exclusive':
num_excl = num_excl + 1
else: # mode == 'counting'
num_counting = num_counting + 1
assert (num_excl == 1 and num_counting == 0) \
or (num_excl == 0 and num_counting <= self.maxCount)
return num_excl, num_counting
def isAvailable(self, requester, access):
""" Return a boolean whether the lock is available for claiming """
debuglog("%s isAvailable(%s, %s): self.owners=%r"
% (self, requester, access, self.owners))
num_excl, num_counting = self._getOwnersCount()
# Find all waiters ahead of the requester in the wait queue
for idx, waiter in enumerate(self.waiting):
if waiter[0] == requester:
w_index = idx
break
else:
w_index = len(self.waiting)
ahead = self.waiting[:w_index]
if access.mode == 'counting':
# Wants counting access
return num_excl == 0 and num_counting + len(ahead) < self.maxCount \
and all([w[1].mode == 'counting' for w in ahead])
else:
# Wants exclusive access
return num_excl == 0 and num_counting == 0 and len(ahead) == 0
def claim(self, owner, access):
""" Claim the lock (lock must be available) """
debuglog("%s claim(%s, %s)" % (self, owner, access.mode))
assert owner is not None
assert self.isAvailable(owner, access), "ask for isAvailable() first"
assert isinstance(access, LockAccess)
assert access.mode in ['counting', 'exclusive']
self.waiting = [w for w in self.waiting if w[0] != owner]
self.owners.append((owner, access))
debuglog(" %s is claimed '%s'" % (self, access.mode))
def subscribeToReleases(self, callback):
"""Schedule C{callback} to be invoked every time this lock is
released. Returns a L{Subscription}."""
return self.release_subs.subscribe(callback)
def release(self, owner, access):
""" Release the lock """
assert isinstance(access, LockAccess)
debuglog("%s release(%s, %s)" % (self, owner, access.mode))
entry = (owner, access)
if entry not in self.owners:
debuglog("%s already released" % self)
return
self.owners.remove(entry)
# who can we wake up?
# After an exclusive access, we may need to wake up several waiting.
# Break out of the loop when the first waiting client should not be awakened.
num_excl, num_counting = self._getOwnersCount()
for i, (w_owner, w_access, d) in enumerate(self.waiting):
if w_access.mode == 'counting':
if num_excl > 0 or num_counting == self.maxCount:
break
else:
num_counting = num_counting + 1
else:
# w_access.mode == 'exclusive'
if num_excl > 0 or num_counting > 0:
break
else:
num_excl = num_excl + 1
# If the waiter has a deferred, wake it up and clear the deferred
# from the wait queue entry to indicate that it has been woken.
if d:
self.waiting[i] = (w_owner, w_access, None)
eventually(d.callback, self)
# notify any listeners
self.release_subs.deliver()
def waitUntilMaybeAvailable(self, owner, access):
"""Fire when the lock *might* be available. The caller will need to
check with isAvailable() when the deferred fires. This loose form is
used to avoid deadlocks. If we were interested in a stronger form,
this would be named 'waitUntilAvailable', and the deferred would fire
after the lock had been claimed.
"""
debuglog("%s waitUntilAvailable(%s)" % (self, owner))
assert isinstance(access, LockAccess)
if self.isAvailable(owner, access):
return defer.succeed(self)
d = defer.Deferred()
# Are we already in the wait queue?
w = [i for i, w in enumerate(self.waiting) if w[0] == owner]
if w:
self.waiting[w[0]] = (owner, access, d)
else:
self.waiting.append((owner, access, d))
return d
def stopWaitingUntilAvailable(self, owner, access, d):
debuglog("%s stopWaitingUntilAvailable(%s)" % (self, owner))
assert isinstance(access, LockAccess)
assert (owner, access, d) in self.waiting
self.waiting = [w for w in self.waiting if w[0] != owner]
def isOwner(self, owner, access):
return (owner, access) in self.owners
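# Illustrative sketch (added; not part of the original module — 'owner' is
# any hashable object, typically a Build or BuildStep):
#
#   lock = BaseLock('example', maxCount=2)
#   access = LockAccess(MasterLock('example'), 'counting')
#   d = lock.waitUntilMaybeAvailable(owner, access)
#   # when d fires, the lock *might* be free; re-check before claiming:
#   if lock.isAvailable(owner, access):
#       lock.claim(owner, access)
#       try:
#           pass  # do the protected work
#       finally:
#           lock.release(owner, access)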
class RealMasterLock(BaseLock):
def __init__(self, lockid):
BaseLock.__init__(self, lockid.name, lockid.maxCount)
self.description = "<MasterLock(%s, %s)>" % (self.name, self.maxCount)
def getLock(self, slave):
return self
class RealSlaveLock:
def __init__(self, lockid):
self.name = lockid.name
self.maxCount = lockid.maxCount
self.maxCountForSlave = lockid.maxCountForSlave
self.description = "<SlaveLock(%s, %s, %s)>" % (self.name,
self.maxCount,
self.maxCountForSlave)
self.locks = {}
def __repr__(self):
return self.description
def getLock(self, slave):
slavename = slave.slavename
if slavename not in self.locks:
maxCount = self.maxCountForSlave.get(slavename,
self.maxCount)
lock = BaseLock(self.name, maxCount)
desc = "<SlaveLock(%s, %s)[%s] %d>" % (self.name, maxCount,
slavename, id(lock))
lock.description = desc
self.locks[slavename] = lock
return self.locks[slavename]
class LockAccess(util.ComparableMixin):
""" I am an object representing a way to access a lock.
@param lockid: LockId instance that should be accessed.
@type lockid: A MasterLock or SlaveLock instance.
@param mode: Mode of accessing the lock.
@type mode: A string, either 'counting' or 'exclusive'.
"""
compare_attrs = ['lockid', 'mode']
def __init__(self, lockid, mode, _skipChecks=False):
self.lockid = lockid
self.mode = mode
if not _skipChecks:
# these checks fail with mock < 0.8.0 when lockid is a Mock
# TODO: remove this in Buildbot-0.9.0+
assert isinstance(lockid, (MasterLock, SlaveLock))
assert mode in ['counting', 'exclusive']
class BaseLockId(util.ComparableMixin):
""" Abstract base class for LockId classes.
Sets up the 'access()' function for the LockId's available to the user
(MasterLock and SlaveLock classes).
Derived classes should add
- Comparison with the L{util.ComparableMixin} via the L{compare_attrs}
class variable.
- Link to the actual lock class should be added with the L{lockClass}
class variable.
"""
def access(self, mode):
""" Express how the lock should be accessed """
assert mode in ['counting', 'exclusive']
return LockAccess(self, mode)
def defaultAccess(self):
""" For buildbot 0.7.7 compability: When user doesn't specify an access
mode, this one is chosen.
"""
return self.access('counting')
# master.cfg should only reference the following MasterLock and SlaveLock
# classes. They are identifiers that will be turned into real Locks later,
# via the BotMaster.getLockByID method.
class MasterLock(BaseLockId):
"""I am a semaphore that limits the number of simultaneous actions.
Builds and BuildSteps can declare that they wish to claim me as they run.
Only a limited number of such builds or steps will be able to run
simultaneously. By default this number is one, but my maxCount parameter
can be raised to allow two or three or more operations to happen at the
same time.
Use this to protect a resource that is shared among all builders and all
slaves, for example to limit the load on a common SVN repository.
"""
compare_attrs = ['name', 'maxCount']
lockClass = RealMasterLock
def __init__(self, name, maxCount=1):
self.name = name
self.maxCount = maxCount
class SlaveLock(BaseLockId):
"""I am a semaphore that limits simultaneous actions on each buildslave.
Builds and BuildSteps can declare that they wish to claim me as they run.
Only a limited number of such builds or steps will be able to run
simultaneously on any given buildslave. By default this number is one,
but my maxCount parameter can be raised to allow two or three or more
operations to happen on a single buildslave at the same time.
Use this to protect a resource that is shared among all the builds taking
place on each slave, for example to limit CPU or memory load on an
underpowered machine.
Each buildslave will get an independent copy of this semaphore. By
default each copy will use the same owner count (set with maxCount), but
you can provide maxCountForSlave with a dictionary that maps slavename to
owner count, to allow some slaves more parallelism than others.
"""
compare_attrs = ['name', 'maxCount', '_maxCountForSlaveList']
lockClass = RealSlaveLock
def __init__(self, name, maxCount=1, maxCountForSlave={}):
self.name = name
self.maxCount = maxCount
self.maxCountForSlave = maxCountForSlave
# for comparison purposes, turn this dictionary into a stably-sorted
# list of tuples
self._maxCountForSlaveList = self.maxCountForSlave.items()
self._maxCountForSlaveList.sort()
self._maxCountForSlaveList = tuple(self._maxCountForSlaveList)
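# Illustrative master.cfg sketch (added; not part of the original module —
# the builder and slave names are made up):
#
#   from buildbot.locks import MasterLock, SlaveLock
#   svn_lock = MasterLock("svn")             # one SVN export at a time
#   cpu_lock = SlaveLock("cpu", maxCount=2,
#                        maxCountForSlave={'fast-slave': 4})
#   ...
#   BuilderConfig(name="full", slavenames=["fast-slave"], factory=f,
#                 locks=[svn_lock.access('exclusive'),
#                        cpu_lock.access('counting')])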
| gpl-2.0 |
friedrich420/HTC-ONE-M7-AEL-Kernel-5.0.2 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
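# Worked example (added comment, not in the original script): with the
# default zoom of 0.5, us_to_px(2000) gives 2000 / 10**3 * 0.5 = 1.0 px,
# and px_to_us(1) inverts it back to 2000.0 us.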
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
neilhan/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 4 | 6151 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (self._batch_size if min_after_dequeue is None
else min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + self._data.keys())
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
ahamilton55/ansible | lib/ansible/modules/cloud/vmware/vmware_migrate_vmk.py | 57 | 7360 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_migrate_vmk
short_description: Migrate a VMK interface from VSS to VDS
description:
- Migrate a VMK interface from VSS to VDS
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
esxi_hostname:
description:
- ESXi hostname to be managed
required: True
device:
description:
- VMK interface name
required: True
current_switch_name:
description:
- Switch VMK interface is currently on
required: True
current_portgroup_name:
description:
- Portgroup name VMK interface is currently on
required: True
migrate_switch_name:
description:
- Switch name to migrate VMK interface to
required: True
migrate_portgroup_name:
description:
- Portgroup name to migrate VMK interface to
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example from Ansible playbook
- name: Migrate Management vmk
local_action:
module: vmware_migrate_vmk
hostname: vcsa_host
username: vcsa_user
password: vcsa_pass
esxi_hostname: esxi_hostname
device: vmk1
current_switch_name: temp_vswitch
current_portgroup_name: esx-mgmt
migrate_switch_name: dvSwitch
migrate_portgroup_name: Management
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class VMwareMigrateVmk(object):
def __init__(self, module):
self.module = module
self.host_system = None
self.migrate_switch_name = self.module.params['migrate_switch_name']
self.migrate_portgroup_name = self.module.params['migrate_portgroup_name']
self.device = self.module.params['device']
self.esxi_hostname = self.module.params['esxi_hostname']
self.current_portgroup_name = self.module.params['current_portgroup_name']
self.current_switch_name = self.module.params['current_switch_name']
self.content = connect_to_api(module)
def process_state(self):
try:
vmk_migration_states = {
'migrate_vss_vds': self.state_migrate_vss_vds,
'migrate_vds_vss': self.state_migrate_vds_vss,
'migrated': self.state_exit_unchanged
}
vmk_migration_states[self.check_vmk_current_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_migrate_vds_vss(self):
self.module.exit_json(changed=False, msg="Currently Not Implemented")
def create_host_vnic_config(self, dv_switch_uuid, portgroup_key):
host_vnic_config = vim.host.VirtualNic.Config()
host_vnic_config.spec = vim.host.VirtualNic.Specification()
host_vnic_config.changeOperation = "edit"
host_vnic_config.device = self.device
host_vnic_config.portgroup = ""
host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection()
host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid
host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key
return host_vnic_config
def create_port_group_config(self):
port_group_config = vim.host.PortGroup.Config()
port_group_config.spec = vim.host.PortGroup.Specification()
port_group_config.changeOperation = "remove"
port_group_config.spec.name = self.current_portgroup_name
port_group_config.spec.vlanId = -1
port_group_config.spec.vswitchName = self.current_switch_name
port_group_config.spec.policy = vim.host.NetworkPolicy()
return port_group_config
def state_migrate_vss_vds(self):
host_network_system = self.host_system.configManager.networkSystem
dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name)
pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name)
config = vim.host.NetworkConfig()
config.portgroup = [self.create_port_group_config()]
config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)]
host_network_system.UpdateNetworkConfig(config, "modify")
self.module.exit_json(changed=True)
def check_vmk_current_state(self):
self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)
for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic:
if vnic.device == self.device:
#self.vnic = vnic
if vnic.spec.distributedVirtualPort is None:
if vnic.portgroup == self.current_portgroup_name:
return "migrate_vss_vds"
else:
dvs = find_dvs_by_name(self.content, self.current_switch_name)
if dvs is None:
return "migrated"
if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
return "migrate_vds_vss"
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
device=dict(required=True, type='str'),
current_switch_name=dict(required=True, type='str'),
current_portgroup_name=dict(required=True, type='str'),
migrate_switch_name=dict(required=True, type='str'),
migrate_portgroup_name=dict(required=True, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_migrate_vmk = VMwareMigrateVmk(module)
vmware_migrate_vmk.process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
tedlaz/pyted | sms/requests/packages/idna/codec.py | 426 | 3299 | from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re
_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
class Codec(codecs.Codec):
def encode(self, data, errors='strict'):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return "", 0
return encode(data), len(data)
def decode(self, data, errors='strict'):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return u"", 0
return decode(data), len(data)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
def _buffer_encode(self, data, errors, final):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return ("", 0)
labels = _unicode_dots_re.split(data)
trailing_dot = u''
if labels:
if not labels[-1]:
trailing_dot = '.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = '.'
result = []
size = 0
for label in labels:
result.append(alabel(label))
if size:
size += 1
size += len(label)
# Join with U+002E
result = ".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, data, errors, final):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return (u"", 0)
# IDNA allows decoding to operate on Unicode strings, too.
if isinstance(data, unicode):
labels = _unicode_dots_re.split(data)
else:
# Must be ASCII string
data = str(data)
unicode(data, "ascii")
labels = data.split(".")
trailing_dot = u''
if labels:
if not labels[-1]:
trailing_dot = u'.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = u'.'
result = []
size = 0
for label in labels:
result.append(ulabel(label))
if size:
size += 1
size += len(label)
result = u".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(
name='idna',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
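# Illustrative usage (added; not part of the original module — the codec
# name 'idna2008' and the search function are assumptions about how a
# caller might register it):
#
#   import codecs
#   codecs.register(lambda name: getregentry() if name == 'idna2008' else None)
#   assert u'b\u00fccher.example'.encode('idna2008') == 'xn--bcher-kva.example'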
| gpl-3.0 |
informatik-mannheim/Moduro-CC3D | Simulation/ModuroModel/Spa/SpaSdbCdiInDa.py | 1 | 1536 | # Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Julian Debatin"
__copyright__ = "The authors"
__license__ = "Apache 2"
__email__ = "[email protected]"
__status__ = "Production"
from ModuroModel.Spa.SpaSdbCdiInUa import SpaSdbCdiInUa
class SpaSdbCdiInDa(SpaSdbCdiInUa):
def __init__(self, sim, simthread):
SpaSdbCdiInUa.__init__(self, sim, simthread)
def _initModel(self):
self.name = "SpaSdbCdiInDa"
self.adhFactor = 0.25
self.cellTypes = self._createCellTypes()
self.energyMatrix = self._createEnergyMatrix()
self._run() # Must be the last statement.
def _createEnergyMatrix(self):
energyMatrix = [[0, 14, 14, 14, 14, 4],
[0, -1, 1, 3, 12, 12],
[0, 0, 6, 4, 8, 14],
[0, 0, 0, 5, 8, 12],
[0, 0, 0, 0, 6, 4],
[0, 0, 0, 0, 0, 2]]
return energyMatrix | apache-2.0 |
pong3489/TEST_Mission | Lib/site-packages/numpy/NumpyDotNet/bin/Tests/test_dtype.py | 141 | 5362 | import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.assertTrue(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.assertTrue(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.assertTrue(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
self.assertRaises(TypeError, np.dtype,
dict(names=set(['A', 'B']), formats=['f8', 'i4']))
self.assertRaises(TypeError, np.dtype,
dict(names=['A', 'B'], formats=set(['f8', 'i4'])))
class TestSubarray(TestCase):
def test_single_subarray(self):
a = np.dtype((np.int, (2)))
b = np.dtype((np.int, (2,)))
self.assertTrue(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.assertTrue(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.assertTrue(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.assertTrue(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.assertTrue(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
a = np.dtype([('yo', np.int), ('ye', simple1),
('yi', np.dtype((np.int, (3, 2))))])
b = np.dtype([('yo', np.int), ('ye', simple1),
('yi', np.dtype((np.int, (3, 2))))])
self.assertTrue(hash(a) == hash(b))
c = np.dtype([('yo', np.int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
d = np.dtype([('yo', np.int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
self.assertTrue(hash(c) == hash(d))
class TestBasicFunctions(TestCase):
def test_compare(self):
a = np.dtype('i')
b = np.dtype('i')
self.assertTrue(a == b)
a = np.dtype([('one', np.dtype('d')), ('two', np.dtype('i'))])
b = np.dtype([('one', np.dtype('d')), ('two', np.dtype('i'))])
c = np.dtype([('two', np.dtype('i')), ('one', np.dtype('d'))])
self.assertTrue(a == a)
self.assertTrue(a == b)
self.assertFalse(b == c)
self.assertFalse(a != a)
self.assertFalse(a != b)
self.assertTrue(b != c)
# Try using the repeat operation and make sure the base is correct.
c = b * 3
self.assertFalse(c == b)
self.assertTrue(c.base == b)
def test_seq(self):
a = np.dtype([('one', np.dtype('d')), ('two', np.dtype('i'))])
self.assertTrue(a[0] == np.dtype('d'))
self.assertTrue(a['two'] == np.dtype('i'))
self.assertFalse(a['two'] == np.dtype('d'))
try:
x = a[2]
self.assertTrue(False, "Failed to catch index out of range exception.")
except:
pass
try:
x = a['foo']
self.assertTrue(False, 'Failed to catch incorrect field name exception.')
except:
pass
# Make sure scalar int values work as index values.
arr = np.arange(4)
self.assertTrue(a[arr[0]] == np.dtype('d'))
self.assertTrue(a[arr[1]] == np.dtype('i'))
try:
x = a[arr[2]]
self.assertTrue(False, 'Failed to catch index out of range exception using ScalarInt index value.')
except:
pass
if __name__ == '__main__':
import unittest
unittest.main()
| gpl-3.0 |
beepaste/beepaste | beepaste/views/viewPaste/views.py | 1 | 1832 | from pyramid.response import Response
from pyramid.view import view_config
from beepaste.models.pastes import Pastes
from beepaste.pasteFunctions import pasteExists
from pyramid.httpexceptions import HTTPNotFound, HTTPFound
import base64
@view_config(route_name='view_raw', renderer='templates/pasteRaw.jinja2')
def viewRaw(request):
uri = request.matchdict['pasteID']
if not pasteExists(uri, request):
raise HTTPNotFound()
paste = request.dbsession.query(Pastes).filter_by(pasteURI=uri).first()
raw = base64.b64decode(paste.text.encode('utf-8')).decode('utf-8')
request.response.content_type = "text/plain; charset=UTF-8"
return {'raw': raw}
@view_config(route_name='view_embed', renderer='templates/pasteEmbed.jinja2')
def viewEmbed(request):
uri = request.matchdict['pasteID']
if not pasteExists(uri, request):
raise HTTPNotFound()
paste = request.dbsession.query(Pastes).filter_by(pasteURI=uri).first()
title = paste.title + " - " + request.registry.settings['beepaste.siteName']
return {'paste': paste, 'title': title}
@view_config(route_name='view_paste', renderer='templates/pasteView.jinja2')
def viewPaste(request):
uri = request.matchdict['pasteID']
if not pasteExists(uri, request):
raise HTTPNotFound()
paste = request.dbsession.query(Pastes).filter_by(pasteURI=uri).first()
embedCode = '<iframe src="' + request.route_url('view_embed', pasteID=paste.pasteURI) +'" style="border:none;width:100%;min-height:300px;"></iframe>'
title = paste.title + " - " + request.registry.settings['beepaste.siteName']
description = "Paste by "+ paste.name + ", Created about " + paste.created_in_words() + " ago. View more information in link!"
return {'paste': paste, 'embedCode': embedCode, 'title': title, 'description': description}
| gpl-3.0 |
vitan/hue | desktop/core/ext-py/Paste-1.7.2/paste/debug/testserver.py | 28 | 3385 | # (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
WSGI Test Server
This builds upon paste.util.baseserver to customize it for regressions
where using raw_interactive won't do.
"""
import time
from paste.httpserver import *
class WSGIRegressionServer(WSGIServer):
"""
A threaded WSGIServer for use in regression testing. To use this
module, call serve(application, regression=True), and then call
server.accept() to let it handle one request. When finished, use
server.stop() to shutdown the server. Note that all pending requests
are processed before the server shuts down.
"""
defaulttimeout = 10
def __init__ (self, *args, **kwargs):
WSGIServer.__init__(self, *args, **kwargs)
self.stopping = []
self.pending = []
self.timeout = self.defaulttimeout
# this is a local connection, be quick
self.socket.settimeout(2)
def serve_forever(self):
from threading import Thread
thread = Thread(target=self.serve_pending)
thread.start()
def reset_expires(self):
if self.timeout:
self.expires = time.time() + self.timeout
def close_request(self, *args, **kwargs):
WSGIServer.close_request(self, *args, **kwargs)
self.pending.pop()
self.reset_expires()
def serve_pending(self):
self.reset_expires()
while not self.stopping or self.pending:
now = time.time()
if now > self.expires and self.timeout:
# note regression test doesn't handle exceptions in
# threads very well; so we just print and exit
print "\nWARNING: WSGIRegressionServer timeout exceeded\n"
break
if self.pending:
self.handle_request()
time.sleep(.1)
def stop(self):
""" stop the server (called from tester's thread) """
self.stopping.append(True)
def accept(self, count = 1):
""" accept another request (called from tester's thread) """
assert not self.stopping
[self.pending.append(True) for x in range(count)]
def serve(application, host=None, port=None, handler=None):
server = WSGIRegressionServer(application, host, port, handler)
print "serving on %s:%s" % server.server_address
server.serve_forever()
return server
if __name__ == '__main__':
import urllib
from paste.wsgilib import dump_environ
server = serve(dump_environ)
baseuri = ("http://%s:%s" % server.server_address)
def fetch(path):
# tell the server to humor exactly one more request
server.accept(1)
# not needed; but this is what you do if the server
# may not respond in a resonable time period
import socket
socket.setdefaulttimeout(5)
# build a uri, fetch and return
return urllib.urlopen(baseuri + path).read()
assert "PATH_INFO: /foo" in fetch("/foo")
assert "PATH_INFO: /womble" in fetch("/womble")
# ok, let's make one more final request...
server.accept(1)
# and then schedule a stop()
server.stop()
# and then... fetch it...
urllib.urlopen(baseuri)
| apache-2.0 |
erkanay/django | tests/syndication_tests/urls.py | 42 | 1028 | from django.conf.urls import url
from . import feeds
urlpatterns = [
url(r'^syndication/rss2/$', feeds.TestRss2Feed()),
url(r'^syndication/rss2/guid_ispermalink_true/$',
feeds.TestRss2FeedWithGuidIsPermaLinkTrue()),
url(r'^syndication/rss2/guid_ispermalink_false/$',
feeds.TestRss2FeedWithGuidIsPermaLinkFalse()),
url(r'^syndication/rss091/$', feeds.TestRss091Feed()),
url(r'^syndication/no_pubdate/$', feeds.TestNoPubdateFeed()),
url(r'^syndication/atom/$', feeds.TestAtomFeed()),
url(r'^syndication/latest/$', feeds.TestLatestFeed()),
url(r'^syndication/custom/$', feeds.TestCustomFeed()),
url(r'^syndication/naive-dates/$', feeds.NaiveDatesFeed()),
url(r'^syndication/aware-dates/$', feeds.TZAwareDatesFeed()),
url(r'^syndication/feedurl/$', feeds.TestFeedUrlFeed()),
url(r'^syndication/articles/$', feeds.ArticlesFeed()),
url(r'^syndication/template/$', feeds.TemplateFeed()),
url(r'^syndication/template_context/$', feeds.TemplateContextFeed()),
]
| bsd-3-clause |
cernopendata/opendata.cern.ch | cernopendata/modules/sitemap/generators.py | 3 | 2158 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Open Data Portal.
# Copyright (C) 2018 CERN.
#
# CERN Open Data Portal is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Open Data Portal is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Sitemap generation for CERN Open Data Portal."""
import arrow
from flask import current_app, url_for
from invenio_db import db
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records.models import RecordMetadata
def _sitemapdtformat(dt):
"""Convert a datetime to a W3 Date and Time format.
Converts the date to a second-resolution datetime timestamp with a special
UTC designator 'Z'. See more information at
https://www.w3.org/TR/NOTE-datetime.
"""
adt = arrow.Arrow.fromdatetime(dt).to('utc')
return adt.format('YYYY-MM-DDTHH:mm:ss') + 'Z'
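# Example (added comment, not in the original file):
#   _sitemapdtformat(datetime.datetime(2018, 1, 2, 3, 4, 5))
#   -> '2018-01-02T03:04:05Z' (naive datetimes are treated as UTC)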
def urls_generator(doc_type):
"""Generate the records links."""
q = (db.session.query(PersistentIdentifier, RecordMetadata)
.join(RecordMetadata,
RecordMetadata.id == PersistentIdentifier.object_uuid)
.filter(PersistentIdentifier.status == PIDStatus.REGISTERED,
PersistentIdentifier.pid_type == doc_type))
scheme = current_app.config['CERNOPENDATA_SITEMAP_URL_SCHEME']
for pid, rm in q.yield_per(1000):
yield {
'loc': url_for('invenio_records_ui.{0}'.format(doc_type),
pid_value=pid.pid_value,
_external=True, _scheme=scheme),
'lastmod': _sitemapdtformat(rm.updated)
}
| gpl-2.0 |
petemounce/ansible | test/sanity/validate-modules/test_validate_modules_regex.py | 162 | 2807 | #!/usr/bin/env python
# This is a standalone test for the regex inside validate-modules
# It is not suitable to add to the make tests target because the
# file under test is outside the test's sys.path AND has a hyphen
# in the name making it unimportable.
#
# To execute this by hand:
# 1) cd <checkoutdir>
# 2) source hacking/env-setup
# 3) PYTHONPATH=./lib nosetests -d -w test -v --nocapture sanity/validate-modules
import re
from ansible.compat.tests import unittest
# TYPE_REGEX = re.compile(r'.*\stype\(.*')
# TYPE_REGEX = re.compile(r'.*(if|or)\stype\(.*')
# TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)type\(.*')
# TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)type\(.*')
# TYPE_REGEX = re.compile(r'.*(if|\sor)(\s+.*|\s+)type\(.*')
# TYPE_REGEX = re.compile(r'.*(if|\sor)(\s+.*|\s+)(?<!_)type\(.*')
TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)(?<!_)(?<!str\()type\(.*')
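# Worked example (added comment, not in the original test): the negative
# look-behinds keep helper names and str() casts from matching, so
#   TYPE_REGEX.match("if get_interface_type(x) == 'svi':")  ->  None
# while
#   TYPE_REGEX.match("if type(x) == Bar")                   ->  a match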
class TestValidateModulesRegex(unittest.TestCase):
def test_type_regex(self):
# each of these examples needs to be matched or not matched
checks = [
['if type(foo) is Bar', True],
['if Bar is type(foo)', True],
['if type(foo) is not Bar', True],
['if Bar is not type(foo)', True],
['if type(foo) == Bar', True],
['if Bar == type(foo)', True],
['if type(foo)==Bar', True],
['if Bar==type(foo)', True],
['if type(foo) != Bar', True],
['if Bar != type(foo)', True],
['if type(foo)!=Bar', True],
['if Bar!=type(foo)', True],
['if foo or type(bar) != Bar', True],
['x = type(foo)', False],
["error = err.message + ' ' + str(err) + ' - ' + str(type(err))", False],
# cloud/amazon/ec2_group.py
["module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))", False],
# files/patch.py
["p = type('Params', (), module.params)", False], # files/patch.py
# system/osx_defaults.py
["if self.current_value is not None and not isinstance(self.current_value, type(self.value)):", True],
# system/osx_defaults.py
['raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)', False],
# network/nxos/nxos_interface.py
["if get_interface_type(interface) == 'svi':", False],
]
for idc, check in enumerate(checks):
cstring = check[0]
cexpected = check[1]
match = TYPE_REGEX.match(cstring)
if cexpected and not match:
assert False, "%s should have matched" % cstring
elif not cexpected and match:
assert False, "%s should not have matched" % cstring
| gpl-3.0 |
omnirom/android_kernel_htc_msm8960 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
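# Illustrative usage (added; not part of the original script — note the
# 'avg' slot is a cheap running estimate, not an exact mean):
#
#   stats = {}
#   for v in (3, 1, 2):
#       add_stats(stats, 'latency', v)
#   # stats['latency'] -> (1, 3, 2, 3), i.e. (min, max, avg, count)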
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
sodexis/odoo | openerp/report/print_xml.py | 338 | 11063 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import openerp
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval
import print_fnc
from openerp.osv.orm import BaseModel
class InheritDict(dict):
# Might be useful when we're doing name lookup for call or eval.
def __init__(self, parent=None):
self.parent = parent
def __getitem__(self, name):
if name in self:
return super(InheritDict, self).__getitem__(name)
else:
if not self.parent:
raise KeyError
else:
return self.parent[name]
def tounicode(val):
if isinstance(val, str):
unicode_val = unicode(val, 'utf-8')
elif isinstance(val, unicode):
unicode_val = val
else:
unicode_val = unicode(val)
return unicode_val
class document(object):
def __init__(self, cr, uid, datas, func=False):
# create a new document
self.cr = cr
self.pool = openerp.registry(cr.dbname)
self.func = func or {}
self.datas = datas
self.uid = uid
self.bin_datas = {}
def node_attrs_get(self, node):
if len(node.attrib):
return node.attrib
return {}
def get_value(self, browser, field_path):
fields = field_path.split('.')
if not len(fields):
return ''
value = browser
for f in fields:
if isinstance(value, (BaseModel, list)):
if not value:
return ''
value = value[0]
value = value[f]
return value or ''
def get_value2(self, browser, field_path):
value = self.get_value(browser, field_path)
if isinstance(value, BaseModel):
return value.id
else:
return value
def eval(self, record, expr):
#TODO: support remote variables (eg address.title) in expr
# how to do that: parse the string, find dots, replace those dotted variables by temporary
# "simple ones", fetch the value of those variables and add them (temporarily) to the _data
# dictionary passed to eval
        #FIXME: it won't work if the data hasn't been fetched yet... this could
# happen if the eval node is the first one using this Record
# the next line is a workaround for the problem: it causes the resource to be loaded
#Pinky: Why not this ? eval(expr, browser) ?
# name = browser.name
# data_dict = browser._data[self.get_value(browser, 'id')]
return safe_eval(expr, {}, {'obj': record})
def parse_node(self, node, parent, browser, datas=None):
attrs = self.node_attrs_get(node)
if 'type' in attrs:
if attrs['type']=='field':
value = self.get_value(browser, attrs['name'])
#TODO: test this
if value == '' and 'default' in attrs:
value = attrs['default']
el = etree.SubElement(parent, node.tag)
el.text = tounicode(value)
#TODO: test this
for key, value in attrs.iteritems():
if key not in ('type', 'name', 'default'):
el.set(key, value)
elif attrs['type']=='attachment':
model = browser._name
value = self.get_value(browser, attrs['name'])
ids = self.pool['ir.attachment'].search(self.cr, self.uid, [('res_model','=',model),('res_id','=',int(value))])
datas = self.pool['ir.attachment'].read(self.cr, self.uid, ids)
if len(datas):
# if there are several, pick first
datas = datas[0]
fname = str(datas['datas_fname'])
ext = fname.split('.')[-1].lower()
if ext in ('jpg','jpeg', 'png'):
import base64
from StringIO import StringIO
dt = base64.decodestring(datas['datas'])
fp = StringIO()
fp.write(dt)
i = str(len(self.bin_datas))
self.bin_datas[i] = fp
el = etree.SubElement(parent, node.tag)
el.text = i
elif attrs['type']=='data':
#TODO: test this
txt = self.datas.get('form', {}).get(attrs['name'], '')
el = etree.SubElement(parent, node.tag)
el.text = txt
elif attrs['type']=='function':
if attrs['name'] in self.func:
txt = self.func[attrs['name']](node)
else:
txt = print_fnc.print_fnc(attrs['name'], node)
el = etree.SubElement(parent, node.tag)
el.text = txt
elif attrs['type']=='eval':
value = self.eval(browser, attrs['expr'])
el = etree.SubElement(parent, node.tag)
el.text = str(value)
elif attrs['type']=='fields':
fields = attrs['name'].split(',')
vals = {}
for b in browser:
value = tuple([self.get_value2(b, f) for f in fields])
if not value in vals:
vals[value]=[]
vals[value].append(b)
keys = vals.keys()
keys.sort()
if 'order' in attrs and attrs['order']=='desc':
keys.reverse()
v_list = [vals[k] for k in keys]
for v in v_list:
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld, el, v)
elif attrs['type']=='call':
if len(attrs['args']):
#TODO: test this
# fetches the values of the variables which names where passed in the args attribute
args = [self.eval(browser, arg) for arg in attrs['args'].split(',')]
else:
args = []
# get the object
if 'model' in attrs:
obj = self.pool[attrs['model']]
else:
obj = browser # the record(set) is an instance of the model
# get the ids
if 'ids' in attrs:
ids = self.eval(browser, attrs['ids'])
else:
                    ids = browser.ids
# call the method itself
newdatas = getattr(obj, attrs['name'])(self.cr, self.uid, ids, *args)
def parse_result_tree(node, parent, datas):
if not node.tag == etree.Comment:
el = etree.SubElement(parent, node.tag)
atr = self.node_attrs_get(node)
if 'value' in atr:
if not isinstance(datas[atr['value']], (str, unicode)):
txt = str(datas[atr['value']])
else:
txt = datas[atr['value']]
el.text = txt
else:
for el_cld in node:
parse_result_tree(el_cld, el, datas)
if not isinstance(newdatas, (BaseModel, list)):
newdatas = [newdatas]
for newdata in newdatas:
parse_result_tree(node, parent, newdata)
elif attrs['type']=='zoom':
value = self.get_value(browser, attrs['name'])
if value:
if not isinstance(value, (BaseModel, list)):
v_list = [value]
else:
v_list = value
for v in v_list:
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld, el, v)
else:
# if there is no "type" attribute in the node, copy it to the xml data and parse its children
if not node.tag == etree.Comment:
if node.tag == parent.tag:
el = parent
else:
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld,el, browser)
def xml_get(self):
return etree.tostring(self.doc,encoding="utf-8",xml_declaration=True,pretty_print=True)
def parse_tree(self, ids, model, context=None):
if not context:
context={}
browser = self.pool[model].browse(self.cr, self.uid, ids, context)
self.parse_node(self.dom, self.doc, browser)
def parse_string(self, xml, ids, model, context=None):
if not context:
context={}
# parses the xml template to memory
self.dom = etree.XML(xml)
# create the xml data from the xml template
self.parse_tree(ids, model, context)
def parse(self, filename, ids, model, context=None):
if not context:
context={}
# parses the xml template to memory
src_file = tools.file_open(filename)
try:
self.dom = etree.XML(src_file.read())
self.doc = etree.Element(self.dom.tag)
self.parse_tree(ids, model, context)
finally:
src_file.close()
def close(self):
self.doc = None
self.dom = None
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mrquim/mrquimrepo | repo/script.module.trakt/lib/trakt/interfaces/shows/__init__.py | 4 | 2756 | from __future__ import absolute_import, division, print_function
from trakt.interfaces.base import Interface
from trakt.mapper.summary import SummaryMapper
import requests
class ShowsInterface(Interface):
path = 'shows'
def get(self, id, extended=None, **kwargs):
response = self.http.get(str(id), query={
'extended': extended
})
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
return SummaryMapper.show(self.client, item)
def trending(self, extended=None, **kwargs):
response = self.http.get('trending', query={
'extended': extended
})
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
return SummaryMapper.shows(self.client, items)
def next_episode(self, id, extended=None, **kwargs):
response = self.http.get(str(id), 'next_episode', query={
'extended': extended
})
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
return SummaryMapper.episode(self.client, item)
def last_episode(self, id, extended=None, **kwargs):
response = self.http.get(str(id), 'last_episode', query={
'extended': extended
})
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
return SummaryMapper.episode(self.client, item)
def seasons(self, id, extended=None, **kwargs):
response = self.http.get(str(id), [
'seasons'
], query={
'extended': extended
})
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
return SummaryMapper.seasons(self.client, items)
def season(self, id, season, extended=None, **kwargs):
response = self.http.get(str(id), [
'seasons', str(season)
], query={
'extended': extended
})
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
return SummaryMapper.episodes(self.client, items)
def episode(self, id, season, episode, extended=None, **kwargs):
response = self.http.get(str(id), [
'seasons', str(season),
'episodes', str(episode)
], query={
'extended': extended
})
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
return SummaryMapper.episode(self.client, item)
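# Minimal usage sketch (assumes a configured trakt.py client; the slug
# 'game-of-thrones' is illustrative):
#   shows = client['shows']                     # -> ShowsInterface
#   show = shows.get('game-of-thrones', extended='full')
#   pilot = shows.episode('game-of-thrones', 1, 1)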
| gpl-2.0 |
neuroo/equip | tests/test_bz_1.py | 1 | 6303 | import pytest
from itertools import tee, izip
from testutils import get_co, get_bytecode
from equip import BytecodeObject
from equip.bytecode.utils import show_bytecode
import equip.utils.log as logutils
from equip.utils.log import logger
logutils.enableLogger(to_file='./equip.log')
from equip.analysis import ControlFlow, BasicBlock
#
# https://github.com/neuroo/equip/issues/2
#
FAULTY_PROGRAM = """
def hey(self):
def _cam_wo(foo):
if foo is not None:
if foo.startswith("/p"):
return True
if foo.startswith("/j"):
return True
return False
ray = orange.grab.this('ray', None)
lenny = 't' if ray is not None else 'f'
briscoe, law = And.order(orange.environ,
orange.grab)
if law and not briscoe:
return guilty(law)
q.jury = ""
q.judge = False
q.executioner = 0
water = orange.grab.this('banana', None)
paper = foo('hey')
if And.stop(orange.environ):
go = foo('blue', red='facebook', lenny=lenny)
else:
go = None
if water:
paper = foo('hey', banana=water)
if And.stop(orange.environ):
go = foo('blue', red='facebook', banana=water,
lenny=lenny)
q.paper = paper
q.go = go
q.law = law
if orange.chocolate == 'STATIC':
apple = orange.grab.this('apple', None)
google = orange.grab.this('facebook', None)
amazon = orange.grab.this('amazon', None)
micro = orange.grab.this('micro', None)
if google is not None:
log.error('soft %s, bad: %s',
google, orange.grab.this('taste', None))
q.jury = 'almonds'
if apple is not None:
q.jury = 'pis'
if ray is not None:
q.jury = 'taci'
if amazon is not None:
q.jury = 'oz'
grade = orange.grab.this('grade', None)
if grade is not None:
q.jury = 'bat'
if not q.jury and micro is not None:
q.jury = 'man'
chop = chop.see()
if chop is not None and not _cam_wo(water):
return guilty(self._osx(chop.com, water))
else:
q.levin = hax()
return nuts('/sugar/bear')
elif orange.chocolate == 'RAIN':
q.levin = hax()
popular = orange.grab.this('popular')
leak = orange.grab.this('leak')
friend = orange.grab.this('_careful')
if almost(yes='now'):
if not missed(friend):
self._villain(False, popular, DoNut.GLAZED, 'hey')
log.bingo(CRAZY, ca=['late:{0}'.format(None), 'chocolate:GLAZED',
'jury:eieow'])
log.info(u"%s chocolate:GLAZED. %s %s",
popular, bored(orange), EXTREME)
q.jury = 'man'
return nuts('/sugar/bear')
if leak is None:
self._villain(False, popular, DoNut.GLAZED, 'no leak')
log.bingo(CRAZY, ca=['late:{0}'.format(None), 'chocolate:GLAZED',
'jury:no_password'])
log.info(u"%s aa %s %s", popular,
bored(orange), EXTREME)
q.jury = 'almonds'
return nuts('/sugar/bear', {'pecans': True})
else:
chop = chop.rand(popular, foy=False)
if chop and chop.leak is not None and leak != '':
should_return = self.stick(chop, c)
if should_return is not None:
return should_return
if chop.leak == ssl.encrypt(leak, chop.leak):
okay, jury = self._boat(popular, chop, DoNut.GLAZED)
if not okay:
q.jury = jury
if jury == EXPIRED_PASSWORD:
t = BooYo(chop, foo('string', ray=''))
Can.tuny(t)
Can.tuna()
return self.guilty(foo('string', ray=t.friend))
else:
return nuts('/sugar/bear')
else:
oops()
ca = self._damn(chop.com.ray, DoNut.GLAZED)
bain, chop = And.breaking(chop,
And.rare(orange.environ))
self._villain(True, chop, DoNut.GLAZED)
if not bain:
ssl.anon(chop.handle, late=chop.late, sit=DoNut.GLAZED)
log.bingo(HATE, ca=ca)
return self._missed(chop, next_url=self._osx(chop.com))
else:
log.bingo(HATE, ca=ca)
else:
self._villain(False, chop, DoNut.GLAZED, 'ppp leak')
log.bingo(CRAZY, ca=['ppp:{0}'.format(chop.late), 'chocolate:GLAZED',
'jury:leak'])
log.info(u"%s hey %s hey %s %s",
chop.handle, chop.late, bored(orange), EXTREME)
surpassed = self._try_new(chop.handle)
if surpassed:
try:
surpassed.config()
cool = False
except Bop:
cool = True
if cool:
log.bingo('so close', ca=['com:{0}'.format(chop.late)])
q.judge = True
q.executioner = self.bandana
q.jury = 'almonds'
return nuts('/sugar/bear')
else:
devil('foo')
"""
REDUCED_TC = """
def hey(self):
if a:
if a:
if a:
if a:
if a:
return 1
else:
foo()
"""
def test_bz_1_reproducer():
co_simple = get_co(REDUCED_TC)
assert co_simple is not None
bytecode_object = BytecodeObject('<string>')
bytecode_object.parse_code(co_simple)
for decl in bytecode_object.declarations:
cflow = ControlFlow(decl)
assert len(cflow.graph.roots()) == 1
logger.debug("cflow := \n%s", cflow.graph.to_dot())
doms = cflow.dominators
| apache-2.0 |
juliusbierk/scikit-image | skimage/morphology/grey.py | 24 | 15043 | """
Grayscale morphological operations
"""
import functools
import numpy as np
from scipy import ndimage as ndi
from .misc import default_selem
from ..util import pad, crop
__all__ = ['erosion', 'dilation', 'opening', 'closing', 'white_tophat',
'black_tophat']
def _shift_selem(selem, shift_x, shift_y):
"""Shift the binary image `selem` in the left and/or up.
This only affects 2D structuring elements with even number of rows
or columns.
Parameters
----------
selem : 2D array, shape (M, N)
The input structuring element.
shift_x, shift_y : bool
Whether to move `selem` along each axis.
Returns
-------
out : 2D array, shape (M + int(shift_x), N + int(shift_y))
The shifted structuring element.
"""
if selem.ndim > 2:
# do nothing for 3D or higher structuring elements
return selem
m, n = selem.shape
if m % 2 == 0:
extra_row = np.zeros((1, n), selem.dtype)
if shift_x:
selem = np.vstack((selem, extra_row))
else:
selem = np.vstack((extra_row, selem))
m += 1
if n % 2 == 0:
extra_col = np.zeros((m, 1), selem.dtype)
if shift_y:
selem = np.hstack((selem, extra_col))
else:
selem = np.hstack((extra_col, selem))
return selem
def _invert_selem(selem):
"""Change the order of the values in `selem`.
This is a patch for the *weird* footprint inversion in
`ndi.grey_morphology` [1]_.
Parameters
----------
selem : array
The input structuring element.
Returns
-------
inverted : array, same shape and type as `selem`
The structuring element, in opposite order.
Examples
--------
>>> selem = np.array([[0, 0, 0], [0, 1, 1], [0, 1, 1]], np.uint8)
>>> _invert_selem(selem)
array([[1, 1, 0],
[1, 1, 0],
[0, 0, 0]], dtype=uint8)
References
----------
[1] https://github.com/scipy/scipy/blob/ec20ababa400e39ac3ffc9148c01ef86d5349332/scipy/ndimage/morphology.py#L1285
"""
inverted = selem[(slice(None, None, -1),) * selem.ndim]
return inverted
def pad_for_eccentric_selems(func):
"""Pad input images for certain morphological operations.
Parameters
----------
func : callable
A morphological function, either opening or closing, that
supports eccentric structuring elements. Its parameters must
include at least `image`, `selem`, and `out`.
Returns
-------
func_out : callable
The same function, but correctly padding the input image before
applying the input function.
See Also
--------
opening, closing.
"""
@functools.wraps(func)
def func_out(image, selem, out=None, *args, **kwargs):
pad_widths = []
padding = False
if out is None:
out = np.empty_like(image)
for axis_len in selem.shape:
if axis_len % 2 == 0:
axis_pad_width = axis_len - 1
padding = True
else:
axis_pad_width = 0
pad_widths.append((axis_pad_width,) * 2)
if padding:
image = pad(image, pad_widths, mode='edge')
out_temp = np.empty_like(image)
else:
out_temp = out
out_temp = func(image, selem, out=out_temp, *args, **kwargs)
if padding:
out[:] = crop(out_temp, pad_widths)
else:
out = out_temp
return out
return func_out
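# Worked example for the decorator above (shapes are illustrative): a (2, 2)
# selem yields pad_widths == [(1, 1), (1, 1)] and padding == True, so the image
# is edge-padded before `func` runs and cropped back afterwards; an odd-sided
# (3, 3) selem leaves the image untouched.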
@default_selem
def erosion(image, selem=None, out=None, shift_x=False, shift_y=False):
"""Return greyscale morphological erosion of an image.
Morphological erosion sets a pixel at (i,j) to the minimum over all pixels
in the neighborhood centered at (i,j). Erosion shrinks bright regions and
enlarges dark regions.
Parameters
----------
image : ndarray
Image array.
selem : ndarray, optional
The neighborhood expressed as an array of 1's and 0's.
If None, use cross-shaped structuring element (connectivity=1).
    out : ndarray, optional
The array to store the result of the morphology. If None is
passed, a new array will be allocated.
shift_x, shift_y : bool, optional
shift structuring element about center point. This only affects
eccentric structuring elements (i.e. selem with even numbered sides).
Returns
-------
eroded : array, same shape as `image`
The result of the morphological erosion.
Notes
-----
For ``uint8`` (and ``uint16`` up to a certain bit-depth) data, the
lower algorithm complexity makes the `skimage.filter.rank.minimum`
function more efficient for larger images and structuring elements.
Examples
--------
>>> # Erosion shrinks bright regions
>>> import numpy as np
>>> from skimage.morphology import square
>>> bright_square = np.array([[0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 0, 0, 0, 0]], dtype=np.uint8)
>>> erosion(bright_square, square(3))
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
selem = np.array(selem)
selem = _shift_selem(selem, shift_x, shift_y)
if out is None:
out = np.empty_like(image)
ndi.grey_erosion(image, footprint=selem, output=out)
return out
@default_selem
def dilation(image, selem=None, out=None, shift_x=False, shift_y=False):
"""Return greyscale morphological dilation of an image.
Morphological dilation sets a pixel at (i,j) to the maximum over all pixels
in the neighborhood centered at (i,j). Dilation enlarges bright regions
and shrinks dark regions.
Parameters
----------
image : ndarray
Image array.
selem : ndarray, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use cross-shaped structuring element (connectivity=1).
out : ndarray, optional
        The array to store the result of the morphology. If None is
        passed, a new array will be allocated.
shift_x, shift_y : bool, optional
shift structuring element about center point. This only affects
eccentric structuring elements (i.e. selem with even numbered sides).
Returns
-------
dilated : uint8 array, same shape and type as `image`
The result of the morphological dilation.
Notes
-----
For `uint8` (and `uint16` up to a certain bit-depth) data, the lower
algorithm complexity makes the `skimage.filter.rank.maximum` function more
efficient for larger images and structuring elements.
Examples
--------
>>> # Dilation enlarges bright regions
>>> import numpy as np
>>> from skimage.morphology import square
>>> bright_pixel = np.array([[0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0],
... [0, 0, 1, 0, 0],
... [0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0]], dtype=np.uint8)
>>> dilation(bright_pixel, square(3))
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
selem = np.array(selem)
selem = _shift_selem(selem, shift_x, shift_y)
# Inside ndimage.grey_dilation, the structuring element is inverted,
# eg. `selem = selem[::-1, ::-1]` for 2D [1]_, for reasons unknown to
# this author (@jni). To "patch" this behaviour, we invert our own
# selem before passing it to `ndi.grey_dilation`.
# [1] https://github.com/scipy/scipy/blob/ec20ababa400e39ac3ffc9148c01ef86d5349332/scipy/ndimage/morphology.py#L1285
selem = _invert_selem(selem)
if out is None:
out = np.empty_like(image)
ndi.grey_dilation(image, footprint=selem, output=out)
return out
@default_selem
@pad_for_eccentric_selems
def opening(image, selem=None, out=None):
"""Return greyscale morphological opening of an image.
The morphological opening on an image is defined as an erosion followed by
a dilation. Opening can remove small bright spots (i.e. "salt") and connect
small dark cracks. This tends to "open" up (dark) gaps between (bright)
features.
Parameters
----------
image : ndarray
Image array.
selem : ndarray, optional
The neighborhood expressed as an array of 1's and 0's.
If None, use cross-shaped structuring element (connectivity=1).
out : ndarray, optional
The array to store the result of the morphology. If None
is passed, a new array will be allocated.
Returns
-------
opening : array, same shape and type as `image`
The result of the morphological opening.
Examples
--------
>>> # Open up gap between two bright regions (but also shrink regions)
>>> import numpy as np
>>> from skimage.morphology import square
>>> bad_connection = np.array([[1, 0, 0, 0, 1],
... [1, 1, 0, 1, 1],
... [1, 1, 1, 1, 1],
... [1, 1, 0, 1, 1],
... [1, 0, 0, 0, 1]], dtype=np.uint8)
>>> opening(bad_connection, square(3))
array([[0, 0, 0, 0, 0],
[1, 1, 0, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 0, 1, 1],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
eroded = erosion(image, selem)
# note: shift_x, shift_y do nothing if selem side length is odd
out = dilation(eroded, selem, out=out, shift_x=True, shift_y=True)
return out
@default_selem
@pad_for_eccentric_selems
def closing(image, selem=None, out=None):
"""Return greyscale morphological closing of an image.
The morphological closing on an image is defined as a dilation followed by
an erosion. Closing can remove small dark spots (i.e. "pepper") and connect
small bright cracks. This tends to "close" up (dark) gaps between (bright)
features.
Parameters
----------
image : ndarray
Image array.
selem : ndarray, optional
The neighborhood expressed as an array of 1's and 0's.
If None, use cross-shaped structuring element (connectivity=1).
out : ndarray, optional
        The array to store the result of the morphology. If None is
        passed, a new array will be allocated.
Returns
-------
closing : array, same shape and type as `image`
The result of the morphological closing.
Examples
--------
>>> # Close a gap between two bright lines
>>> import numpy as np
>>> from skimage.morphology import square
>>> broken_line = np.array([[0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0],
... [1, 1, 0, 1, 1],
... [0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0]], dtype=np.uint8)
>>> closing(broken_line, square(3))
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
dilated = dilation(image, selem)
# note: shift_x, shift_y do nothing if selem side length is odd
out = erosion(dilated, selem, out=out, shift_x=True, shift_y=True)
return out
@default_selem
def white_tophat(image, selem=None, out=None):
"""Return white top hat of an image.
The white top hat of an image is defined as the image minus its
morphological opening. This operation returns the bright spots of the image
that are smaller than the structuring element.
Parameters
----------
image : ndarray
Image array.
selem : ndarray, optional
The neighborhood expressed as an array of 1's and 0's.
If None, use cross-shaped structuring element (connectivity=1).
out : ndarray, optional
The array to store the result of the morphology. If None
is passed, a new array will be allocated.
Returns
-------
out : array, same shape and type as `image`
The result of the morphological white top hat.
Examples
--------
>>> # Subtract grey background from bright peak
>>> import numpy as np
>>> from skimage.morphology import square
>>> bright_on_grey = np.array([[2, 3, 3, 3, 2],
... [3, 4, 5, 4, 3],
... [3, 5, 9, 5, 3],
... [3, 4, 5, 4, 3],
... [2, 3, 3, 3, 2]], dtype=np.uint8)
>>> white_tophat(bright_on_grey, square(3))
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 5, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
selem = np.array(selem)
if out is image:
opened = opening(image, selem)
out -= opened
return out
elif out is None:
out = np.empty_like(image)
out = ndi.white_tophat(image, footprint=selem, output=out)
return out
@default_selem
def black_tophat(image, selem=None, out=None):
"""Return black top hat of an image.
The black top hat of an image is defined as its morphological closing minus
the original image. This operation returns the dark spots of the image that
are smaller than the structuring element. Note that dark spots in the
original image are bright spots after the black top hat.
Parameters
----------
image : ndarray
Image array.
selem : ndarray, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use cross-shaped structuring element (connectivity=1).
out : ndarray, optional
The array to store the result of the morphology. If None
is passed, a new array will be allocated.
Returns
-------
    out : array, same shape and type as `image`
        The result of the morphological black top hat.
Examples
--------
>>> # Change dark peak to bright peak and subtract background
>>> import numpy as np
>>> from skimage.morphology import square
>>> dark_on_grey = np.array([[7, 6, 6, 6, 7],
... [6, 5, 4, 5, 6],
... [6, 4, 0, 4, 6],
... [6, 5, 4, 5, 6],
... [7, 6, 6, 6, 7]], dtype=np.uint8)
>>> black_tophat(dark_on_grey, square(3))
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 5, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
if out is image:
original = image.copy()
else:
original = image
out = closing(image, selem, out=out)
out -= original
return out
| bsd-3-clause |
aslamplr/shorts | gdata/dublincore/data.py | 126 | 2106 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Dublin Core Metadata Initiative (DCMI) Extension"""
__author__ = '[email protected] (Jeff Scudder)'
import atom.core
DC_TEMPLATE = '{http://purl.org/dc/terms/}%s'
class Creator(atom.core.XmlElement):
"""Entity primarily responsible for making the resource."""
_qname = DC_TEMPLATE % 'creator'
class Date(atom.core.XmlElement):
"""Point or period of time associated with an event in the lifecycle of the resource."""
_qname = DC_TEMPLATE % 'date'
class Description(atom.core.XmlElement):
"""Account of the resource."""
_qname = DC_TEMPLATE % 'description'
class Format(atom.core.XmlElement):
"""File format, physical medium, or dimensions of the resource."""
_qname = DC_TEMPLATE % 'format'
class Identifier(atom.core.XmlElement):
"""An unambiguous reference to the resource within a given context."""
_qname = DC_TEMPLATE % 'identifier'
class Language(atom.core.XmlElement):
"""Language of the resource."""
_qname = DC_TEMPLATE % 'language'
class Publisher(atom.core.XmlElement):
"""Entity responsible for making the resource available."""
_qname = DC_TEMPLATE % 'publisher'
class Rights(atom.core.XmlElement):
"""Information about rights held in and over the resource."""
_qname = DC_TEMPLATE % 'rights'
class Subject(atom.core.XmlElement):
"""Topic of the resource."""
_qname = DC_TEMPLATE % 'subject'
class Title(atom.core.XmlElement):
"""Name given to the resource."""
_qname = DC_TEMPLATE % 'title'
| mit |
rbaindourov/v8-inspector | Source/chrome/tools/telemetry/telemetry/benchmark_runner.py | 5 | 15380 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses the command line, discovers the appropriate benchmarks, and runs them.
Handles benchmark configuration, but all the logic for
actually running the benchmark is in Benchmark and PageRunner."""
import difflib
import hashlib
import inspect
import json
import os
import sys
from telemetry import benchmark
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.core import command_line
from telemetry.core import discover
from telemetry import decorators
def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
""" Print benchmarks that are not filtered in the same order of benchmarks in
the |benchmarks| list.
Args:
benchmarks: the list of benchmarks to be printed (in the same order of the
list).
possible_browser: the possible_browser instance that's used for checking
which benchmarks are enabled.
output_pipe: the stream in which benchmarks are printed on.
"""
if not benchmarks:
print >> output_pipe, 'No benchmarks found!'
return
b = None # Need this to stop pylint from complaining undefined variable.
if any(not issubclass(b, benchmark.Benchmark) for b in benchmarks):
assert False, '|benchmarks| param contains non benchmark class: %s' % b
# Align the benchmark names to the longest one.
format_string = ' %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
disabled_benchmarks = []
print >> output_pipe, 'Available benchmarks %sare:' % (
      'for %s ' % possible_browser.browser_type if possible_browser else '')
for benchmark_class in benchmarks:
if possible_browser and not decorators.IsEnabled(benchmark_class,
possible_browser)[0]:
disabled_benchmarks.append(benchmark_class)
continue
print >> output_pipe, format_string % (
benchmark_class.Name(), benchmark_class.Description())
if disabled_benchmarks:
print >> output_pipe
print >> output_pipe, (
'Disabled benchmarks for %s are (force run with -d):' %
possible_browser.browser_type)
for benchmark_class in disabled_benchmarks:
print >> output_pipe, format_string % (
benchmark_class.Name(), benchmark_class.Description())
print >> output_pipe, (
'Pass --browser to list benchmarks for another browser.')
print >> output_pipe
def GetMostLikelyMatchedBenchmarks(all_benchmarks, input_benchmark_name):
""" Returns the list of benchmarks whose name most likely matched with
|input_benchmark_name|.
Args:
all_benchmarks: the list of benchmark classes.
input_benchmark_name: a string to be matched against the names of benchmarks
in |all_benchmarks|.
Returns:
A list of benchmark classes whose name likely matched
|input_benchmark_name|. Benchmark classes are arranged in descending order
of similarity between their names to |input_benchmark_name|.
"""
def MatchedWithBenchmarkInputNameScore(benchmark_class):
return difflib.SequenceMatcher(
isjunk=None,
a=benchmark_class.Name(), b=input_benchmark_name).ratio()
benchmarks_with_similar_names = [
b for b in all_benchmarks if
MatchedWithBenchmarkInputNameScore(b) > 0.4]
ordered_list = sorted(benchmarks_with_similar_names,
key=MatchedWithBenchmarkInputNameScore,
reverse=True)
return ordered_list
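# Illustration (hypothetical names): a typo such as 'smoothnes' scores about
# 0.95 against a benchmark named 'smoothness', well above the 0.4 cutoff, so
# that benchmark is offered as a "did you mean" suggestion.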
class Environment(object):
"""Contains information about the benchmark runtime environment.
Attributes:
top_level_dir: A dir that contains benchmark, page test, and/or user story
set dirs and associated artifacts.
benchmark_dirs: A list of dirs containing benchmarks.
benchmark_aliases: A dict of name:alias string pairs to be matched against
exactly during benchmark selection.
"""
def __init__(self, top_level_dir, benchmark_dirs=None,
benchmark_aliases=None):
self._top_level_dir = top_level_dir
self._benchmark_dirs = benchmark_dirs or []
    self._benchmark_aliases = benchmark_aliases or {}
@property
def top_level_dir(self):
return self._top_level_dir
@property
def benchmark_dirs(self):
return self._benchmark_dirs
@property
def benchmark_aliases(self):
return self._benchmark_aliases
class Help(command_line.OptparseCommand):
"""Display help information about a command"""
usage = '[command]'
def Run(self, args):
if len(args.positional_args) == 1:
commands = _MatchingCommands(args.positional_args[0])
if len(commands) == 1:
command = commands[0]
parser = command.CreateParser()
command.AddCommandLineArgs(parser, None)
parser.print_help()
return 0
print >> sys.stderr, ('usage: %s [command] [<options>]' % _ScriptName())
print >> sys.stderr, 'Available commands are:'
for command in _Commands():
print >> sys.stderr, ' %-10s %s' % (
command.Name(), command.Description())
print >> sys.stderr, ('"%s help <command>" to see usage information '
'for a specific command.' % _ScriptName())
return 0
class List(command_line.OptparseCommand):
"""Lists the available benchmarks"""
usage = '[benchmark_name] [<options>]'
@classmethod
def CreateParser(cls):
options = browser_options.BrowserFinderOptions()
parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
return parser
@classmethod
def AddCommandLineArgs(cls, parser, _):
parser.add_option('-j', '--json-output-file', type='string')
parser.add_option('-n', '--num-shards', type='int', default=1)
@classmethod
def ProcessCommandLineArgs(cls, parser, args, environment):
if not args.positional_args:
args.benchmarks = _Benchmarks(environment)
elif len(args.positional_args) == 1:
args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
environment, exact_matches=False)
else:
parser.error('Must provide at most one benchmark name.')
def Run(self, args):
possible_browser = browser_finder.FindBrowser(args)
if args.browser_type in (
'exact', 'release', 'release_x64', 'debug', 'debug_x64', 'canary'):
args.browser_type = 'reference'
possible_reference_browser = browser_finder.FindBrowser(args)
else:
possible_reference_browser = None
if args.json_output_file:
with open(args.json_output_file, 'w') as f:
f.write(_GetJsonBenchmarkList(possible_browser,
possible_reference_browser,
args.benchmarks, args.num_shards))
else:
PrintBenchmarkList(args.benchmarks, possible_browser)
return 0
class Run(command_line.OptparseCommand):
"""Run one or more benchmarks (default)"""
usage = 'benchmark_name [page_set] [<options>]'
@classmethod
def CreateParser(cls):
options = browser_options.BrowserFinderOptions()
parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
return parser
@classmethod
def AddCommandLineArgs(cls, parser, environment):
benchmark.AddCommandLineArgs(parser)
# Allow benchmarks to add their own command line options.
matching_benchmarks = []
for arg in sys.argv[1:]:
matching_benchmarks += _MatchBenchmarkName(arg, environment)
if matching_benchmarks:
# TODO(dtu): After move to argparse, add command-line args for all
# benchmarks to subparser. Using subparsers will avoid duplicate
# arguments.
matching_benchmark = matching_benchmarks.pop()
matching_benchmark.AddCommandLineArgs(parser)
# The benchmark's options override the defaults!
matching_benchmark.SetArgumentDefaults(parser)
@classmethod
def ProcessCommandLineArgs(cls, parser, args, environment):
all_benchmarks = _Benchmarks(environment)
if not args.positional_args:
possible_browser = (
browser_finder.FindBrowser(args) if args.browser_type else None)
PrintBenchmarkList(all_benchmarks, possible_browser)
sys.exit(-1)
input_benchmark_name = args.positional_args[0]
matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
if not matching_benchmarks:
print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
print >> sys.stderr
most_likely_matched_benchmarks = GetMostLikelyMatchedBenchmarks(
all_benchmarks, input_benchmark_name)
if most_likely_matched_benchmarks:
print >> sys.stderr, 'Do you mean any of those benchmarks below?'
PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
sys.exit(-1)
if len(matching_benchmarks) > 1:
print >> sys.stderr, ('Multiple benchmarks named "%s".' %
input_benchmark_name)
print >> sys.stderr, 'Did you mean one of these?'
print >> sys.stderr
PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
sys.exit(-1)
benchmark_class = matching_benchmarks.pop()
if len(args.positional_args) > 1:
parser.error('Too many arguments.')
assert issubclass(benchmark_class, benchmark.Benchmark), (
'Trying to run a non-Benchmark?!')
benchmark.ProcessCommandLineArgs(parser, args)
benchmark_class.ProcessCommandLineArgs(parser, args)
cls._benchmark = benchmark_class
def Run(self, args):
return min(255, self._benchmark().Run(args))
def _ScriptName():
return os.path.basename(sys.argv[0])
def _Commands():
"""Generates a list of all classes in this file that subclass Command."""
for _, cls in inspect.getmembers(sys.modules[__name__]):
if not inspect.isclass(cls):
continue
if not issubclass(cls, command_line.Command):
continue
yield cls
def _MatchingCommands(string):
return [command for command in _Commands()
if command.Name().startswith(string)]
@decorators.Cache
def _Benchmarks(environment):
benchmarks = []
for search_dir in environment.benchmark_dirs:
benchmarks += discover.DiscoverClasses(search_dir,
environment.top_level_dir,
benchmark.Benchmark,
index_by_class_name=True).values()
return benchmarks
def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
def _Matches(input_string, search_string):
if search_string.startswith(input_string):
return True
for part in search_string.split('.'):
if part.startswith(input_string):
return True
return False
# Exact matching.
if exact_matches:
# Don't add aliases to search dict, only allow exact matching for them.
if input_benchmark_name in environment.benchmark_aliases:
exact_match = environment.benchmark_aliases[input_benchmark_name]
else:
exact_match = input_benchmark_name
for benchmark_class in _Benchmarks(environment):
if exact_match == benchmark_class.Name():
return [benchmark_class]
return []
# Fuzzy matching.
return [benchmark_class for benchmark_class in _Benchmarks(environment)
if _Matches(input_benchmark_name, benchmark_class.Name())]
def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
benchmark_classes, num_shards):
"""Returns a list of all enabled benchmarks in a JSON format expected by
buildbots.
JSON format (see build/android/pylib/perf/benchmark_runner.py):
{ "version": <int>,
"steps": {
<string>: {
"device_affinity": <int>,
"cmd": <string>,
"perf_dashboard_id": <string>,
},
...
}
}
"""
output = {
'version': 1,
'steps': {
}
}
for benchmark_class in benchmark_classes:
if not issubclass(benchmark_class, benchmark.Benchmark):
continue
enabled, _ = decorators.IsEnabled(benchmark_class, possible_browser)
if not enabled:
continue
base_name = benchmark_class.Name()
base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
'-v', '--output-format=chartjson', '--upload-results',
base_name]
perf_dashboard_id = base_name
# TODO(fmeawad): Currently we set the device affinity to a stable hash of
# the benchmark name. This somewhat evenly distributes benchmarks among the
# requested number of shards. However, it is far from optimal in terms of
# cycle time. We should add a benchmark size decorator (e.g. small, medium,
# large) and let that inform sharding.
# Based on the current timings, we shift the result of the hash function to
# achieve better load balancing. Those shift values are to be revised when
# necessary. (See tools/build/scripts/tools/perf/chrome-perf-step-timings.py
# for more details)
hash_shift = {
2 : 47,
5 : 56,
8 : 50
}
shift = hash_shift.get(num_shards, 0)
base_name_hash = hashlib.sha1(base_name).hexdigest()
device_affinity = (int(base_name_hash, 16) >> shift) % num_shards
output['steps'][base_name] = {
'cmd': ' '.join(base_cmd + [
'--browser=%s' % possible_browser.browser_type]),
'device_affinity': device_affinity,
'perf_dashboard_id': perf_dashboard_id,
}
if possible_reference_browser:
enabled, _ = decorators.IsEnabled(
benchmark_class, possible_reference_browser)
if enabled:
output['steps'][base_name + '.reference'] = {
'cmd': ' '.join(base_cmd + [
'--browser=reference', '--output-trace-tag=_ref']),
'device_affinity': device_affinity,
'perf_dashboard_id': perf_dashboard_id,
}
return json.dumps(output, indent=2, sort_keys=True)
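# Device-affinity sketch (illustrative): with num_shards=2 the shift is 47, so
# a benchmark named `name` is assigned
#   (int(hashlib.sha1(name).hexdigest(), 16) >> 47) % 2
# which is stable across runs because it depends only on the name.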
def main(environment):
# Get the command name from the command line.
if len(sys.argv) > 1 and sys.argv[1] == '--help':
sys.argv[1] = 'help'
command_name = 'run'
for arg in sys.argv[1:]:
if not arg.startswith('-'):
command_name = arg
break
# TODO(eakuefner): Remove this hack after we port to argparse.
if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
command_name = 'run'
sys.argv[2] = '--help'
# Validate and interpret the command name.
commands = _MatchingCommands(command_name)
if len(commands) > 1:
print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
% (command_name, _ScriptName()))
for command in commands:
print >> sys.stderr, ' %-10s %s' % (
command.Name(), command.Description())
return 1
if commands:
command = commands[0]
else:
command = Run
# Parse and run the command.
parser = command.CreateParser()
command.AddCommandLineArgs(parser, environment)
options, args = parser.parse_args()
if commands:
args = args[1:]
options.positional_args = args
command.ProcessCommandLineArgs(parser, options, environment)
return command().Run(options)
| bsd-3-clause |
ahamilton55/ansible | lib/ansible/plugins/action/include_vars.py | 18 | 10053 | # (c) 2016, Allen Sanabria <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os import path, walk
import re
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']
VALID_FILE_ARGUMENTS = ['file', '_raw_params']
VALID_ALL = ['name']
def _set_dir_defaults(self):
if not self.depth:
self.depth = 0
if self.files_matching:
self.matcher = re.compile(r'{0}'.format(self.files_matching))
else:
self.matcher = None
if not self.ignore_files:
self.ignore_files = list()
if isinstance(self.ignore_files, str):
self.ignore_files = self.ignore_files.split()
elif isinstance(self.ignore_files, dict):
return {
'failed': True,
'message': '{0} must be a list'.format(self.ignore_files)
}
def _set_args(self):
""" Set instance variables based on the arguments that were passed """
self.return_results_as_name = self._task.args.get('name', None)
self.source_dir = self._task.args.get('dir', None)
self.source_file = self._task.args.get('file', None)
if not self.source_dir and not self.source_file:
self.source_file = self._task.args.get('_raw_params')
self.depth = self._task.args.get('depth', None)
self.files_matching = self._task.args.get('files_matching', None)
self.ignore_files = self._task.args.get('ignore_files', None)
self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
# convert/validate extensions list
if isinstance(self.valid_extensions, string_types):
            self.valid_extensions = [self.valid_extensions]
if not isinstance(self.valid_extensions, list):
raise AnsibleError('Invalid type for "extensions" option, it must be a list')
def run(self, tmp=None, task_vars=None):
""" Load yml files recursively from a directory.
"""
if task_vars is None:
task_vars = dict()
self.show_content = True
# Validate arguments
dirs = 0
files = 0
for arg in self._task.args:
if arg in self.VALID_DIR_ARGUMENTS:
dirs += 1
elif arg in self.VALID_FILE_ARGUMENTS:
files += 1
elif arg in self.VALID_ALL:
pass
else:
                raise AnsibleError('{0} is not a valid option in include_vars'.format(arg))
if dirs and files:
raise AnsibleError("Your are mixing file only and dir only arguments, these are incompatible")
# set internal vars from args
self._set_args()
results = dict()
if self.source_dir:
self._set_dir_defaults()
self._set_root_dir()
if path.exists(self.source_dir):
for root_dir, filenames in self._traverse_dir_depth():
failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
if failed:
break
results.update(updated_results)
else:
failed = True
err_msg = ('{0} directory does not exist'.format(self.source_dir))
else:
try:
self.source_file = self._find_needle('vars', self.source_file)
failed, err_msg, updated_results = (
self._load_files(self.source_file)
)
if not failed:
results.update(updated_results)
except AnsibleError as e:
failed = True
err_msg = to_native(e)
if self.return_results_as_name:
scope = dict()
scope[self.return_results_as_name] = results
results = scope
result = super(ActionModule, self).run(tmp, task_vars)
if failed:
result['failed'] = failed
result['message'] = err_msg
result['ansible_facts'] = results
result['_ansible_no_log'] = not self.show_content
return result
def _set_root_dir(self):
if self._task._role:
if self.source_dir.split('/')[0] == 'vars':
path_to_use = (
path.join(self._task._role._role_path, self.source_dir)
)
if path.exists(path_to_use):
self.source_dir = path_to_use
else:
path_to_use = (
path.join(
self._task._role._role_path, 'vars', self.source_dir
)
)
self.source_dir = path_to_use
else:
current_dir = (
"/".join(self._task._ds._data_source.split('/')[:-1])
)
self.source_dir = path.join(current_dir, self.source_dir)
def _traverse_dir_depth(self):
""" Recursively iterate over a directory and sort the files in
alphabetical order. Do not iterate pass the set depth.
The default depth is unlimited.
"""
current_depth = 0
sorted_walk = list(walk(self.source_dir))
sorted_walk.sort(key=lambda x: x[0])
for current_root, current_dir, current_files in sorted_walk:
current_depth += 1
if current_depth <= self.depth or self.depth == 0:
current_files.sort()
yield (current_root, current_files)
else:
break
def _ignore_file(self, filename):
""" Return True if a file matches the list of ignore_files.
Args:
filename (str): The filename that is being matched against.
Returns:
Boolean
"""
for file_type in self.ignore_files:
try:
if re.search(r'{0}$'.format(file_type), filename):
return True
except Exception:
err_msg = 'Invalid regular expression: {0}'.format(file_type)
raise AnsibleError(err_msg)
return False
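    # Example: ignore_files=['secrets.yml'] is applied as the end-anchored
    # regex r'secrets.yml$', so any filename ending in 'secrets.yml' is
    # skipped ('.' is a regex wildcard here, not a literal dot).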
def _is_valid_file_ext(self, source_file):
""" Verify if source file has a valid extension
Args:
source_file (str): The full path of source file or source file.
Returns:
Bool
"""
file_ext = path.splitext(source_file)
return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
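    # e.g. with the default extensions, 'group_vars/all.yml' -> True while
    # 'README.txt' -> False.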
def _load_files(self, filename, validate_extensions=False):
""" Loads a file and converts the output into a valid Python dict.
Args:
filename (str): The source file.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
if validate_extensions and not self._is_valid_file_ext(filename):
failed = True
err_msg = ('{0} does not have a valid extension: {1}' .format(filename, ', '.join(self.valid_extensions)))
else:
b_data, show_content = self._loader._get_file_contents(filename)
data = to_text(b_data, errors='surrogate_or_strict')
self.show_content = show_content
data = self._loader.load(data, show_content)
if not data:
data = dict()
if not isinstance(data, dict):
failed = True
err_msg = ('{0} must be stored as a dictionary/hash' .format(filename))
else:
results.update(data)
return failed, err_msg, results
def _load_files_in_dir(self, root_dir, var_files):
""" Load the found yml files and update/overwrite the dictionary.
Args:
root_dir (str): The base directory of the list of files that is being passed.
var_files: (list): List of files to iterate over and load into a dictionary.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
for filename in var_files:
stop_iter = False
# Never include main.yml from a role, as that is the default included by the role
if self._task._role:
if filename == 'main.yml':
stop_iter = True
continue
filepath = path.join(root_dir, filename)
if self.files_matching:
if not self.matcher.search(filename):
stop_iter = True
if not stop_iter and not failed:
if path.exists(filepath) and not self._ignore_file(filename):
failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
if not failed:
results.update(loaded_data)
return failed, err_msg, results
| gpl-3.0 |
MiniSEC/GRR_clone | client/client_test.py | 1 | 5467 | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
"""Tests for the client."""
# Need to import client to add the flags.
from grr.client import actions
# Load all the standard actions.
# pylint: disable=unused-import
from grr.client import client_actions
# pylint: enable=unused-import
from grr.client import comms
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
class MockAction(actions.ActionPlugin):
in_rdfvalue = rdfvalue.LogMessage
out_rdfvalue = rdfvalue.LogMessage
def Run(self, message):
self.SendReply(rdfvalue.EchoRequest(
data="Received Message: %s. Data %s" % (message.data, "x" * 100)))
class RaiseAction(actions.ActionPlugin):
"""A mock action which raises an error."""
in_rdfvalue = rdfvalue.LogMessage
out_rdfvalue = rdfvalue.LogMessage
def Run(self, unused_args):
raise RuntimeError("I dont like.")
class TestedContext(comms.GRRClientWorker):
"""We test a simpler Context without crypto here."""
def LoadCertificates(self):
self.certs_loaded = True
class BasicContextTests(test_lib.GRRBaseTest):
"""Test the GRR contexts."""
to_test_context = TestedContext
def setUp(self):
super(BasicContextTests, self).setUp()
self.context = self.to_test_context()
self.context.LoadCertificates()
self.session_id = rdfvalue.RDFURN("W:1234")
def testHandleMessage(self):
"""Test handling of a normal request with a response."""
args = rdfvalue.LogMessage(data="hello")
# Push a request on it
message = rdfvalue.GrrMessage(
name="MockAction",
session_id=self.session_id,
auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED,
payload=args,
request_id=1)
self.context.HandleMessage(message)
# Check the response - one data and one status
message_list = self.context.Drain().job
self.assertEqual(message_list[0].session_id, self.session_id)
self.assertEqual(message_list[0].response_id, 1)
self.assert_("hello" in message_list[0].args)
self.assertEqual(message_list[1].response_id, 2)
self.assertEqual(message_list[1].type, rdfvalue.GrrMessage.Type.STATUS)
def testHandleError(self):
"""Test handling of a request which raises."""
# Push a request on it
message = rdfvalue.GrrMessage(
name="RaiseAction",
session_id=self.session_id,
auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED,
request_id=1)
self.context.HandleMessage(message)
# Check the response - one data and one status
message_list = self.context.Drain().job
self.assertEqual(message_list[0].session_id, self.session_id)
self.assertEqual(message_list[0].response_id, 1)
status = rdfvalue.GrrStatus(message_list[0].args)
self.assert_("RuntimeError" in status.error_message)
self.assertNotEqual(status.status, rdfvalue.GrrStatus.ReturnedStatus.OK)
def testUnauthenticated(self):
"""What happens if an unauthenticated message is sent to the client?
RuntimeError needs to be issued, and the client needs to send a
GrrStatus message with the traceback in it.
"""
# Push a request on it
message = rdfvalue.GrrMessage(
name="MockAction",
session_id=self.session_id,
auth_state=rdfvalue.GrrMessage.AuthorizationState.UNAUTHENTICATED,
request_id=1)
self.context.HandleMessage(message)
# We expect to receive an GrrStatus to indicate an exception was
# raised:
# Check the response - one data and one status
message_list = self.context.Drain().job
self.assertEqual(len(message_list), 1)
self.assertEqual(message_list[0].session_id, self.session_id)
self.assertEqual(message_list[0].response_id, 1)
status = rdfvalue.GrrStatus(message_list[0].args)
self.assert_("not Authenticated" in status.error_message)
self.assert_("RuntimeError" in status.error_message)
self.assertNotEqual(status.status, rdfvalue.GrrStatus.ReturnedStatus.OK)
def testPriorities(self):
for i in range(10):
message = rdfvalue.GrrMessage(
name="MockAction",
session_id=self.session_id.Basename() + str(i),
auth_state=rdfvalue.GrrMessage.AuthorizationState.UNAUTHENTICATED,
request_id=1,
priority=i%3)
self.context.HandleMessage(message)
message_list = self.context.Drain(max_size=1000000).job
self.assertEqual(len(message_list), 10)
self.assertEqual([m.priority for m in message_list],
[2, 2, 2, 1, 1, 1, 0, 0, 0, 0])
def testSizeQueue(self):
queue = comms.SizeQueue(maxsize=10000000)
for _ in range(10):
queue.Put("A", 1)
queue.Put("B", 1)
queue.Put("C", 2)
result = []
for item in queue.Get():
result.append(item)
self.assertEqual(result, ["C"] * 10 + ["A", "B"] * 10)
# Tests a partial Get().
for _ in range(7):
queue.Put("A", 1)
queue.Put("B", 1)
queue.Put("C", 2)
result = []
for item in queue.Get():
result.append(item)
if len(result) == 5:
break
self.assertEqual(result, ["C"] * 5)
for _ in range(3):
queue.Put("A", 1)
queue.Put("B", 1)
queue.Put("C", 2)
for item in queue.Get():
result.append(item)
self.assertEqual(result, ["C"] * 10 + ["A", "B"] * 10)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
yongshengwang/builthue | desktop/core/ext-py/Django-1.4.5/django/contrib/auth/management/commands/changepassword.py | 97 | 1881 | import getpass
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
)
help = "Change a user's password for django.contrib.auth."
requires_model_validation = False
def _get_pass(self, prompt="Password: "):
p = getpass.getpass(prompt=prompt)
if not p:
raise CommandError("aborted")
return p
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("need exactly one or zero arguments for username")
if args:
username, = args
else:
username = getpass.getuser()
try:
u = User.objects.using(options.get('database')).get(username=username)
except User.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
self.stdout.write("Changing password for user '%s'\n" % u.username)
MAX_TRIES = 3
count = 0
p1, p2 = 1, 2 # To make them initially mismatch.
while p1 != p2 and count < MAX_TRIES:
p1 = self._get_pass()
p2 = self._get_pass("Password (again): ")
if p1 != p2:
self.stdout.write("Passwords do not match. Please try again.\n")
count = count + 1
if count == MAX_TRIES:
raise CommandError("Aborting password change for user '%s' after %s attempts" % (username, count))
u.set_password(p1)
u.save()
return "Password changed successfully for user '%s'" % u.username
| apache-2.0 |
FlashXT/XJTU_WorkLog | 2017.9/Programing/CodeSet/list&dictionary.py | 1 | 1180 | #coding=utf-8
# 2017.9.22, Flash, list & dictionary nesting
alien_0={"color":"green","points":5}
alien_1={"color":"yellow","points":10}
alien_2={"color":"red","points":15}
aliens=[alien_0,alien_1,alien_2]
for alien in aliens:
print (alien)
pizza={
    'crust':'thick',
'toppings':['mushrooms','extra cheese'],
}
print("You order a " +pizza['crust']+"-crust pizza"+
"with the following topping:")
for topping in pizza["toppings"]:
print ("\t"+topping)
print "====================================="
favorite_languages = {
'jen': ['python', 'ruby'],
'sarah': ['c'],
'edward': ['ruby', 'go'],
'phil': ['python', 'haskell'],
}
for name, languages in favorite_languages.items():
print("\n" + name.title() + "'s favorite languages are:")
for language in languages:
print("\t" + language.title())
print"======================================="
users={
"XiaoMing":{"Name":"Mr.Ming","Age":23,"location":"A"},
"XiaoHua":{"Name":"Mr.Hua","Age":22,"location":"B"},
"XiaoHong":{"Name":"Mr.Hong","Age":20,"location":"C"},
}
for user in users.keys():
    print user + ":"
    for key, value in users[user].items():
        print "\t" + key.title() + ":" + str(value)
print "=========="
| gpl-3.0 |
neilLasrado/frappe | frappe/utils/html_utils.py | 1 | 8928 | import frappe
import json, re
import bleach, bleach_whitelist.bleach_whitelist as bleach_whitelist
from six import string_types
from bs4 import BeautifulSoup
def clean_html(html):
if not isinstance(html, string_types):
return html
return bleach.clean(clean_script_and_style(html),
tags=['div', 'p', 'br', 'ul', 'ol', 'li', 'b', 'i', 'em',
'table', 'thead', 'tbody', 'td', 'tr'],
attributes=[],
styles=['color', 'border', 'border-color'],
strip=True, strip_comments=True)
def clean_email_html(html):
if not isinstance(html, string_types):
return html
return bleach.clean(clean_script_and_style(html),
tags=['div', 'p', 'br', 'ul', 'ol', 'li', 'b', 'i', 'em', 'a',
'table', 'thead', 'tbody', 'td', 'tr', 'th', 'pre', 'code',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'button', 'img'],
attributes=['border', 'colspan', 'rowspan',
'src', 'href', 'style', 'id'],
styles=['color', 'border-color', 'width', 'height', 'max-width',
'background-color', 'border-collapse', 'border-radius',
'border', 'border-top', 'border-bottom', 'border-left', 'border-right',
'margin', 'margin-top', 'margin-bottom', 'margin-left', 'margin-right',
'padding', 'padding-top', 'padding-bottom', 'padding-left', 'padding-right',
'font-size', 'font-weight', 'font-family', 'text-decoration',
'line-height', 'text-align', 'vertical-align'
],
protocols=['cid', 'http', 'https', 'mailto'],
strip=True, strip_comments=True)
def clean_script_and_style(html):
# remove script and style
soup = BeautifulSoup(html, 'html5lib')
for s in soup(['script', 'style']):
s.decompose()
return frappe.as_unicode(soup)
def sanitize_html(html, linkify=False):
"""
Sanitize HTML tags, attributes and style to prevent XSS attacks
Based on bleach clean, bleach whitelist and HTML5lib's Sanitizer defaults
Does not sanitize JSON, as it could lead to future problems
"""
if not isinstance(html, string_types):
return html
elif is_json(html):
return html
tags = (acceptable_elements + svg_elements + mathml_elements
+ ["html", "head", "meta", "link", "body", "iframe", "style", "o:p"])
attributes = {"*": acceptable_attributes, 'svg': svg_attributes}
styles = bleach_whitelist.all_styles
strip_comments = False
	# returns html with escaped tags, escaped orphan >, <, etc.
escaped_html = bleach.clean(html, tags=tags, attributes=attributes, styles=styles,
strip_comments=strip_comments, protocols=['cid', 'http', 'https', 'mailto'])
if linkify:
escaped_html = bleach.linkify(escaped_html, callbacks=[])
return escaped_html
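# Usage sketch (an assumption based on bleach's behavior: attributes absent
# from the whitelists defined below are stripped, so the handler is dropped):
#
#   sanitize_html('<b onclick="evil()">hi</b>')   # -> '<b>hi</b>'
#   sanitize_html('{"a": 1}')                     # JSON is returned untouched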
def is_json(text):
try:
json.loads(text)
except ValueError:
return False
else:
return True
def get_icon_html(icon, small=False):
from frappe.utils import is_image
emoji_pattern = re.compile(
u"(\ud83d[\ude00-\ude4f])|"
u"(\ud83c[\udf00-\uffff])|"
u"(\ud83d[\u0000-\uddff])|"
u"(\ud83d[\ude80-\udeff])|"
u"(\ud83c[\udde0-\uddff])"
"+", flags=re.UNICODE)
if icon and emoji_pattern.match(icon):
return '<span class="text-muted">' + icon + '</span>'
if is_image(icon):
return \
'<img style="width: 16px; height: 16px;" src="{icon}">'.format(icon=icon) \
if small else \
'<img src="{icon}">'.format(icon=icon)
else:
return "<i class='{icon}'></i>".format(icon=icon)
# adapted from https://raw.githubusercontent.com/html5lib/html5lib-python/4aa79f113e7486c7ec5d15a6e1777bfe546d3259/html5lib/sanitizer.py
acceptable_elements = [
'a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video'
]
mathml_elements = [
'maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none'
]
svg_elements = [
'a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use'
]
acceptable_attributes = [
'abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang'
]
mathml_attributes = [
'actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink'
]
svg_attributes = [
'accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan'
]
| mit |
40223137/150601 | static/Brython3.1.1-20150328-091302/Lib/ui/slider.py | 603 | 2394 | from . import widget
from browser import doc,html
class Slider(widget.Widget):
def __init__(self, id=None, label=False):
self._div_shell=html.DIV(Class="ui-slider ui-slider-horizontal ui-widget ui-widget-content ui-corner-all")
widget.Widget.__init__(self, self._div_shell, 'slider', id)
self._handle=html.A(Class="ui-slider-handle ui-state-default ui-corner-all",
Href='#', style={'left': '0px'})
self._value=0
self._isMouseDown=False
self.m0 = [None, None]
def startSlide(ev):
self._isMouseDown=True
self._upperBound = self._div_shell.offsetWidth - self._handle.offsetWidth
pos = widget.getMousePosition(ev)
self._startMouseX=pos['x']
print('left', self._handle.style.left,'ev.x',ev.x)
self._lastElementLeft = int(self._handle.left)
print('left', self._lastElementLeft)
updatePosition(ev)
def updatePosition(ev):
#pos = widget.getMousePosition(ev)
#print('mose pos',pos)
_newPos = self._lastElementLeft + ev.x - self._startMouseX
_newPos = max(0, _newPos)
_newPos = min(_newPos, self._upperBound)
self._handle.left = _newPos
print('new position',self._handle.style.left)
self._lastElementLeft = _newPos
def moving(e):
if self._isMouseDown:
updatePosition(e)
def dropCallback(e):
self._isMouseDown=False
self._handle.unbind('mousemove', moving)
self._handle.bind('mousemove', moving)
self._handle.bind('mouseup', dropCallback)
#self._handle.bind('mouseout', dropCallback)
self._handle.bind('mousedown', startSlide)
def mouseover(e):
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', '%s %s' % (_class, 'ui-state-hover'))
def mouseout(e):
self._isMouseDown=False
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', _class.replace('ui-state-hover', ''))
self._handle.bind('mouseover', mouseover)
self._handle.bind('mouseout', mouseout)
self._div_shell <= self._handle
def get_value(self):
return self._value
#def set_value(self, value):
# self._value=value
# self._handle.style.left='%spx' % value
| agpl-3.0 |
Saurabh7/shogun | examples/undocumented/python_modular/graphical/preprocessor_kpca_graphical.py | 26 | 1893 | from numpy import *
import matplotlib.pyplot as p
import os, sys, inspect
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tools'))
if not path in sys.path:
sys.path.insert(1, path)
del path
from generate_circle_data import circle_data
cir=circle_data()
number_of_points_for_circle1=42
number_of_points_for_circle2=122
row_vector=2
data=cir.generate_data(number_of_points_for_circle1,number_of_points_for_circle2,row_vector)
d=zeros((row_vector,number_of_points_for_circle1))
d2=zeros((row_vector,number_of_points_for_circle2))
d=[data[i][0:number_of_points_for_circle1] for i in range(0,row_vector)]
d2=[data[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(0,row_vector)]
p.plot(d[1][:],d[0][:],'x',d2[1][:],d2[0][:],'o')
p.title('input data')
p.show()
parameter_list = [[data,0.01,1.0], [data,0.05,2.0]]
def preprocessor_kernelpca_modular (data, threshold, width):
from modshogun import RealFeatures
from modshogun import KernelPCA
from modshogun import GaussianKernel
features = RealFeatures(data)
kernel=GaussianKernel(features,features,width)
preprocessor=KernelPCA(kernel)
preprocessor.init(features)
preprocessor.set_target_dim(2)
#X=preprocessor.get_transformation_matrix()
X2=preprocessor.apply_to_feature_matrix(features)
lx0=len(X2)
modified_d1=zeros((lx0,number_of_points_for_circle1))
modified_d2=zeros((lx0,number_of_points_for_circle2))
modified_d1=[X2[i][0:number_of_points_for_circle1] for i in range(lx0)]
modified_d2=[X2[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(lx0)]
p.plot(modified_d1[0][:],modified_d1[1][:],'o',modified_d2[0][:],modified_d2[1][:],'x')
p.title('final data')
p.show()
return features
if __name__=='__main__':
print('KernelPCA')
preprocessor_kernelpca_modular(*parameter_list[0])
| mit |
google/clusterfuzz | src/python/platforms/android/ui.py | 1 | 1362 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UI related functions."""
import time
from . import adb
def clear_notifications():
"""Clear all pending notifications."""
adb.run_shell_command(['service', 'call', 'notification', '1'])
def unlock_screen():
"""Unlocks the screen if it is locked."""
window_dump_output = adb.run_shell_command(['dumpsys', 'window'])
if 'mShowingLockscreen=true' not in window_dump_output:
# Screen is not locked, no work to do.
return
# Quick power on and off makes this more reliable.
adb.run_shell_command(['input', 'keyevent', 'KEYCODE_POWER'])
adb.run_shell_command(['input', 'keyevent', 'KEYCODE_POWER'])
# This key does the unlock.
adb.run_shell_command(['input', 'keyevent', 'KEYCODE_MENU'])
  # Artificial delay to let the unlock complete.
time.sleep(1)
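# Usage sketch (assumes a device is already attached and visible to adb):
#
#   unlock_screen()
#   clear_notifications()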
| apache-2.0 |
dhananjay92/servo | tests/wpt/web-platform-tests/tools/html5lib/parse.py | 420 | 8783 | #!/usr/bin/env python
"""usage: %prog [options] filename
Parse a document to a tree, with optional profiling
"""
import sys
import os
import traceback
from optparse import OptionParser
from html5lib import html5parser, sanitizer
from html5lib.tokenizer import HTMLTokenizer
from html5lib import treebuilders, serializer, treewalkers
from html5lib import constants
def parse():
optParser = getOptParser()
opts,args = optParser.parse_args()
encoding = "utf8"
try:
f = args[-1]
# Try opening from the internet
if f.startswith('http://'):
try:
import urllib.request, urllib.parse, urllib.error, cgi
f = urllib.request.urlopen(f)
contentType = f.headers.get('content-type')
if contentType:
(mediaType, params) = cgi.parse_header(contentType)
encoding = params.get('charset')
except:
pass
elif f == '-':
f = sys.stdin
if sys.version_info[0] >= 3:
encoding = None
else:
try:
# Try opening from file system
f = open(f, "rb")
except IOError as e:
sys.stderr.write("Unable to open file: %s\n" % e)
sys.exit(1)
except IndexError:
sys.stderr.write("No filename provided. Use -h for help\n")
sys.exit(1)
treebuilder = treebuilders.getTreeBuilder(opts.treebuilder)
if opts.sanitize:
tokenizer = sanitizer.HTMLSanitizer
else:
tokenizer = HTMLTokenizer
p = html5parser.HTMLParser(tree=treebuilder, tokenizer=tokenizer, debug=opts.log)
if opts.fragment:
parseMethod = p.parseFragment
else:
parseMethod = p.parse
if opts.profile:
import cProfile
import pstats
cProfile.runctx("run(parseMethod, f, encoding)", None,
{"run": run,
"parseMethod": parseMethod,
"f": f,
"encoding": encoding},
"stats.prof")
# XXX - We should use a temp file here
stats = pstats.Stats('stats.prof')
stats.strip_dirs()
stats.sort_stats('time')
stats.print_stats()
elif opts.time:
import time
t0 = time.time()
document = run(parseMethod, f, encoding)
t1 = time.time()
if document:
printOutput(p, document, opts)
t2 = time.time()
sys.stderr.write("\n\nRun took: %fs (plus %fs to print the output)"%(t1-t0, t2-t1))
else:
sys.stderr.write("\n\nRun took: %fs"%(t1-t0))
else:
document = run(parseMethod, f, encoding)
if document:
printOutput(p, document, opts)
def run(parseMethod, f, encoding):
try:
document = parseMethod(f, encoding=encoding)
except:
document = None
traceback.print_exc()
return document
def printOutput(parser, document, opts):
if opts.encoding:
print("Encoding:", parser.tokenizer.stream.charEncoding)
for item in parser.log:
print(item)
if document is not None:
if opts.xml:
sys.stdout.write(document.toxml("utf-8"))
elif opts.tree:
if not hasattr(document,'__getitem__'):
document = [document]
for fragment in document:
print(parser.tree.testSerializer(fragment))
elif opts.hilite:
sys.stdout.write(document.hilite("utf-8"))
elif opts.html:
kwargs = {}
for opt in serializer.HTMLSerializer.options:
try:
kwargs[opt] = getattr(opts,opt)
except:
pass
if not kwargs['quote_char']:
del kwargs['quote_char']
tokens = treewalkers.getTreeWalker(opts.treebuilder)(document)
if sys.version_info[0] >= 3:
encoding = None
else:
encoding = "utf-8"
for text in serializer.HTMLSerializer(**kwargs).serialize(tokens, encoding=encoding):
sys.stdout.write(text)
if not text.endswith('\n'): sys.stdout.write('\n')
if opts.error:
errList=[]
for pos, errorcode, datavars in parser.errors:
errList.append("Line %i Col %i"%pos + " " + constants.E.get(errorcode, 'Unknown error "%s"' % errorcode) % datavars)
sys.stdout.write("\nParse errors:\n" + "\n".join(errList)+"\n")
def getOptParser():
parser = OptionParser(usage=__doc__)
parser.add_option("-p", "--profile", action="store_true", default=False,
dest="profile", help="Use the hotshot profiler to "
"produce a detailed log of the run")
parser.add_option("-t", "--time",
action="store_true", default=False, dest="time",
help="Time the run using time.time (may not be accurate on all platforms, especially for short runs)")
parser.add_option("-b", "--treebuilder", action="store", type="string",
dest="treebuilder", default="simpleTree")
parser.add_option("-e", "--error", action="store_true", default=False,
dest="error", help="Print a list of parse errors")
parser.add_option("-f", "--fragment", action="store_true", default=False,
dest="fragment", help="Parse as a fragment")
parser.add_option("", "--tree", action="store_true", default=False,
dest="tree", help="Output as debug tree")
parser.add_option("-x", "--xml", action="store_true", default=False,
dest="xml", help="Output as xml")
parser.add_option("", "--no-html", action="store_false", default=True,
dest="html", help="Don't output html")
parser.add_option("", "--hilite", action="store_true", default=False,
dest="hilite", help="Output as formatted highlighted code.")
parser.add_option("-c", "--encoding", action="store_true", default=False,
dest="encoding", help="Print character encoding used")
parser.add_option("", "--inject-meta-charset", action="store_true",
default=False, dest="inject_meta_charset",
help="inject <meta charset>")
parser.add_option("", "--strip-whitespace", action="store_true",
default=False, dest="strip_whitespace",
help="strip whitespace")
parser.add_option("", "--omit-optional-tags", action="store_true",
default=False, dest="omit_optional_tags",
help="omit optional tags")
parser.add_option("", "--quote-attr-values", action="store_true",
default=False, dest="quote_attr_values",
help="quote attribute values")
parser.add_option("", "--use-best-quote-char", action="store_true",
default=False, dest="use_best_quote_char",
help="use best quote character")
parser.add_option("", "--quote-char", action="store",
default=None, dest="quote_char",
help="quote character")
parser.add_option("", "--no-minimize-boolean-attributes",
action="store_false", default=True,
dest="minimize_boolean_attributes",
help="minimize boolean attributes")
parser.add_option("", "--use-trailing-solidus", action="store_true",
default=False, dest="use_trailing_solidus",
help="use trailing solidus")
parser.add_option("", "--space-before-trailing-solidus",
action="store_true", default=False,
dest="space_before_trailing_solidus",
help="add space before trailing solidus")
parser.add_option("", "--escape-lt-in-attrs", action="store_true",
default=False, dest="escape_lt_in_attrs",
help="escape less than signs in attribute values")
parser.add_option("", "--escape-rcdata", action="store_true",
default=False, dest="escape_rcdata",
help="escape rcdata element values")
parser.add_option("", "--sanitize", action="store_true", default=False,
dest="sanitize", help="sanitize")
parser.add_option("-l", "--log", action="store_true", default=False,
dest="log", help="log state transitions")
return parser
if __name__ == "__main__":
parse()
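# Example command lines (a sketch based on the options defined above):
#
#   python parse.py --tree page.html        # print the debug tree
#   python parse.py -e page.html            # list parse errors
#   python parse.py -p http://example.com/  # profile a parse of a remote page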
| mpl-2.0 |
mikhtonyuk/pyhk2 | hk2/kernel/habitat.py | 1 | 2150 | from interfaces import IStartup
from hk2.injection import Container, NoScope
from plugin_loaders.sysmod_plugin_loader import SysmodPluginLoader
from hk2.annotations import Service
from hk2.types import Annotations
import logging
log = logging.getLogger('hk2')
#===========================================================
class Habitat(object):
def __init__(self, plugin_loader=None):
"""
:type plugin_loader: IPluginLoader
"""
log.debug("Initializing Habitat")
self._loader = plugin_loader or SysmodPluginLoader()
self._ioc = Container()
self._services = set()
self._contracts = set()
self._servicesToContracts = {}
self._scan()
self._regInIoC()
def _scan(self):
_m, c, s = self._loader.scanPlugins()
self._contracts = set(c)
self._services = set(s)
# Predefined
self._contracts.add(IStartup)
self._servicesToContracts = {}
for s in self._services:
cts = self._getServiceContracts(s, self._contracts)
if not cts:
raise Exception("Service '%s' does not implement any contracts" % (s))
self._servicesToContracts[s] = cts
def _regInIoC(self):
self._ioc.bind(Habitat, self)
for s, cts in self._servicesToContracts.iteritems():
[annot] = filter(lambda x: isinstance(x, Service), Annotations.getAnnotations(s))
scope = annot.scope or NoScope
scope = scope()
for c in cts:
self._ioc.bind(c, s, scope)
def _getServiceContracts(self, svc, contracts):
return [c for c in contracts if issubclass(svc, c)]
def getByContract(self, contract):
return self._ioc.get(contract)
def getAllByContract(self, contract):
return self._ioc.getAll(contract)
def getAllContracts(self):
return self._contracts
def getServicesByContract(self, contract):
ret = []
for s, c in self._servicesToContracts.iteritems():
if contract in c:
ret.append(s)
return ret
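# Usage sketch (assumes plugins are discoverable by the default
# SysmodPluginLoader; IStartup comes from .interfaces):
#
#   habitat = Habitat()
#   for startup in habitat.getAllByContract(IStartup):
#       ...  # e.g. invoke whatever entry point the IStartup contract defines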
| mit |
shootstar/novatest | nova/api/openstack/compute/contrib/extended_availability_zone.py | 15 | 3810 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Netease, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Availability Zone Status API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import availability_zones as avail_zone
authorize = extensions.soft_extension_authorizer('compute',
'extended_availability_zone')
class ExtendedAZController(wsgi.Controller):
def _extend_server(self, context, server, instance):
key = "%s:availability_zone" % Extended_availability_zone.alias
az = avail_zone.get_instance_availability_zone(context, instance)
if not az and instance.get('availability_zone'):
# Likely hasn't reached a viable compute node yet so give back the
# desired availability_zone that *may* exist in the instance
# record itself.
az = instance['availability_zone']
server[key] = az
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
resp_obj.attach(xml=ExtendedAZTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
resp_obj.attach(xml=ExtendedAZsTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
self._extend_server(context, server, db_instance)
class Extended_availability_zone(extensions.ExtensionDescriptor):
"""Extended Server Attributes support."""
name = "ExtendedAvailabilityZone"
alias = "OS-EXT-AZ"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended_availability_zone/api/v2")
updated = "2013-01-30T00:00:00+00:00"
def get_controller_extensions(self):
controller = ExtendedAZController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def make_server(elem):
elem.set('{%s}availability_zone' % Extended_availability_zone.namespace,
'%s:availability_zone' % Extended_availability_zone.alias)
class ExtendedAZTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
alias = Extended_availability_zone.alias
namespace = Extended_availability_zone.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class ExtendedAZsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
alias = Extended_availability_zone.alias
namespace = Extended_availability_zone.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
| apache-2.0 |
hackbutty/git-repo | subcmds/forall.py | 48 | 7781 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fcntl
import re
import os
import select
import sys
import subprocess
from color import Coloring
from command import Command, MirrorSafeCommand
_CAN_COLOR = [
'branch',
'diff',
'grep',
'log',
]
class ForallColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'forall')
self.project = self.printer('project', attr='bold')
class Forall(Command, MirrorSafeCommand):
common = False
helpSummary = "Run a shell command in each project"
helpUsage = """
%prog [<project>...] -c <command> [<arg>...]
"""
helpDescription = """
Executes the same shell command in each project.
Output Formatting
-----------------
The -p option causes '%prog' to bind pipes to the command's stdin,
stdout and stderr streams, and pipe all output into a continuous
stream that is displayed in a single pager session. Project headings
are inserted before the output of each command is displayed. If the
command produces no output in a project, no heading is displayed.
The formatting convention used by -p is very suitable for some
types of searching, e.g. `repo forall -p -c git log -SFoo` will
print all commits that add or remove references to Foo.
The -v option causes '%prog' to display stderr messages if a
command produces output only on stderr. Normally the -p option
causes command output to be suppressed until the command produces
at least one byte of output on stdout.
Environment
-----------
pwd is the project's working directory. If the current client is
a mirror client, then pwd is the Git repository.
REPO_PROJECT is set to the unique name of the project.
REPO_PATH is the path relative to the root of the client.
REPO_REMOTE is the name of the remote system from the manifest.
REPO_LREV is the name of the revision from the manifest, translated
to a local tracking branch. If you need to pass the manifest
revision to a locally executed git command, use REPO_LREV.
REPO_RREV is the name of the revision from the manifest, exactly
as written in the manifest.
REPO__* are any extra environment variables, specified by the
"annotation" element under any project element. This can be useful
for differentiating trees based on user-specific criteria, or simply
annotating tree details.
shell positional arguments ($1, $2, .., $#) are set to any arguments
following <command>.
Unless -p is used, stdin, stdout, stderr are inherited from the
terminal and are not redirected.
"""
def _Options(self, p):
def cmd(option, opt_str, value, parser):
setattr(parser.values, option.dest, list(parser.rargs))
while parser.rargs:
del parser.rargs[0]
p.add_option('-c', '--command',
help='Command (and arguments) to execute',
dest='command',
action='callback',
callback=cmd)
g = p.add_option_group('Output')
g.add_option('-p',
dest='project_header', action='store_true',
help='Show project headers before output')
g.add_option('-v', '--verbose',
dest='verbose', action='store_true',
help='Show command error messages')
def WantPager(self, opt):
return opt.project_header
def Execute(self, opt, args):
if not opt.command:
self.Usage()
cmd = [opt.command[0]]
shell = True
if re.compile(r'^[a-z0-9A-Z_/\.-]+$').match(cmd[0]):
shell = False
if shell:
cmd.append(cmd[0])
cmd.extend(opt.command[1:])
if opt.project_header \
and not shell \
and cmd[0] == 'git':
# If this is a direct git command that can enable colorized
# output and the user prefers coloring, add --color into the
# command line because we are going to wrap the command into
# a pipe and git won't know coloring should activate.
#
for cn in cmd[1:]:
if not cn.startswith('-'):
break
else:
cn = None
# pylint: disable=W0631
if cn and cn in _CAN_COLOR:
class ColorCmd(Coloring):
def __init__(self, config, cmd):
Coloring.__init__(self, config, cmd)
if ColorCmd(self.manifest.manifestProject.config, cn).is_on:
cmd.insert(cmd.index(cn) + 1, '--color')
# pylint: enable=W0631
mirror = self.manifest.IsMirror
out = ForallColoring(self.manifest.manifestProject.config)
out.redirect(sys.stdout)
rc = 0
first = True
for project in self.GetProjects(args):
env = os.environ.copy()
def setenv(name, val):
if val is None:
val = ''
env[name] = val.encode()
setenv('REPO_PROJECT', project.name)
setenv('REPO_PATH', project.relpath)
setenv('REPO_REMOTE', project.remote.name)
setenv('REPO_LREV', project.GetRevisionId())
setenv('REPO_RREV', project.revisionExpr)
for a in project.annotations:
setenv("REPO__%s" % (a.name), a.value)
if mirror:
setenv('GIT_DIR', project.gitdir)
cwd = project.gitdir
else:
cwd = project.worktree
if not os.path.exists(cwd):
if (opt.project_header and opt.verbose) \
or not opt.project_header:
print >>sys.stderr, 'skipping %s/' % project.relpath
continue
if opt.project_header:
stdin = subprocess.PIPE
stdout = subprocess.PIPE
stderr = subprocess.PIPE
else:
stdin = None
stdout = None
stderr = None
p = subprocess.Popen(cmd,
cwd = cwd,
shell = shell,
env = env,
stdin = stdin,
stdout = stdout,
stderr = stderr)
if opt.project_header:
class sfd(object):
def __init__(self, fd, dest):
self.fd = fd
self.dest = dest
def fileno(self):
return self.fd.fileno()
empty = True
errbuf = ''
p.stdin.close()
s_in = [sfd(p.stdout, sys.stdout),
sfd(p.stderr, sys.stderr)]
for s in s_in:
flags = fcntl.fcntl(s.fd, fcntl.F_GETFL)
fcntl.fcntl(s.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
while s_in:
in_ready, _out_ready, _err_ready = select.select(s_in, [], [])
for s in in_ready:
buf = s.fd.read(4096)
if not buf:
s.fd.close()
s_in.remove(s)
continue
if not opt.verbose:
if s.fd != p.stdout:
errbuf += buf
continue
if empty:
if first:
first = False
else:
out.nl()
out.project('project %s/', project.relpath)
out.nl()
out.flush()
if errbuf:
sys.stderr.write(errbuf)
sys.stderr.flush()
errbuf = ''
empty = False
s.dest.write(buf)
s.dest.flush()
r = p.wait()
if r != 0 and r != rc:
rc = r
if rc != 0:
sys.exit(rc)
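# Example invocations (a sketch; see helpUsage/helpDescription above):
#
#   repo forall -c 'echo $REPO_PROJECT'
#   repo forall -p -c git log -SFoo   # project headers plus piped output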
| apache-2.0 |
jsoref/django | django/core/checks/security/csrf.py | 477 | 1796 | from django.conf import settings
from .. import Tags, Warning, register
W003 = Warning(
"You don't appear to be using Django's built-in "
"cross-site request forgery protection via the middleware "
"('django.middleware.csrf.CsrfViewMiddleware' is not in your "
"MIDDLEWARE_CLASSES). Enabling the middleware is the safest approach "
"to ensure you don't leave any holes.",
id='security.W003',
)
W016 = Warning(
"You have 'django.middleware.csrf.CsrfViewMiddleware' in your "
"MIDDLEWARE_CLASSES, but you have not set CSRF_COOKIE_SECURE to True. "
"Using a secure-only CSRF cookie makes it more difficult for network "
"traffic sniffers to steal the CSRF token.",
id='security.W016',
)
W017 = Warning(
"You have 'django.middleware.csrf.CsrfViewMiddleware' in your "
"MIDDLEWARE_CLASSES, but you have not set CSRF_COOKIE_HTTPONLY to True. "
"Using an HttpOnly CSRF cookie makes it more difficult for cross-site "
"scripting attacks to steal the CSRF token.",
id='security.W017',
)
def _csrf_middleware():
return "django.middleware.csrf.CsrfViewMiddleware" in settings.MIDDLEWARE_CLASSES
@register(Tags.security, deploy=True)
def check_csrf_middleware(app_configs, **kwargs):
passed_check = _csrf_middleware()
return [] if passed_check else [W003]
@register(Tags.security, deploy=True)
def check_csrf_cookie_secure(app_configs, **kwargs):
passed_check = (
not _csrf_middleware() or
settings.CSRF_COOKIE_SECURE
)
return [] if passed_check else [W016]
@register(Tags.security, deploy=True)
def check_csrf_cookie_httponly(app_configs, **kwargs):
passed_check = (
not _csrf_middleware() or
settings.CSRF_COOKIE_HTTPONLY
)
return [] if passed_check else [W017]
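# Settings sketch that satisfies all three checks above (assuming
# 'django.middleware.csrf.CsrfViewMiddleware' is kept in MIDDLEWARE_CLASSES):
#
#   CSRF_COOKIE_SECURE = True    # silences security.W016
#   CSRF_COOKIE_HTTPONLY = True  # silences security.W017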
| bsd-3-clause |
bmhatfield/graphite-web | webapp/tests/test_storage.py | 34 | 1383 | import logging
from graphite.storage import Store
from django.conf import settings
from django.test import TestCase
# Silence logging during tests
LOGGER = logging.getLogger()
# logging.NullHandler is a python 2.7ism
if hasattr(logging, "NullHandler"):
LOGGER.addHandler(logging.NullHandler())
class StorageTest(TestCase):
def test_store(self):
# Save settings
old_cluster_servers = settings.CLUSTER_SERVERS
old_remote_exclude_local = settings.REMOTE_EXCLUDE_LOCAL
# Set test cluster servers
settings.CLUSTER_SERVERS = ['127.0.0.1', '8.8.8.8']
# Test REMOTE_EXCLUDE_LOCAL = False
settings.REMOTE_EXCLUDE_LOCAL = False
test_store = Store()
remote_hosts = [remote_store.host for remote_store in test_store.remote_stores]
self.assertTrue('127.0.0.1' in remote_hosts)
self.assertTrue('8.8.8.8' in remote_hosts)
# Test REMOTE_EXCLUDE_LOCAL = True
settings.REMOTE_EXCLUDE_LOCAL = True
test_store = Store()
remote_hosts = [remote_store.host for remote_store in test_store.remote_stores]
self.assertTrue('127.0.0.1' not in remote_hosts)
self.assertTrue('8.8.8.8' in remote_hosts)
# Restore original settings
settings.CLUSTER_SERVERS = old_cluster_servers
settings.REMOTE_EXCLUDE_LOCAL = old_remote_exclude_local
| apache-2.0 |
40423217/2016fallcadp_hw | plugin/liquid_tags/test_flickr.py | 278 | 2466 | from . import flickr
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import os
import pytest
import re
PLUGIN_DIR = os.path.dirname(__file__)
TEST_DATA_DIR = os.path.join(PLUGIN_DIR, 'test_data')
@pytest.mark.parametrize('input,expected', [
('18873146680 large "test 1"',
dict(photo_id='18873146680',
size='large',
alt='test 1')),
('18873146680 large \'test 1\'',
dict(photo_id='18873146680',
size='large',
alt='test 1')),
('18873143536360 medium "test number two"',
dict(photo_id='18873143536360',
size='medium',
alt='test number two')),
('18873143536360 small "test number 3"',
dict(photo_id='18873143536360',
size='small',
alt='test number 3')),
('18873143536360 "test 4"',
dict(photo_id='18873143536360',
size=None,
alt='test 4')),
('18873143536360',
dict(photo_id='18873143536360',
size=None,
alt=None)),
('123456 small',
dict(photo_id='123456',
size='small',
alt=None))
])
def test_regex(input, expected):
assert re.match(flickr.PARSE_SYNTAX, input).groupdict() == expected
@pytest.mark.parametrize('input,expected', [
(['1', 'server1', '1', 'secret1', 'small'],
'https://farm1.staticflickr.com/server1/1_secret1_n.jpg'),
(['2', 'server2', '2', 'secret2', 'medium'],
'https://farm2.staticflickr.com/server2/2_secret2_c.jpg'),
(['3', 'server3', '3', 'secret3', 'large'],
'https://farm3.staticflickr.com/server3/3_secret3_b.jpg')
])
def test_source_url(input, expected):
assert flickr.source_url(
input[0], input[1], input[2], input[3], input[4]) == expected
@patch('liquid_tags.flickr.urlopen')
def test_generate_html(mock_urlopen):
# mock the return to deliver the flickr.json file instead
with open(TEST_DATA_DIR + '/flickr.json', 'rb') as f:
mock_urlopen.return_value.read.return_value = f.read()
attrs = dict(
photo_id='1234567',
size='large',
alt='this is a test'
)
expected = ('<a href="https://www.flickr.com/photos/'
'marvinxsteadfast/18841055371/">'
'<img src="https://farm6.staticflickr.com/5552/1234567_'
'17ac287217_b.jpg" alt="this is a test"></a>')
assert flickr.generate_html(attrs, 'abcdef') == expected
| agpl-3.0 |
finalion/WordQuery | src/service/remotemdx.py | 1 | 3508 | #-*- coding:utf-8 -*-
#
# Copyright © 2016–2017 Liang Feng <[email protected]>
#
# Support: Report an issue at https://github.com/finalion/WordQuery/issues
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version; http://www.gnu.org/copyleft/gpl.html.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
# import ntpath
import re
import urllib
try:
import urllib2
except:
import urllib.request as urllib2
import urlparse
from collections import defaultdict
from aqt.utils import showInfo, showText
from .base import QueryResult, WebService, export, register, with_styles
@register(u'MDX server')
class RemoteMdx(WebService):
def __init__(self):
super(RemoteMdx, self).__init__()
self.cache = defaultdict(set)
def active(self, dict_path, word):
self.word = word
self.url = dict_path + \
'/' if not dict_path.endswith('/') else dict_path
try:
req = urllib2.urlopen(self.url + word)
result, js = self.adapt_to_anki(req.read())
return QueryResult(result=result, js=js)
except:
return QueryResult.default()
def download_media_files(self, data):
diff = data.difference(self.cache[self.url])
self.cache[self.url].update(diff)
errors, styles = list(), list()
for each in diff:
basename = os.path.basename(each.replace('\\', os.path.sep))
saved_basename = '_' + basename
abs_url = urlparse.urljoin(self.url, each)
if basename.endswith('.css') or basename.endswith('.js'):
styles.append(saved_basename)
if not os.path.exists(saved_basename):
try:
urllib.urlretrieve(abs_url, saved_basename)
except:
errors.append(each)
return errors, styles
def adapt_to_anki(self, html):
"""
        1. convert the media paths to actual paths in anki's collection media folder.
        2. remove the js code
        3. import css, to make sure the css files can be synced. TO VALIDATE!
"""
media_files_set = set()
mcss = re.findall(r'href="(\S+?\.css)"', html)
media_files_set.update(set(mcss))
mjs = re.findall(r'src="([\w\./]\S+?\.js)"', html)
media_files_set.update(set(mjs))
msrc = re.findall(r'<img.*?src="([\w\./]\S+?)".*?>', html)
media_files_set.update(set(msrc))
for each in media_files_set:
html = html.replace(each, '_' + each.split('/')[-1])
errors, styles = self.download_media_files(media_files_set)
        html = u'<br>'.join([u"<style>@import url('%s');</style>" % style
                             for style in styles if style.endswith('.css')]) + html
js = re.findall(r'<script.*?>.*?</script>', html, re.DOTALL)
# for each in js:
# html = html.replace(each, '')
# showText(html)
return unicode(html), u'\n'.join(js)
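# Lookup sketch (assumes an MDX server is reachable at the base URL below,
# which is a hypothetical local address):
#
#   service = RemoteMdx()
#   result = service.active('http://localhost:8000/', 'hello')
#   # result.result holds the adapted HTML, result.js the extracted scripts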
| gpl-3.0 |
sbalde/edxplatform | common/djangoapps/embargo/admin.py | 154 | 1315 | """
Django admin page for embargo models
"""
from django.contrib import admin
import textwrap
from config_models.admin import ConfigurationModelAdmin
from embargo.models import IPFilter, CountryAccessRule, RestrictedCourse
from embargo.forms import IPFilterForm, RestrictedCourseForm
class IPFilterAdmin(ConfigurationModelAdmin):
"""Admin for blacklisting/whitelisting specific IP addresses"""
form = IPFilterForm
fieldsets = (
(None, {
'fields': ('enabled', 'whitelist', 'blacklist'),
'description': textwrap.dedent("""Enter specific IP addresses to explicitly
whitelist (not block) or blacklist (block) in the appropriate box below.
Separate IP addresses with a comma. Do not surround with quotes.
""")
}),
)
class CountryAccessRuleInline(admin.StackedInline):
"""Inline editor for country access rules. """
model = CountryAccessRule
extra = 1
def has_delete_permission(self, request, obj=None):
return True
class RestrictedCourseAdmin(admin.ModelAdmin):
"""Admin for configuring course restrictions. """
inlines = [CountryAccessRuleInline]
form = RestrictedCourseForm
admin.site.register(IPFilter, IPFilterAdmin)
admin.site.register(RestrictedCourse, RestrictedCourseAdmin)
| agpl-3.0 |
40423147/2017springcd_hw | plugin/liquid_tags/test_audio.py | 273 | 1456 | from . import audio
import pytest
import re
@pytest.mark.parametrize('input,expected', [
('http://foo.bar https://bar.foo',
('http://foo.bar', 'https://bar.foo', None)),
('http://test.foo',
('http://test.foo', None, None)),
('https://test.foo',
('https://test.foo', None, None)),
('http://foo.foo https://bar.bar http://zonk.zonk',
('http://foo.foo', 'https://bar.bar', 'http://zonk.zonk'))
])
def test_regex(input, expected):
assert re.match(audio.AUDIO, input).groups() == expected
@pytest.mark.parametrize('input,expected', [
('http://foo.foo/foo.mp3',
('<audio controls>'
'<source src="http://foo.foo/foo.mp3" type="audio/mpeg">'
'Your browser does not support the audio element.</audio>')),
('https://foo.foo/foo.ogg http://bar.bar/bar.opus',
('<audio controls>'
'<source src="https://foo.foo/foo.ogg" type="audio/ogg">'
'<source src="http://bar.bar/bar.opus" type="audio/ogg">'
'Your browser does not support the audio element.</audio>')),
('http://1.de/1.wav http://2.de/2.mp4 http://3.de/3.ogg',
('<audio controls>'
'<source src="http://1.de/1.wav" type="audio/wav">'
'<source src="http://2.de/2.mp4" type="audio/mp4">'
'<source src="http://3.de/3.ogg" type="audio/ogg">'
'Your browser does not support the audio element.</audio>'))
])
def test_create_html(input, expected):
assert audio.create_html(input) == expected
| gpl-3.0 |
cornelvlad/qualitybots | src/webdriver/appengine_communicator.py | 26 | 11159 | #!/usr/bin/python2.6
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles test distribution and results upload to app engine."""
import base64
import json
import math
import random
import time
import urllib
import urllib2
import zlib
import blobstore_upload
import client_logging
# Define the constants
_BLOBSTORE_UPLOAD_RETRIES = 3
_PIECES_UPLOAD_RETRIES = 3
_MAX_WAIT_TIME = 3
_TEST_DISTRIBUTION_SERVER = 'http://YOUR_APPENGINE_SERVER_HERE'
_FETCH_TEST_URL = _TEST_DISTRIBUTION_SERVER + '/distributor/accept_work_item'
_FINISH_TEST_URL = _TEST_DISTRIBUTION_SERVER + '/distributor/finish_work_item'
_RESULTS_SERVER = 'http://YOUR_APPENGINE_SERVER_HERE'
_RESULTS_UPLOAD_URL = _RESULTS_SERVER + '/putdata'
_LOG_UPLOAD_URL = _RESULTS_SERVER + '/distributor/upload_client_log'
LOGGER_NAME = 'appengine_communicator'
# Initialize the logger for this module
logger = client_logging.GetLogger(LOGGER_NAME)
class CommunicationError(Exception):
pass
class AuthCookie(object):
"""A data object that contains cookie dictionaries used to authenticate.
Attributes:
domain: A string representing the domain to authenticate on.
cookies: A list of dictionaries that define the cookies to add to the
browser in order to authenticate for a webpage.
"""
def __init__(self, domain, cookies):
self.domain = domain
self.cookies = cookies
class TestCase(object):
"""A data object describing a test case to run for bots.
Attributes:
url: A string indicating the URL to run for the test.
start_time: A string indicating the start time for the test.
config: A dictionary that specifies various configuration settings for
the test.
test_key: An integer representing the key that identifies this test.
auth_cookie: An AuthCookie object that represents data for authenticating
for the test case.
"""
def __init__(self, url, start_time, config, test_key, auth_domain=None,
auth_cookies=None):
self.url = url
self.start_time = start_time
self.config = config
self.test_key = test_key
self.auth_cookie = None
if auth_domain and auth_cookies:
self.auth_cookie = AuthCookie(auth_domain, auth_cookies)
class AppEngineCommunicator(object):
"""Handles communication with the test distributor and results servers.
Attributes:
_token: A string representing the token to use to pull tests from the
distributor.
_useragent: A string representing the useragent of the browser under test.
_instance_id: A string representing a unique identifier for the machine
instance.
_current_test_case: A TestCase object representing the current test case.
_log_uploaded: A boolean indicating whether the log file has been uploaded.
"""
def __init__(self, token, useragent, instance_id):
# Set up the attributes
self._token = token
self._useragent = useragent
self._instance_id = instance_id
self._current_test_case = None
self._log_uploaded = False
# TODO(user): Move this function into a shared utility module.
@staticmethod
def ExponentialBackoff(attempt, max_wait_time=_MAX_WAIT_TIME):
"""Wait a time that increases exponentially with the attempt number.
Args:
attempt: The most recent attempt number (starting at 0).
max_wait_time: An optional int that specifies the max base time to wait
in seconds.
"""
sleep_time = math.pow(2, attempt) * random.uniform(0.5, 1.0) * max_wait_time
time.sleep(sleep_time)
def FetchTest(self):
"""Fetch a new test from the test distributor.
This function will not prevent you from fetching another test if you have a
current test case that hasn't been finished. The old test case will be over
written by the new test case.
Returns:
A TestCase object describing the test case that was fetched. If there are
no more tests to run, None is returned.
Raises:
CommunicationError: There is an error in fetching the test.
"""
# Fetch the test case from the test distributor.
try:
data = urllib.urlencode({
'tokens': self._token, 'useragent': urllib.quote(self._useragent),
'instance_id': self._instance_id})
url_page = urllib2.urlopen(_FETCH_TEST_URL, data)
except urllib2.URLError:
self._LogAndRaiseException('Failed to fetch a test from app engine.')
# Process the data from the test distributor.
self._current_test_case = None
try:
test_dictionary = json.loads(url_page.read())
# Check if there is a test available.
if test_dictionary:
test_config = json.loads(test_dictionary['config'])
auth_domain = None
auth_cookies = None
if 'auth_domain' in test_config:
auth_domain = test_config['auth_domain']
if 'auth_cookies' in test_config:
auth_cookies = test_config['auth_cookies']
self._current_test_case = TestCase(
test_dictionary['data_str'][19:-1], test_dictionary['start_time'],
test_config, test_dictionary['key'], auth_domain=auth_domain,
auth_cookies=auth_cookies)
except ValueError:
logger.exception('Could not process the data from the test distributor.')
return self._current_test_case
def FinishTest(self, result):
"""Acknowledge that the current test case has been finished.
Args:
result: A string indicating the result of executing the test case.
Raises:
CommunicationError: There is an error communicating with
the test distributor.
"""
# Make sure there is a current test case to finish.
if not self._current_test_case:
return
try:
data = urllib.urlencode({'key': self._current_test_case.test_key,
'result': result,
'instance_id': self._instance_id})
urllib2.urlopen(_FINISH_TEST_URL, data)
self._current_test_case = None
except urllib2.URLError:
self._LogAndRaiseException('Failed acknowledging that the test finished.')
def _LogAndRaiseException(self, message):
"""Log the current exception being handled and raise a new exception.
Args:
message: A string indicating the message to log and use with the new
exception.
Raises:
CommunicationError: This exception is always raised using the given
message.
"""
logger.exception(message)
raise CommunicationError(message)
def UploadResults(self, nodes_table, layout_table, dynamic_content_table,
png, channel=''):
"""Upload the test case results to the results server.
Args:
nodes_table: A list representing the node results from the test case.
layout_table: A list representing the layout results from the test case.
dynamic_content_table: A list representing the dynamic content results
from the test case.
png: A string representing the binary data for a png image.
channel: An optional string representing the channel for the browser.
Raises:
CommunicationError: The initial upload communication failed.
"""
# Make sure there is a current test case to upload results for.
if not self._current_test_case:
return
# Format the results data for uploading.
suite_info = {
'date': self._current_test_case.start_time,
'key': self._current_test_case.test_key,
'refBrowser': self._current_test_case.config['refBrowser'],
'refBrowserChannel': self._current_test_case.config['refBrowserChannel']
}
data_to_send = {
'userAgent': self._useragent,
'url': self._current_test_case.url,
'nodesTable': base64.b64encode(
zlib.compress(json.dumps(nodes_table), 9)),
'dynamicContentTable': json.dumps(dynamic_content_table),
'width': self._current_test_case.config['width'],
'height': self._current_test_case.config['height'],
'channel': channel,
'suiteInfo': json.dumps(suite_info),
'instance_id': self._instance_id
}
# Upload the initial data.
try:
initial_send = urllib2.urlopen(
_RESULTS_UPLOAD_URL, urllib.urlencode(data_to_send))
except urllib2.URLError:
self._LogAndRaiseException('Failed on the initial results upload.')
response = initial_send.read()
if not response:
self._LogAndRaiseException(
'Initial results upload did not provide continuation data.')
response = json.loads(response)
upload_key = response['key'].encode('ascii')
num_pieces = int(response['nPieces'])
layout_table_length = len(layout_table)
logger.info('Uploading the image to blobstore with key "%s".', upload_key)
for attempt in range(_BLOBSTORE_UPLOAD_RETRIES):
try:
blobstore_upload.UploadImageToBlobstore(upload_key, png)
break
except blobstore_upload.BlobstoreUploadError:
logger.exception('Blobstore upload failed, attempt %d.', attempt+1)
AppEngineCommunicator.ExponentialBackoff(attempt)
# Send the layout table in the requested number of pieces.
logger.info('Uploading remaining results in %d pieces.', num_pieces)
n_rows_per_piece = int(math.ceil(layout_table_length / (num_pieces * 1.0)))
start = 0
end = n_rows_per_piece
for i in range(num_pieces):
data_pieces_to_send = {
'key': upload_key,
'layoutTable': json.dumps(layout_table[start:end]),
'i': i,
'instance_id': self._instance_id
}
for attempt in range(_PIECES_UPLOAD_RETRIES):
try:
urllib2.urlopen(_RESULTS_UPLOAD_URL,
urllib.urlencode(data_pieces_to_send))
break
except urllib2.URLError:
logger.exception('Piece "%d" upload failed, attempt %d.',
i, attempt+1)
AppEngineCommunicator.ExponentialBackoff(attempt)
start = end
end = min(end+n_rows_per_piece, len(layout_table))
def UploadLog(self, log):
"""Upload the test case results to the results server.
Args:
log: A string representing the client log to upload.
"""
# Upload the log data if this is our first upload.
if self._log_uploaded:
return
try:
urllib2.urlopen(_LOG_UPLOAD_URL, urllib.urlencode(
{'log': base64.b64encode(zlib.compress(json.dumps(log), 9)),
'instance_id': self._instance_id}))
self._log_uploaded = True
except:
raise CommunicationError('Failed to upload the client log.')
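# Driver sketch (the token, useragent and instance id values are hypothetical):
#
#   comm = AppEngineCommunicator('token123', 'Mozilla/5.0 (test)', 'i-001')
#   case = comm.FetchTest()
#   if case is not None:
#       ...  # run case.url in the browser under test, collect results
#       comm.FinishTest('success')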
| apache-2.0 |
matrixise/odoo | openerp/service/websrv_lib.py | 380 | 7780 | # -*- coding: utf-8 -*-
#
# Copyright P. Christeas <[email protected]> 2008-2010
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
###############################################################################
""" Framework for generic http servers
This library contains *no* OpenERP-specific functionality. It should be
usable in other projects, too.
"""
import logging
import SocketServer
from BaseHTTPServer import *
from SimpleHTTPServer import SimpleHTTPRequestHandler
_logger = logging.getLogger(__name__)
class AuthRequiredExc(Exception):
def __init__(self,atype,realm):
Exception.__init__(self)
self.atype = atype
self.realm = realm
class AuthRejectedExc(Exception):
pass
class AuthProvider:
def __init__(self,realm):
self.realm = realm
def authenticate(self, user, passwd, client_address):
return False
def log(self, msg):
print msg
def checkRequest(self,handler,path = '/'):
""" Check if we are allowed to process that request
"""
pass
class HTTPHandler(SimpleHTTPRequestHandler):
def __init__(self,request, client_address, server):
SimpleHTTPRequestHandler.__init__(self,request,client_address,server)
# print "Handler for %s inited" % str(client_address)
self.protocol_version = 'HTTP/1.1'
self.connection = dummyconn()
def handle(self):
""" Classes here should NOT handle inside their constructor
"""
pass
def finish(self):
pass
def setup(self):
pass
# A list of HTTPDir.
handlers = []
class HTTPDir:
""" A dispatcher class, like a virtual folder in httpd
"""
def __init__(self, path, handler, auth_provider=None, secure_only=False):
self.path = path
self.handler = handler
self.auth_provider = auth_provider
self.secure_only = secure_only
def matches(self, request):
""" Test if some request matches us. If so, return
the matched path. """
if request.startswith(self.path):
return self.path
return False
def instanciate_handler(self, request, client_address, server):
handler = self.handler(noconnection(request), client_address, server)
if self.auth_provider:
handler.auth_provider = self.auth_provider()
return handler
def reg_http_service(path, handler, auth_provider=None, secure_only=False):
""" Register a HTTP handler at a given path.
The auth_provider will be instanciated and set on the handler instances.
"""
global handlers
service = HTTPDir(path, handler, auth_provider, secure_only)
pos = len(handlers)
lastpos = pos
while pos > 0:
pos -= 1
if handlers[pos].matches(service.path):
lastpos = pos
# we won't break here, but search all way to the top, to
# ensure there is no lesser entry that will shadow the one
# we are inserting.
handlers.insert(lastpos, service)
def list_http_services(protocol=None):
global handlers
ret = []
for svc in handlers:
if protocol is None or protocol == 'http' or svc.secure_only:
ret.append((svc.path, str(svc.handler)))
return ret
def find_http_service(path, secure=False):
global handlers
for vdir in handlers:
p = vdir.matches(path)
if p == False or (vdir.secure_only and not secure):
continue
return vdir
return None
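# Registration sketch (MyHandler is a hypothetical HTTPHandler subclass;
# paths act as URL prefixes, and reg_http_service orders entries so that
# more specific paths are matched before shorter prefixes):
#
#   reg_http_service('/xmlrpc/', MyHandler)
#   vdir = find_http_service('/xmlrpc/common')   # -> the HTTPDir registered above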
class noconnection(object):
""" a class to use instead of the real connection
"""
def __init__(self, realsocket=None):
self.__hidden_socket = realsocket
def makefile(self, mode, bufsize):
return None
def close(self):
pass
def getsockname(self):
""" We need to return info about the real socket that is used for the request
"""
if not self.__hidden_socket:
raise AttributeError("No-connection class cannot tell real socket")
return self.__hidden_socket.getsockname()
class dummyconn:
def shutdown(self, tru):
pass
def _quote_html(html):
    return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
class FixSendError:
#error_message_format = """ """
def send_error(self, code, message=None):
        # overridden from BaseHTTPRequestHandler, we also send the content-length
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
_logger.error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header('Connection', 'close')
self.send_header('Content-Length', len(content) or 0)
self.end_headers()
if hasattr(self, '_flush'):
self._flush()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content)
class HttpOptions:
_HTTP_OPTIONS = {'Allow': ['OPTIONS' ] }
def do_OPTIONS(self):
"""return the list of capabilities """
opts = self._HTTP_OPTIONS
nopts = self._prep_OPTIONS(opts)
if nopts:
opts = nopts
self.send_response(200)
self.send_header("Content-Length", 0)
if 'Microsoft' in self.headers.get('User-Agent', ''):
self.send_header('MS-Author-Via', 'DAV')
# Microsoft's webdav lib ass-umes that the server would
# be a FrontPage(tm) one, unless we send a non-standard
# header that we are not an elephant.
# http://www.ibm.com/developerworks/rational/library/2089.html
for key, value in opts.items():
if isinstance(value, basestring):
self.send_header(key, value)
elif isinstance(value, (tuple, list)):
self.send_header(key, ', '.join(value))
self.end_headers()
def _prep_OPTIONS(self, opts):
"""Prepare the OPTIONS response, if needed
Sometimes, like in special DAV folders, the OPTIONS may contain
        extra keywords, perhaps also dependent on the request url.
:param opts: MUST be copied before being altered
:returns: the updated options.
"""
return opts
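# Editor's sketch (not part of the original module): a hypothetical subclass
# illustrating the documented _prep_OPTIONS contract -- copy `opts` before
# altering it, then return the updated mapping.
class _ExampleDavOptions(HttpOptions):
    def _prep_OPTIONS(self, opts):
        nopts = opts.copy()  # MUST be copied, per the base class docstring
        nopts['Allow'] = list(opts.get('Allow', [])) + ['PROPFIND']
        return nopts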
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Hack42/BusNotifier | webscraping/pdict.py | 1 | 15581 | __doc__ = """
pdict has a dictionary-like interface and a sqlite backend.
It uses pickle to store Python objects and strings, which are then compressed.
Multithreading is supported.
"""
import os
import sys
import datetime
import sqlite3
import zlib
import threading
import md5
import shutil
import glob
try:
import cPickle as pickle
except ImportError:
import pickle
DEFAULT_LIMIT = 1000
DEFAULT_TIMEOUT = 10000
class _PersistentDictPool:
def __init__(self, filename, max_size=2):
"""Splits cache over multiple sqlite instances to avoid each exceeding the limit
`filename' of the cache
`max_size' in GB of the cache before splitting
"""
#caches = glob.glob(filename + '*')
#print len(caches)
#os.path.getsize(f) for f in caches
class PersistentDict:
"""Stores and retrieves persistent data through a dict-like interface
Data is stored compressed on disk using sqlite3
filename:
        where to store the sqlite database ('cache.db' by default).
compress_level:
between 1-9 (in my test levels 1-3 produced a 1300kb file in ~7 seconds while 4-9 a 288kb file in ~9 seconds)
expires:
a timedelta object of how old data can be before expires. By default is set to None to disable.
timeout:
how long should a thread wait for sqlite to be ready (in ms)
isolation_level:
None for autocommit or else 'DEFERRED' / 'IMMEDIATE' / 'EXCLUSIVE'
>>> filename = 'cache.db'
>>> cache = PersistentDict(filename)
>>> url = 'http://google.com/abc'
>>> html = '<html>abc</html>'
>>>
>>> url in cache
False
>>> cache[url] = html
>>> url in cache
True
>>> cache[url] == html
True
>>> cache.get(url)['value'] == html
True
>>> now = datetime.datetime.now()
>>> cache.meta(url)
{}
>>> cache.meta(url, 'meta')
>>> cache.meta(url)
'meta'
>>> del cache[url]
>>> url in cache
False
>>> os.remove(filename)
"""
def __init__(self, filename='cache.db', compress_level=6, expires=None, timeout=DEFAULT_TIMEOUT, isolation_level=None, disk=False):
"""initialize a new PersistentDict with the specified database file.
"""
self.filename = filename
self.compress_level = compress_level
self.expires = expires
self.timeout = timeout
self._conn = sqlite3.connect(filename, timeout=timeout, isolation_level=isolation_level, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
self._conn.text_factory = lambda x: unicode(x, 'utf-8', 'replace')
sql = """
CREATE TABLE IF NOT EXISTS config (
key TEXT NOT NULL PRIMARY KEY UNIQUE,
value BLOB,
meta BLOB,
status INTEGER,
updated timestamp DEFAULT (datetime('now', 'localtime'))
);
"""
self._conn.execute(sql)
self._conn.execute("CREATE INDEX IF NOT EXISTS keys ON config (key);")
if disk:
self.fscache = FSCache(os.path.dirname(filename))
else:
self.fscache = None
def __copy__(self):
"""make a copy of current cache settings
"""
return PersistentDict(filename=self.filename, compress_level=self.compress_level, expires=self.expires, timeout=self.timeout)
def __contains__(self, key):
"""check the database to see if a key exists
"""
row = self._conn.execute("SELECT updated FROM config WHERE key=?;", (key,)).fetchone()
return row and self.is_fresh(row[0])
def __iter__(self):
"""iterate each key in the database
"""
c = self._conn.cursor()
c.execute("SELECT key FROM config;")
for row in c:
yield row[0]
def __getitem__(self, key):
"""return the value of the specified key or raise KeyError if not found
"""
row = self._conn.execute("SELECT value, updated FROM config WHERE key=?;", (key,)).fetchone()
if row:
if self.is_fresh(row[1]):
try:
if self.fscache:
value = self.fscache[key]
else:
# XXX remove this when migrated
raise KeyError()
except KeyError:
value = row[0]
return self.deserialize(value)
else:
raise KeyError("Key `%s' is stale" % key)
else:
raise KeyError("Key `%s' does not exist" % key)
def __delitem__(self, key):
"""remove the specifed value from the database
"""
self._conn.execute("DELETE FROM config WHERE key=?;", (key,))
if self.fscache:
del self.fscache[key]
def __setitem__(self, key, value):
"""set the value of the specified key
"""
updated = datetime.datetime.now()
if self.fscache:
self._conn.execute("INSERT OR REPLACE INTO config (key, meta, updated) VALUES(?, ?, ?, ?);", (
key, self.serialize({}), updated)
)
self.fscache[key] = self.serialize(value)
else:
self._conn.execute("INSERT OR REPLACE INTO config (key, value, meta, updated) VALUES(?, ?, ?, ?);", (
key, self.serialize(value), self.serialize({}), updated)
)
def serialize(self, value):
"""convert object to a compressed pickled string to save in the db
"""
return sqlite3.Binary(zlib.compress(pickle.dumps(value, protocol=pickle.HIGHEST_PROTOCOL), self.compress_level))
def deserialize(self, value):
"""convert compressed pickled string from database back into an object
"""
if value:
return pickle.loads(zlib.decompress(value))
def is_fresh(self, t):
"""returns whether this datetime has expired
"""
return self.expires is None or datetime.datetime.now() - t < self.expires
def get(self, key, default=None):
"""Get data at key and return default if not defined
"""
data = default
if key:
row = self._conn.execute("SELECT value, meta, updated FROM config WHERE key=?;", (key,)).fetchone()
if row:
try:
if self.fscache:
value = self.fscache[key]
else:
# XXX remove after migrated
raise KeyError()
except KeyError:
value = row[0]
data = dict(
value=self.deserialize(value),
meta=self.deserialize(row[1]),
updated=row[2]
)
return data
def meta(self, key, value=None):
"""Get / set meta for this value
if value is passed then set the meta attribute for this key
if not then get the existing meta data for this key
"""
if value is None:
# want to get meta
row = self._conn.execute("SELECT meta FROM config WHERE key=?;", (key,)).fetchone()
if row:
return self.deserialize(row[0])
else:
raise KeyError("Key `%s' does not exist" % key)
else:
# want to set meta
self._conn.execute("UPDATE config SET meta=?, updated=? WHERE key=?;", (self.serialize(value), datetime.datetime.now(), key))
def clear(self):
"""Clear all cached data
"""
self._conn.execute("DELETE FROM config;")
if self.fscache:
self.fscache.clear()
def merge(self, db, override=False):
"""Merge this databases content
override determines whether to override existing keys
"""
        for key in db:  # PersistentDict is iterable over its keys
if override or key not in self:
self[key] = db[key]
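# Editor's sketch (not part of the original library): a hedged example of
# combining two caches with merge(). In-memory sqlite databases keep it
# self-contained; the URL is hypothetical.
def _example_merge():
    first = PersistentDict(':memory:')
    second = PersistentDict(':memory:')
    second['http://example.com'] = '<html>abc</html>'
    first.merge(second)  # copies keys that `first` does not already hold
    return first['http://example.com']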
class Queue:
"""Stores queue of outstanding URL's on disk
>>> filename = 'queue.db'
>>> queue = Queue(filename)
>>> keys = [('a', 1), ('b', 2), ('c', 1)]
>>> queue.push(keys) # add new keys
>>> len(queue)
3
>>> queue.push(keys) # trying adding duplicate keys
>>> len(queue)
3
>>> queue.clear(keys=['a'])
1
>>> queue.pull(limit=1)
[u'b']
>>> queue.clear() # remove all queue
1
>>> os.remove(filename)
"""
size = None
def __init__(self, filename, timeout=DEFAULT_TIMEOUT, isolation_level=None):
self._conn = sqlite3.connect(filename, timeout=timeout, isolation_level=isolation_level, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
self._conn.text_factory = lambda x: unicode(x, 'utf-8', 'replace')
sql = """
CREATE TABLE IF NOT EXISTS queue (
key TEXT NOT NULL PRIMARY KEY UNIQUE,
status INTEGER,
priority INTEGER
);
"""
self._conn.execute(sql)
self._conn.execute("CREATE INDEX IF NOT EXISTS priorities ON queue (priority);")
if Queue.size is None:
self._update_size()
def __len__(self):
"""Get number of records queued
"""
return Queue.size
def _update_size(self):
"""Calculate the number of records queued
"""
row = self._conn.execute("SELECT count(*) FROM queue WHERE status=?;", (False,)).fetchone()
Queue.size = row[0]
def push(self, key_map):
"""Add these keys to the queue
Will not insert if key already exists.
key_map:
a list of (key, priority) tuples
"""
if key_map:
c = self._conn.cursor()
c.execute("BEGIN TRANSACTION")
c.executemany("INSERT OR IGNORE INTO queue (key, priority, status) VALUES(?, ?, ?);", [(key, priority, False) for key, priority in key_map])
c.execute("END TRANSACTION")
self._update_size()
def pull(self, limit=DEFAULT_LIMIT):
"""Get queued keys up to limit
"""
# XXX how to do this in a single transaction, and remove key index
ts = int(datetime.datetime.now().strftime('%s%f'))
self._conn.execute('UPDATE queue SET status=? WHERE key in (SELECT key FROM queue WHERE status=? ORDER BY priority DESC LIMIT ?);', (ts, False, limit))
rows = self._conn.execute('SELECT key FROM queue WHERE status=?', (ts,))
keys = [row[0] for row in rows]
Queue.size -= len(keys)
"""
c = self._conn.cursor()
rows = c.execute("SELECT key FROM queue WHERE status=? ORDER BY priority DESC LIMIT ?;", (False, limit)).fetchall()
keys = [row[0] for row in rows]
# set status to True
c.execute("BEGIN TRANSACTION")
c.executemany("UPDATE queue SET status=? WHERE key=?;", [(True, key) for key in keys])
c.execute("END TRANSACTION")"""
return keys
def clear(self, keys=None):
"""Remove keys from queue.
If keys is None remove all.
Returns the number of keys removed
"""
prev_size = len(self)
c = self._conn.cursor()
if keys:
c.execute("BEGIN TRANSACTION")
c.executemany("DELETE FROM queue WHERE key=?;", [(key,) for key in keys])
c.execute("END TRANSACTION")
self._update_size()
else:
c.execute("DELETE FROM queue;")
Queue.size = 0
return prev_size - len(self)
class FSCache:
"""
Dictionary interface that stores cached
values in the file system rather than in memory.
The file path is formed from an md5 hash of the key.
folder:
the root level folder for the cache
>>> fscache = FSCache('.')
>>> url = 'http://google.com/abc'
>>> html = '<html>abc</html>'
>>> url in fscache
False
>>> fscache[url] = html
>>> url in fscache
True
>>> fscache.get(url) == html
True
>>> fscache.get(html) == ''
True
>>> fscache.clear()
"""
PARENT_DIR = 'fscache'
FILE_NAME = 'index.html'
def __init__(self, folder):
self.folder = os.path.join(folder, FSCache.PARENT_DIR)
def __contains__(self, key):
"""Does data for this key exist
"""
return os.path.exists(self._key_path(key))
def __getitem__(self, key):
path = self._key_path(key)
try:
fp = open(path, 'rb')
except IOError:
# key does not exist
raise KeyError('%s does not exist' % key)
else:
# get value in key
return fp.read()
def __setitem__(self, key, value):
"""Save value at this key to this value
"""
path = self._key_path(key)
folder = os.path.dirname(path)
if not os.path.exists(folder):
os.makedirs(folder)
open(path, 'wb').write(value)
def __delitem__(self, key):
"""Remove the value at this key and any empty parent sub-directories
"""
path = self._key_path(key)
try:
os.remove(path)
os.removedirs(os.path.dirname(path))
except OSError:
pass
def _key_path(self, key):
"""The fils system path for this key
"""
# create unique hash for this key
try:
key = key.encode('utf-8')
except UnicodeDecodeError:
pass
h = md5.md5(key).hexdigest()
# create file system path
path = os.path.join(self.folder, os.path.sep.join(h), FSCache.FILE_NAME)
return path
def get(self, key, default=''):
"""Get data at this key and return default if does not exist
"""
try:
value = self[key]
except KeyError:
value = default
return value
def clear(self):
"""Remove all the cached values
"""
if os.path.exists(self.folder):
shutil.rmtree(self.folder)
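# Editor's sketch (not part of the original library): _key_path() spreads the
# 32-character md5 hex digest over one directory per character, so a key maps
# to a deeply nested path such as ./fscache/c/2/.../index.html. No files are
# created by this call.
def _example_key_path():
    return FSCache('.')._key_path('http://example.com')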
if __name__ == '__main__':
import tempfile
import webbrowser
from optparse import OptionParser
parser = OptionParser(usage='usage: %prog [options] <cache file>')
parser.add_option('-k', '--key', dest='key', help='The key to use')
parser.add_option('-v', '--value', dest='value', help='The value to store')
parser.add_option('-b', '--browser', action='store_true', dest='browser', default=False, help='View content of this key in a web browser')
parser.add_option('-c', '--clear', action='store_true', dest='clear', default=False, help='Clear all data for this cache')
options, args = parser.parse_args()
if not args:
parser.error('Must specify the cache file')
cache = PersistentDict(args[0])
if options.value:
        # store this value
if options.key:
cache[options.key] = options.value
else:
parser.error('Must specify the key')
elif options.browser:
if options.key:
value = cache[options.key]
filename = tempfile.NamedTemporaryFile().name
fp = open(filename, 'w')
fp.write(value)
fp.flush()
webbrowser.open(filename)
else:
parser.error('Must specify the key')
elif options.key:
print cache[options.key]
elif options.clear:
if raw_input('Really? Clear the cache? (y/n) ') == 'y':
cache.clear()
print 'cleared'
else:
parser.error('No options selected')
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/application_gateway_firewall_rule_set.py | 1 | 2892 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ApplicationGatewayFirewallRuleSet(Resource):
"""A web application firewall rule set.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param provisioning_state: The provisioning state of the web application
firewall rule set.
:type provisioning_state: str
:param rule_set_type: Required. The type of the web application firewall
rule set.
:type rule_set_type: str
:param rule_set_version: Required. The version of the web application
firewall rule set type.
:type rule_set_version: str
:param rule_groups: Required. The rule groups of the web application
firewall rule set.
:type rule_groups:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayFirewallRuleGroup]
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'rule_set_type': {'required': True},
'rule_set_version': {'required': True},
'rule_groups': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'rule_set_type': {'key': 'properties.ruleSetType', 'type': 'str'},
'rule_set_version': {'key': 'properties.ruleSetVersion', 'type': 'str'},
'rule_groups': {'key': 'properties.ruleGroups', 'type': '[ApplicationGatewayFirewallRuleGroup]'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayFirewallRuleSet, self).__init__(**kwargs)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.rule_set_type = kwargs.get('rule_set_type', None)
self.rule_set_version = kwargs.get('rule_set_version', None)
self.rule_groups = kwargs.get('rule_groups', None)
| mit |
momikey/pyrge | examples/asteroid/asteroidexample.py | 1 | 8101 | import random
from pyrge import *
MAX_X = 640
MAX_Y = 420
class Asteroid(Entity, mixin.Wrapper):
LARGE = 0
MEDIUM = 1
SMALL = 2
def __init__(self, position=Vector(0,0), velocity=Vector(0,0), size=0):
super(Asteroid, self).__init__()
self.sizetype = size
self.reset(position,velocity,size)
def reset(self, position, velocity, size):
self.load(['large.png','medium.png','small.png'][size])
self.visible = True
self.alive = True
self.rotating = True
self.angularVelocity = random.randint(-60,60)
self.angle = random.randint(0,359)
if position:
# making a new one
self.position = position
self.velocity = velocity
return self
initial_vel = 20
if random.random() < 0.5:
# left or right
if random.random() < 0.5:
# on the left
self.x = 0
self.velocity.x = initial_vel / 2 + random.random() * initial_vel
else:
# on the right
self.x = Game.width
self.velocity.x = -initial_vel / 2 - random.random() * initial_vel
self.y = random.randint(20,MAX_Y - 20)
self.velocity.y = random.random() * initial_vel * 2 - initial_vel
else:
# top or bottom
if random.random() < 0.5:
# on the top
self.y = 0
self.velocity.y = initial_vel / 2 + random.random() * initial_vel
else:
# on the bottom
self.y = Game.height
self.velocity.y = -initial_vel / 2 - random.random() * initial_vel
self.x = random.randint(20,MAX_X - 20)
self.velocity.x = random.random() * initial_vel * 2 - initial_vel
return self
def update(self):
self.hitbox = self.rect.inflate(8,8)
self.mask = Game.Mask.from_surface(self.image)
super(Asteroid, self).update()
def kill(self):
if self.sizetype != Asteroid.SMALL:
newsize = self.sizetype + 1
initial_vel = 20 * (newsize + 1) # velocity hack
chunks = random.randint(2,4)
for i in xrange(chunks):
ax = self.x + self.width / 2
ay = self.y + self.height / 2
avx = random.random() * initial_vel * 2 - initial_vel
avy = random.random() * initial_vel * 2 - initial_vel
a = Asteroid.create(Vector(ax, ay), Vector(avx, avy), newsize)
Game.world.add(a)
if self.sizetype == Asteroid.LARGE:
Game.world.score += 10
elif self.sizetype == Asteroid.MEDIUM:
Game.world.score += 50
elif self.sizetype == Asteroid.SMALL:
Game.world.score += 100
super(Asteroid, self).kill()
@staticmethod
def create(position=Vector(0,0), velocity=Vector(0,0), size=0):
newa = Game.world.getAvailable(Asteroid)
if newa is None:
newa = Asteroid(position, velocity, size)
else:
newa.reset(position, velocity, size)
return newa
class Ship(Entity, mixin.Wrapper):
def __init__(self):
super(Ship, self).__init__(x=MAX_X/2-8, y=MAX_Y/2-8)
self.load('ship.png')
self.angle = 90
self.maxThrust = 10
self.maxVelocity = Vector(240,240)
self.lives = 3
self.warps = 5
Game.world.addHandler(Game.events.KEYDOWN, self.onSpace)
Game.world.addHandler(Game.events.KEYDOWN, self.onWarp)
def update(self):
self.angularVelocity = 0
if Game.keys[Game.Constants.K_LEFT]:
self.angularVelocity = 180
elif Game.keys[Game.Constants.K_RIGHT]:
self.angularVelocity = -180
self.thrust = 0
if Game.keys[Game.Constants.K_UP]:
self.thrust -= util.vectorFromAngle(self.angle) * self.maxThrust
self.velocity += self.thrust
self.hitbox = self.rect.inflate(-10,-10)
super(Ship, self).update()
def reset(self):
oldpos = self.position
newpos = (random.randint(0,MAX_X), random.randint(0,MAX_Y))
asts = [a for a in Game.world.getEntities(Asteroid) if a.visible]
collided = False
for a in asts:
if a.rect.inflate(32,32).collidepoint(newpos):
collided = True
if collided:
newpos = self.reset()
self.position = newpos
self.velocity = 0.0, 0.0
return newpos
def kill(self):
if self.lives > 0:
self.reset()
self.lives -= 1
else:
super(Ship, self).kill()
def onSpace(self, evt):
if evt.key == Game.Constants.K_SPACE and self.alive:
b = Game.world.getAvailable(Bullet) #self.bullets[self.bulletIndex]
if b is None:
b = Bullet()
else:
b.lifetime = 2000
b.alive = True
b.visible = True
b.angle = self.angle
b.position = self.position
b.velocity = util.vectorFromAngle(self.angle) * 150 + self.velocity
Game.world.add(b)
def onWarp(self, evt):
if evt.key == Game.Constants.K_x:
if self.warps > 0:
self.reset()
self.warps -= 1
class Bullet(Entity, mixin.Wrapper):
def __init__(self):
super(Bullet, self).__init__()
surf = Game.Surface((4,4), Game.Constants.SRCALPHA)
Game.Draw.circle(surf, (255,255,255), (2,2), 2)
self.loadSurface(surf)
self.alive = False
self.visible = False
self.lifetime = 2000
def update(self):
if self.alive:
self.lifetime -= Game.elapsed
if self.lifetime <= 0:
self.kill()
super(Bullet, self).update()
class TheGame(World):
def __init__(self):
super(TheGame, self).__init__(fps=30)
self.followBounds(followMax=(MAX_X,MAX_Y))
self.lives = 3
self.reset()
def reset(self):
for e in self._entities:
e.kill()
self._entities.empty()
for h in self._evtHandlers[Game.events.KEYDOWN]:
self.removeHandler(Game.events.KEYDOWN, h)
self.addAsteroid()
self.ship = Ship()
self.score = 0
self.statusbg = Image(0,0,self.width,self.height - MAX_Y, name="Status")
self.statusbg.position = self.width/2, MAX_Y + (self.height-MAX_Y)/2
self.statusbg.pixels.fill((50,50,50))
self.statusline = text.Text(x=-80, y=-20, autowidth=True, color=(255,255,255))
self.add(self.ship)
self.add(self.statusbg)
self.statusbg.addChild(self.statusline)
self.timer = 0
def update(self):
self.timer -= Game.elapsed
if self.timer <= 0:
self.addAsteroid()
collided = lambda f,s: entity.Entity.collide(f,s,kill=True)
asts = Game.Sprite.Group([a for a in self.getEntities(Asteroid)])
bullets = Game.Sprite.Group([b for b in self.getEntities(Bullet)])
Game.Sprite.groupcollide(bullets, asts, False, False, collided)
Game.Sprite.spritecollide(self.ship, asts, False, collided)
asts.empty()
bullets.empty()
if self.ship.alive:
self.statusline.text = "Score: %d\tLives: %d" % (self.score,self.ship.lives)
else:
self.statusline.text = "Score: %d\tGame Over!" % self.score
super(TheGame, self).update()
def addAsteroid(self):
if len([e for e in self.getEntities(Asteroid) if e.alive]) < 32:
a = Asteroid.create()
a.visible = True
self.add(a)
self.timer = random.randint(0,10000)
def getAvailable(self, cls=Asteroid):
for s in self.getEntities(cls):
if not s.alive:
return s
return None
if __name__ == '__main__':
TheGame().loop()
| lgpl-2.1 |
sadaf2605/django | django/db/migrations/operations/base.py | 127 | 4888 | from __future__ import unicode_literals
from django.db import router
class Operation(object):
"""
Base class for migration operations.
    It's responsible both for mutating the in-memory model state
    (see db/migrations/state.py) to represent what it performs, and
    for actually performing it against a live database.
Note that some operations won't modify memory state at all (e.g. data
copying operations), and some will need their modifications to be
optionally specified by the user (e.g. custom Python code snippets)
Due to the way this class deals with deconstruction, it should be
considered immutable.
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
# Can this migration be represented as SQL? (things like RunPython cannot)
reduces_to_sql = True
# Should this operation be forced as atomic even on backends with no
# DDL transaction support (i.e., does it have no DDL, like RunPython)
atomic = False
# Should this operation be considered safe to elide and optimize across?
elidable = False
serialization_expand_args = []
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Returns a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
def state_forwards(self, app_label, state):
"""
Takes the state from the previous migration, and mutates it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
def describe(self):
"""
Outputs a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
def references_model(self, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
model name (as a string), with an optional app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True
def references_field(self, model_name, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
field name, with an optional app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label)
def allow_migrate_model(self, connection_alias, model):
"""
        Returns whether we're allowed to migrate the model.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model)
def reduce(self, operation, in_between, app_label=None):
"""
Return either a list of operations the actual operation should be
replaced with or a boolean that indicates whether or not the specified
operation can be optimized across.
"""
if self.elidable:
return [operation]
elif operation.elidable:
return [self]
return False
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
", ".join(map(repr, self._constructor_args[0])),
",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
)
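class ExampleNoopOperation(Operation):
    """
    Editor's sketch (not part of Django): a minimal concrete Operation showing
    which hooks a subclass typically overrides. It mutates nothing, so it is
    also marked elidable for the optimizer.
    """
    reversible = True
    elidable = True
    def state_forwards(self, app_label, state):
        pass  # real operations mutate the in-memory `state` here
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        pass  # real operations call schema_editor methods here
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass  # the exact inverse of database_forwards
    def describe(self):
        return "Example no-op operation (illustration only)"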
| bsd-3-clause |
jmesteve/saas3 | openerp/report/render/rml2pdf/utils.py | 381 | 7143 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2003, Fabien Pinckaers, UCL, FSA
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import copy
import locale
import logging
import re
import reportlab
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.misc import ustr
_logger = logging.getLogger(__name__)
_regex = re.compile('\[\[(.+?)\]\]')
def str2xml(s):
    return (s or '').replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def xml2str(s):
    return (s or '').replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>')
def _child_get(node, self=None, tagname=None):
for n in node:
if self and self.localcontext and n.get('rml_loop'):
for ctx in eval(n.get('rml_loop'),{}, self.localcontext):
self.localcontext.update(ctx)
if (tagname is None) or (n.tag==tagname):
if n.get('rml_except', False):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr)
yield n2
except GeneratorExit:
yield n
except Exception, e:
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
yield n
else:
yield n
continue
if self and self.localcontext and n.get('rml_except'):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if self and self.localcontext and n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr or {})
yield n2
tagname = ''
except GeneratorExit:
pass
except Exception, e:
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
pass
if (tagname is None) or (n.tag==tagname):
yield n
def _process_text(self, txt):
"""Translate ``txt`` according to the language in the local context,
replace dynamic ``[[expr]]`` with their real value, then escape
the result for XML.
:param str txt: original text to translate (must NOT be XML-escaped)
:return: translated text, with dynamic expressions evaluated and
with special XML characters escaped (``&,<,>``).
"""
if not self.localcontext:
return str2xml(txt)
if not txt:
return ''
result = ''
sps = _regex.split(txt)
while sps:
# This is a simple text to translate
to_translate = tools.ustr(sps.pop(0))
result += tools.ustr(self.localcontext.get('translate', lambda x:x)(to_translate))
if sps:
txt = None
try:
expr = sps.pop(0)
txt = eval(expr, self.localcontext)
if txt and isinstance(txt, basestring):
txt = tools.ustr(txt)
except Exception:
_logger.error("Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.", expr, self.localcontext)
if isinstance(txt, basestring):
result += txt
elif txt and (txt is not None) and (txt is not False):
result += ustr(txt)
return str2xml(result)
def text_get(node):
return ''.join([ustr(n.text) for n in node])
units = [
(re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
(re.compile('^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
(re.compile('^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
(re.compile('^(-?[0-9\.]+)\s*$'), 1)
]
def unit_get(size):
global units
if size:
if size.find('.') == -1:
decimal_point = '.'
try:
decimal_point = locale.nl_langinfo(locale.RADIXCHAR)
except Exception:
decimal_point = locale.localeconv()['decimal_point']
size = size.replace(decimal_point, '.')
for unit in units:
res = unit[0].search(size, 0)
if res:
return unit[1]*float(res.group(1))
return False
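# Editor's sketch (not part of the original module): unit_get() resolves RML
# size strings to points via reportlab's unit constants. '2cm' yields
# 2 * reportlab.lib.units.cm (about 56.7 points); a bare '12' is returned as
# the float 12.0.
def _example_unit_get():
    return unit_get('2cm'), unit_get('12')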
def tuple_int_get(node, attr_name, default=None):
if not node.get(attr_name):
return default
return map(int, node.get(attr_name).split(','))
def bool_get(value):
return (str(value)=="1") or (value.lower()=='yes')
def attr_get(node, attrs, dict=None):
if dict is None:
dict = {}
res = {}
for name in attrs:
if node.get(name):
res[name] = unit_get(node.get(name))
for key in dict:
if node.get(key):
if dict[key]=='str':
res[key] = tools.ustr(node.get(key))
elif dict[key]=='bool':
res[key] = bool_get(node.get(key))
elif dict[key]=='int':
res[key] = int(node.get(key))
elif dict[key]=='unit':
res[key] = unit_get(node.get(key))
elif dict[key] == 'float' :
res[key] = float(node.get(key))
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MihaiMoldovanu/ansible | test/units/playbook/test_play.py | 185 | 4200 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.block import Block
from ansible.playbook.play import Play
from ansible.playbook.role import Role
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestPlay(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_play(self):
p = Play.load(dict())
self.assertEqual(str(p), '')
def test_basic_play(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
connection='local',
remote_user="root",
become=True,
become_user="testing",
))
def test_play_with_user_conflict(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
user="testing",
gather_facts=False,
))
self.assertEqual(p.remote_user, "testing")
    def test_play_with_user_and_remote_user_conflict(self):
play_data = dict(
name="test play",
hosts=['foo'],
user="testing",
remote_user="testing",
)
self.assertRaises(AnsibleParserError, Play.load, play_data)
def test_play_with_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_handlers(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
handlers=[dict(action='shell echo "hello world"')],
))
def test_play_with_pre_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
pre_tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_post_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
post_tasks=[dict(action='shell echo "hello world"')],
))
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_play_with_roles(self):
fake_loader = DictDataLoader({
'/etc/ansible/roles/foo/tasks.yml': """
- name: role task
shell: echo "hello world"
""",
})
mock_var_manager = MagicMock()
mock_var_manager.get_vars.return_value = dict()
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
roles=['foo'],
), loader=fake_loader, variable_manager=mock_var_manager)
blocks = p.compile()
def test_play_compile(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
blocks = p.compile()
# with a single block, there will still be three
# implicit meta flush_handler blocks inserted
self.assertEqual(len(blocks), 4)
| gpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/testing/tests/test_decorators.py | 67 | 4134 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import *
from numpy.testing.noseclasses import KnownFailureTest
import nose
def test_slow():
@dec.slow
def slow_func(x, y, z):
pass
assert_(slow_func.slow)
def test_setastest():
@dec.setastest()
def f_default(a):
pass
@dec.setastest(True)
def f_istest(a):
pass
@dec.setastest(False)
def f_isnottest(a):
pass
assert_(f_default.__test__)
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
class DidntSkipException(Exception):
pass
def test_skip_functions_hardcoded():
@dec.skipif(True)
def f1(x):
raise DidntSkipException
try:
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(False)
def f2(x):
raise DidntSkipException
try:
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_functions_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.skipif(skip_tester)
def f1(x):
raise DidntSkipException
try:
skip_flag = 'skip me!'
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(skip_tester)
def f2(x):
raise DidntSkipException
try:
skip_flag = 'five is right out!'
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_generators_hardcoded():
@dec.knownfailureif(True, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
for j in g2(10):
pass
except KnownFailureTest:
        raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_skip_generators_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
skip_flag = 'skip me!'
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
skip_flag = 'do not skip'
for j in g2(10):
pass
except KnownFailureTest:
        raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_deprecated():
@dec.deprecated(True)
def non_deprecated_func():
pass
@dec.deprecated()
def deprecated_func():
import warnings
warnings.warn("TEST: deprecated func", DeprecationWarning)
@dec.deprecated()
def deprecated_func2():
import warnings
warnings.warn("AHHHH")
raise ValueError
@dec.deprecated()
def deprecated_func3():
import warnings
warnings.warn("AHHHH")
# marked as deprecated, but does not raise DeprecationWarning
assert_raises(AssertionError, non_deprecated_func)
# should be silent
deprecated_func()
# fails if deprecated decorator just disables test. See #1453.
assert_raises(ValueError, deprecated_func2)
# first warnings is not a DeprecationWarning
assert_raises(AssertionError, deprecated_func3)
if __name__ == '__main__':
run_module_suite()
| mit |
hainm/statsmodels | statsmodels/tsa/tests/results/arima211_css_results.py | 36 | 44583 | import numpy as np
llf = np.array([-240.29558272688])
nobs = np.array([ 202])
k = np.array([ 5])
k_exog = np.array([ 1])
sigma = np.array([ .79494581155191])
chi2 = np.array([ 1213.6019521322])
df_model = np.array([ 3])
k_ar = np.array([ 2])
k_ma = np.array([ 1])
params = np.array([ .72428568600554,
1.1464248419014,
-.17024528879204,
-.87113675466923,
.63193884330392])
cov_params = np.array([ .31218565961764,
-.01618380799341,
.00226345462929,
.01386291798401,
-.0036338799176,
-.01618380799341,
.00705713030623,
-.00395404914463,
-.00685704952799,
-.00018629958479,
.00226345462929,
-.00395404914463,
.00255884492061,
.00363586332269,
.00039879711931,
.01386291798401,
-.00685704952799,
.00363586332269,
.00751765532203,
.00008982556101,
-.0036338799176,
-.00018629958479,
.00039879711931,
.00008982556101,
.00077550533053]).reshape(5,5)
xb = np.array([ .72428566217422,
.72428566217422,
.56208884716034,
.53160965442657,
.45030161738396,
.45229381322861,
.38432359695435,
.40517011284828,
.36063131690025,
.30754271149635,
.32044330239296,
.29408219456673,
.27966624498367,
.29743707180023,
.25011941790581,
.27747189998627,
.24822402000427,
.23426930606365,
.27233305573463,
.23524768650532,
.26427435874939,
.21787133812904,
.22461311519146,
.22853142023087,
.24335558712482,
.22953669726849,
.25524401664734,
.22482520341873,
.26450532674789,
.31863233447075,
.27352628111839,
.33670437335968,
.25623551011086,
.28701293468475,
.315819054842,
.3238864839077,
.35844340920448,
.34399557113647,
.40348997712135,
.39373970031738,
.4022718667984,
.46476069092751,
.45762005448341,
.46842387318611,
.50536489486694,
.52051961421967,
.47866532206535,
.50378143787384,
.50863671302795,
.4302790760994,
.49568024277687,
.44652271270752,
.43774726986885,
.43010330200195,
.42344436049461,
.44517293572426,
.47460499405861,
.62086409330368,
.52550911903381,
.77532315254211,
.78466820716858,
.85438597202301,
.87056696414948,
1.0393311977386,
.99110960960388,
.85202795267105,
.91560190916061,
.89238166809082,
.88917690515518,
.72121334075928,
.84221452474594,
.8454754948616,
.82078683376312,
.95394861698151,
.84718400239944,
.839300096035,
.91501939296722,
.95743554830551,
1.0874761343002,
1.1326615810394,
1.1169674396515,
1.3300451040268,
1.4790810346603,
1.5027786493301,
1.7226468324661,
1.8395622968674,
1.5940405130386,
1.694568157196,
1.8241587877274,
1.7037791013718,
1.838702917099,
1.7334734201431,
1.4791669845581,
1.3007366657257,
1.7364456653595,
1.2694935798645,
.96595168113708,
1.1405370235443,
1.1328836679459,
1.1091921329498,
1.171138882637,
1.1465038061142,
1.0319484472275,
1.055313706398,
.93150246143341,
1.0844472646713,
.93333613872528,
.93137633800507,
1.0778160095215,
.38748729228973,
.77933365106583,
.75266307592392,
.88410103321075,
.94100385904312,
.91849637031555,
.96046274900436,
.92494148015976,
.98310285806656,
1.0272513628006,
1.0762135982513,
1.0743116140366,
1.254854798317,
1.1723403930664,
1.0479376316071,
1.3550333976746,
1.2255589962006,
1.2870025634766,
1.6643482446671,
1.3312928676605,
1.0657893419266,
1.1804157495499,
1.1335761547089,
1.137326002121,
1.1235628128052,
1.1115798950195,
1.1286649703979,
1.0989991426468,
1.0626485347748,
.96542054414749,
1.0419135093689,
.93033194541931,
.95628559589386,
1.027433514595,
.98328214883804,
1.0063992738724,
1.0645687580109,
.94354963302612,
.95077443122864,
1.0226324796677,
1.089217543602,
.97552293539047,
1.0441918373108,
1.052937746048,
.86785578727722,
.82579529285431,
.95432937145233,
.79897737503052,
.68320548534393,
.85365778207779,
.78336101770401,
.80072748661041,
.9089440703392,
.82500487565994,
.98515397310257,
.96745657920837,
1.0962044000626,
1.195325255394,
1.0824474096298,
1.2239117622375,
1.0142554044724,
1.0399018526077,
.80796521902084,
.7145761847496,
1.0631860494614,
.86374056339264,
.98086261749268,
1.0528303384781,
.86123734712601,
.80300676822662,
.96200370788574,
1.0364016294479,
.98456978797913,
1.1556725502014,
1.2025715112686,
1.0507286787033,
1.312912106514,
1.0682457685471,
2.0334177017212,
1.0775905847549,
1.2798084020615,
1.461397767067,
.72960823774338,
1.2498733997345,
1.466894865036,
1.286082983017,
1.3903408050537,
1.8483582735062,
1.4685434103012,
2.3107523918152,
.7711226940155,
-.31598940491676,
.68151205778122,
1.0212944746017])
y = np.array([np.nan,
29.704284667969,
29.712087631226,
29.881610870361,
29.820302963257,
29.992294311523,
29.934322357178,
30.155170440674,
30.200632095337,
30.117542266846,
30.24044418335,
30.274082183838,
30.319667816162,
30.507436752319,
30.470119476318,
30.657470703125,
30.68822479248,
30.714269638062,
30.962333679199,
30.985248565674,
31.204275131226,
31.16787147522,
31.244613647461,
31.348531723022,
31.523355484009,
31.609535217285,
31.835243225098,
31.874824523926,
32.144504547119,
32.5986328125,
32.723526000977,
33.186702728271,
33.156238555908,
33.387012481689,
33.7158203125,
34.023887634277,
34.458442687988,
34.743995666504,
35.303489685059,
35.693740844727,
36.102272033691,
36.764759063721,
37.257617950439,
37.768424987793,
38.405364990234,
39.020519256592,
39.378665924072,
39.903781890869,
40.408638000488,
40.530277252197,
41.095680236816,
41.346523284912,
41.637748718262,
41.930103302002,
42.223442077637,
42.645172119141,
43.174606323242,
44.320865631104,
44.725509643555,
46.37532043457,
47.584667205811,
48.954383850098,
50.170566558838,
52.039329528809,
53.291107177734,
53.852027893066,
54.915603637695,
55.792385101318,
56.6891746521,
56.821212768555,
57.842212677002,
58.745475769043,
59.5207862854,
60.953948974609,
61.6471824646,
62.439296722412,
63.615020751953,
64.857437133789,
66.587478637695,
68.23265838623,
69.616966247559,
71.930046081543,
74.479080200195,
76.702774047852,
79.722648620605,
82.739562988281,
84.194038391113,
86.394561767578,
89.024154663086,
90.803779602051,
93.33869934082,
95.133476257324,
95.879165649414,
96.300735473633,
99.236442565918,
99.369491577148,
98.865951538086,
99.940536499023,
100.93288421631,
101.90919494629,
103.27114105225,
104.44651031494,
105.13195037842,
106.15531158447,
106.63150024414,
108.08444976807,
108.63333129883,
109.43137359619,
110.9778137207,
109.08748626709,
110.27933502197,
110.95265960693,
112.28410339355,
113.64099884033,
114.71849822998,
115.96046447754,
116.9249420166,
118.18309783936,
119.52725219727,
120.97621154785,
122.27430725098,
124.35485076904,
125.67234039307,
126.44793701172,
128.85502624512,
130.12554931641,
131.78700256348,
135.06434631348,
136.03129577637,
136.16580200195,
137.38041687012,
138.3335723877,
139.43733215332,
140.52355957031,
141.61158752441,
142.82865905762,
143.8990020752,
144.86265563965,
145.46542358398,
146.64192199707,
147.2303314209,
148.15628051758,
149.42742919922,
150.38327026367,
151.50639343262,
152.86457824707,
153.54354858398,
154.45077514648,
155.72262573242,
157.18922424316,
157.97552490234,
159.24418640137,
160.45292663574,
160.7678527832,
161.22578430176,
162.45433044434,
162.79898071289,
162.88320922852,
164.05364990234,
164.68334960938,
165.50071716309,
166.80894470215,
167.52500915527,
169.08515930176,
170.26745605469,
171.99620056152,
173.89532470703,
174.98243713379,
176.82391357422,
177.41424560547,
178.43989562988,
178.40797424316,
178.41456604004,
180.36318969727,
180.86373901367,
182.18086242676,
183.65283203125,
184.06123352051,
184.50300598145,
185.86199951172,
187.33641052246,
188.38456726074,
190.25567626953,
192.00257873535,
192.85073852539,
195.11291503906,
195.76824951172,
201.23341369629,
200.47758483887,
201.97981262207,
204.16139221191,
202.6296081543,
204.82388305664,
207.38688659668,
208.62408447266,
210.52333068848,
214.34335327148,
215.46553039551,
220.92074584961,
217.66012573242,
211.85800170898,
213.35252380371,
215.49029541016])
resid = np.array([np.nan,
-.55428558588028,
-.36208805441856,
-.5116091966629,
-.28030154109001,
-.4422954916954,
-.18432281911373,
-.31516996026039,
-.39063200354576,
-.19754208624363,
-.26044383645058,
-.23408082127571,
-.10966806858778,
-.2874368429184,
-.09011957794428,
-.21747054159641,
-.20822501182556,
-.02426831051707,
-.21233357489109,
-.0452471524477,
-.25427412986755,
-.14787164330482,
-.12461274117231,
-.06853157281876,
-.14335711300373,
-.02953593060374,
-.18524432182312,
.00517434487119,
.13549427688122,
-.14863033592701,
.12647144496441,
-.28670132160187,
-.05623856931925,
.01299012638628,
-.01581981778145,
.07611121237278,
-.05844036862254,
.15600442886353,
-.00349225639366,
.0062618162483,
.19772660732269,
.03523930162191,
.04237993061543,
.13157841563225,
.09463357180357,
-.12051809579134,
.021334676072,
-.00378143391572,
-.30863979458809,
.06972090899944,
-.19567719101906,
-.14652347564697,
-.13774801790714,
-.13010406494141,
-.02344283089042,
.05482704937458,
.52539497613907,
-.12086410820484,
.87448859214783,
.42467761039734,
.51533102989197,
.34561482071877,
.82943379878998,
.2606680393219,
-.29110881686211,
.14797207713127,
-.01560037955642,
.00761602073908,
-.58917766809464,
.17878817021847,
.05778701230884,
-.04547626897693,
.47921240329742,
-.15394935011864,
-.0471847653389,
.26070219278336,
.28498136997223,
.64256292581558,
.51252233982086,
.2673399746418,
.9830310344696,
1.0699564218521,
.72091597318649,
1.2972244024277,
1.1773546934128,
-.13956540822983,
.50595796108246,
.80543184280396,
.07584273815155,
.6962223649025,
.06129856407642,
-.73347336053848,
-.87916851043701,
1.1992633342743,
-1.1364471912384,
-1.4694905281067,
-.0659501478076,
-.14053705334663,
-.13288362324238,
.19080325961113,
.02886573970318,
-.34650835394859,
-.03194846212864,
-.45531520247459,
.36850056052208,
-.38445034623146,
-.13333308696747,
.46862518787384,
-2.2778205871582,
.41251575946808,
-.07933671027422,
.4473415017128,
.4158943593502,
.1590022444725,
.28150060772896,
.03953726217151,
.27505549788475,
.31690016388893,
.37275013327599,
.22378182411194,
.82568991184235,
.14514668285847,
-.27233889698982,
1.052060842514,
.04496052488685,
.37444713711739,
1.6129913330078,
-.36434525251389,
-.93128365278244,
.03420155867934,
-.1804157346487,
-.03357006236911,
-.03733511269093,
-.02355666831136,
.08841699361801,
-.02865886501968,
-.09899909794331,
-.36265158653259,
.13458555936813,
-.34191656112671,
-.03033804148436,
.24371138215065,
-.02743346057832,
.1167239844799,
.29360374808311,
-.26456567645073,
-.04355576634407,
.24922250211239,
.37737664580345,
-.18922370672226,
.22447402775288,
.15580512583256,
-.55293774604797,
-.36785578727722,
.27421084046364,
-.45432937145233,
-.59898042678833,
.31679451465607,
-.1536608338356,
.01664204336703,
.39926943182945,
-.10894102603197,
.57500427961349,
.21484296023846,
.63253426551819,
.7037987112999,
.00467173522338,
.61756485700607,
-.4239239692688,
-.014255377464,
-.83988964557648,
-.70797437429428,
.88542991876602,
-.36318910121918,
.33625638484955,
.41914650797844,
-.4528394639492,
-.36123737692833,
.39699018001556,
.43800541758537,
.06358920782804,
.71544241905212,
.54432433843613,
-.20257151126862,
.94927132129669,
-.41291815042496,
3.4317541122437,
-1.8334206342697,
.22241242229939,
.72019159793854,
-2.2614006996155,
.94440299272537,
1.0961196422577,
-.04889564588666,
.50891524553299,
1.971658706665,
-.34635934233665,
3.1444630622864,
-4.0317454338074,
-5.4861345291138,
.81299871206284,
1.1164767742157,
.89470589160919])
yr = np.array([np.nan,
-.55428558588028,
-.36208805441856,
-.5116091966629,
-.28030154109001,
-.4422954916954,
-.18432281911373,
-.31516996026039,
-.39063200354576,
-.19754208624363,
-.26044383645058,
-.23408082127571,
-.10966806858778,
-.2874368429184,
-.09011957794428,
-.21747054159641,
-.20822501182556,
-.02426831051707,
-.21233357489109,
-.0452471524477,
-.25427412986755,
-.14787164330482,
-.12461274117231,
-.06853157281876,
-.14335711300373,
-.02953593060374,
-.18524432182312,
.00517434487119,
.13549427688122,
-.14863033592701,
.12647144496441,
-.28670132160187,
-.05623856931925,
.01299012638628,
-.01581981778145,
.07611121237278,
-.05844036862254,
.15600442886353,
-.00349225639366,
.0062618162483,
.19772660732269,
.03523930162191,
.04237993061543,
.13157841563225,
.09463357180357,
-.12051809579134,
.021334676072,
-.00378143391572,
-.30863979458809,
.06972090899944,
-.19567719101906,
-.14652347564697,
-.13774801790714,
-.13010406494141,
-.02344283089042,
.05482704937458,
.52539497613907,
-.12086410820484,
.87448859214783,
.42467761039734,
.51533102989197,
.34561482071877,
.82943379878998,
.2606680393219,
-.29110881686211,
.14797207713127,
-.01560037955642,
.00761602073908,
-.58917766809464,
.17878817021847,
.05778701230884,
-.04547626897693,
.47921240329742,
-.15394935011864,
-.0471847653389,
.26070219278336,
.28498136997223,
.64256292581558,
.51252233982086,
.2673399746418,
.9830310344696,
1.0699564218521,
.72091597318649,
1.2972244024277,
1.1773546934128,
-.13956540822983,
.50595796108246,
.80543184280396,
.07584273815155,
.6962223649025,
.06129856407642,
-.73347336053848,
-.87916851043701,
1.1992633342743,
-1.1364471912384,
-1.4694905281067,
-.0659501478076,
-.14053705334663,
-.13288362324238,
.19080325961113,
.02886573970318,
-.34650835394859,
-.03194846212864,
-.45531520247459,
.36850056052208,
-.38445034623146,
-.13333308696747,
.46862518787384,
-2.2778205871582,
.41251575946808,
-.07933671027422,
.4473415017128,
.4158943593502,
.1590022444725,
.28150060772896,
.03953726217151,
.27505549788475,
.31690016388893,
.37275013327599,
.22378182411194,
.82568991184235,
.14514668285847,
-.27233889698982,
1.052060842514,
.04496052488685,
.37444713711739,
1.6129913330078,
-.36434525251389,
-.93128365278244,
.03420155867934,
-.1804157346487,
-.03357006236911,
-.03733511269093,
-.02355666831136,
.08841699361801,
-.02865886501968,
-.09899909794331,
-.36265158653259,
.13458555936813,
-.34191656112671,
-.03033804148436,
.24371138215065,
-.02743346057832,
.1167239844799,
.29360374808311,
-.26456567645073,
-.04355576634407,
.24922250211239,
.37737664580345,
-.18922370672226,
.22447402775288,
.15580512583256,
-.55293774604797,
-.36785578727722,
.27421084046364,
-.45432937145233,
-.59898042678833,
.31679451465607,
-.1536608338356,
.01664204336703,
.39926943182945,
-.10894102603197,
.57500427961349,
.21484296023846,
.63253426551819,
.7037987112999,
.00467173522338,
.61756485700607,
-.4239239692688,
-.014255377464,
-.83988964557648,
-.70797437429428,
.88542991876602,
-.36318910121918,
.33625638484955,
.41914650797844,
-.4528394639492,
-.36123737692833,
.39699018001556,
.43800541758537,
.06358920782804,
.71544241905212,
.54432433843613,
-.20257151126862,
.94927132129669,
-.41291815042496,
3.4317541122437,
-1.8334206342697,
.22241242229939,
.72019159793854,
-2.2614006996155,
.94440299272537,
1.0961196422577,
-.04889564588666,
.50891524553299,
1.971658706665,
-.34635934233665,
3.1444630622864,
-4.0317454338074,
-5.4861345291138,
.81299871206284,
1.1164767742157,
.89470589160919])
mse = np.array([ 1.1115040779114,
.69814515113831,
.63478744029999,
.63409090042114,
.63356643915176,
.63317084312439,
.63287192583084,
.63264590501785,
.63247483968735,
.63234525918961,
.63224703073502,
.63217264413834,
.63211619853973,
.63207340240479,
.63204091787338,
.63201630115509,
.63199764490128,
.63198345899582,
.63197267055511,
.63196450471878,
.63195830583572,
.63195365667343,
.63195008039474,
.63194733858109,
.63194531202316,
.6319437623024,
.6319425702095,
.63194167613983,
.63194096088409,
.63194048404694,
.63194006681442,
.6319397687912,
.63193953037262,
.63193941116333,
.6319392323494,
.63193917274475,
.63193905353546,
.63193899393082,
.63193899393082,
.63193893432617,
.63193893432617,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688])
stdp = np.array([ .72428566217422,
.72428566217422,
.56208884716034,
.53160965442657,
.45030161738396,
.45229381322861,
.38432359695435,
.40517011284828,
.36063131690025,
.30754271149635,
.32044330239296,
.29408219456673,
.27966624498367,
.29743707180023,
.25011941790581,
.27747189998627,
.24822402000427,
.23426930606365,
.27233305573463,
.23524768650532,
.26427435874939,
.21787133812904,
.22461311519146,
.22853142023087,
.24335558712482,
.22953669726849,
.25524401664734,
.22482520341873,
.26450532674789,
.31863233447075,
.27352628111839,
.33670437335968,
.25623551011086,
.28701293468475,
.315819054842,
.3238864839077,
.35844340920448,
.34399557113647,
.40348997712135,
.39373970031738,
.4022718667984,
.46476069092751,
.45762005448341,
.46842387318611,
.50536489486694,
.52051961421967,
.47866532206535,
.50378143787384,
.50863671302795,
.4302790760994,
.49568024277687,
.44652271270752,
.43774726986885,
.43010330200195,
.42344436049461,
.44517293572426,
.47460499405861,
.62086409330368,
.52550911903381,
.77532315254211,
.78466820716858,
.85438597202301,
.87056696414948,
1.0393311977386,
.99110960960388,
.85202795267105,
.91560190916061,
.89238166809082,
.88917690515518,
.72121334075928,
.84221452474594,
.8454754948616,
.82078683376312,
.95394861698151,
.84718400239944,
.839300096035,
.91501939296722,
.95743554830551,
1.0874761343002,
1.1326615810394,
1.1169674396515,
1.3300451040268,
1.4790810346603,
1.5027786493301,
1.7226468324661,
1.8395622968674,
1.5940405130386,
1.694568157196,
1.8241587877274,
1.7037791013718,
1.838702917099,
1.7334734201431,
1.4791669845581,
1.3007366657257,
1.7364456653595,
1.2694935798645,
.96595168113708,
1.1405370235443,
1.1328836679459,
1.1091921329498,
1.171138882637,
1.1465038061142,
1.0319484472275,
1.055313706398,
.93150246143341,
1.0844472646713,
.93333613872528,
.93137633800507,
1.0778160095215,
.38748729228973,
.77933365106583,
.75266307592392,
.88410103321075,
.94100385904312,
.91849637031555,
.96046274900436,
.92494148015976,
.98310285806656,
1.0272513628006,
1.0762135982513,
1.0743116140366,
1.254854798317,
1.1723403930664,
1.0479376316071,
1.3550333976746,
1.2255589962006,
1.2870025634766,
1.6643482446671,
1.3312928676605,
1.0657893419266,
1.1804157495499,
1.1335761547089,
1.137326002121,
1.1235628128052,
1.1115798950195,
1.1286649703979,
1.0989991426468,
1.0626485347748,
.96542054414749,
1.0419135093689,
.93033194541931,
.95628559589386,
1.027433514595,
.98328214883804,
1.0063992738724,
1.0645687580109,
.94354963302612,
.95077443122864,
1.0226324796677,
1.089217543602,
.97552293539047,
1.0441918373108,
1.052937746048,
.86785578727722,
.82579529285431,
.95432937145233,
.79897737503052,
.68320548534393,
.85365778207779,
.78336101770401,
.80072748661041,
.9089440703392,
.82500487565994,
.98515397310257,
.96745657920837,
1.0962044000626,
1.195325255394,
1.0824474096298,
1.2239117622375,
1.0142554044724,
1.0399018526077,
.80796521902084,
.7145761847496,
1.0631860494614,
.86374056339264,
.98086261749268,
1.0528303384781,
.86123734712601,
.80300676822662,
.96200370788574,
1.0364016294479,
.98456978797913,
1.1556725502014,
1.2025715112686,
1.0507286787033,
1.312912106514,
1.0682457685471,
2.0334177017212,
1.0775905847549,
1.2798084020615,
1.461397767067,
.72960823774338,
1.2498733997345,
1.466894865036,
1.286082983017,
1.3903408050537,
1.8483582735062,
1.4685434103012,
2.3107523918152,
.7711226940155,
-.31598940491676,
.68151205778122,
1.0212944746017])
icstats = np.array([ 202,
np.nan,
-240.29558272688,
5,
490.59116545376,
507.13250394077])
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )
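# Illustrative usage sketch (not part of the original results file): Bunch makes
# each stored array reachable both as a dict key and as an attribute, which is
# how regression-test code typically consumes these reference values.
if __name__ == '__main__':
    # Attribute access and item access refer to the same objects
    assert results.mse is results['mse']
    print(results.nobs, results.k_ar, results.k_ma)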
| bsd-3-clause |
EvanK/ansible | test/sanity/code-smell/update-bundled.py | 24 | 4649 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2018, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import json
import os
import os.path
import re
import sys
from distutils.version import LooseVersion
import packaging.specifiers
from ansible.module_utils.urls import open_url
BUNDLED_RE = re.compile(b'\\b_BUNDLED_METADATA\\b')
def get_bundled_libs(paths):
bundled_libs = set()
for filename in fnmatch.filter(paths, 'lib/ansible/compat/*/__init__.py'):
bundled_libs.add(filename)
bundled_libs.add('lib/ansible/module_utils/distro/__init__.py')
bundled_libs.add('lib/ansible/module_utils/six/__init__.py')
bundled_libs.add('lib/ansible/module_utils/compat/ipaddress.py')
# backports.ssl_match_hostname should be moved to its own file in the future
bundled_libs.add('lib/ansible/module_utils/urls.py')
return bundled_libs
def get_files_with_bundled_metadata(paths):
with_metadata = set()
for path in paths:
if path == 'test/sanity/code-smell/update-bundled.py':
continue
with open(path, 'rb') as f:
body = f.read()
if BUNDLED_RE.search(body):
with_metadata.add(path)
return with_metadata
def get_bundled_metadata(filename):
with open(filename, 'r') as module:
for line in module:
if line.strip().startswith('_BUNDLED_METADATA'):
data = line[line.index('{'):].strip()
break
else:
raise ValueError('Unable to check bundled library for update. Please add'
                 ' a _BUNDLED_METADATA dictionary to the library file with'
                 ' information on the pypi name and bundled version.')
metadata = json.loads(data)
return metadata
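# Illustrative example (hypothetical values): a bundled file is expected to
# carry a single line such as
#   _BUNDLED_METADATA = {"pypi_name": "six", "version": "1.11.0"}
# from which the loop above slices everything starting at '{' and parses it
# as JSON.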
def get_latest_applicable_version(pypi_data, constraints=None):
latest_version = "0"
if constraints:
    version_specification = packaging.specifiers.SpecifierSet(constraints)
for version in pypi_data['releases']:
if version in version_specification:
if LooseVersion(version) > LooseVersion(latest_version):
latest_version = version
else:
latest_version = pypi_data['info']['version']
return latest_version
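# Minimal usage sketch (hypothetical data, shaped like the pypi.org JSON payload
# fetched below): with constraints '<2.0' the helper skips the 2.0 release and
# returns '1.5'.
#   pypi_data = {'info': {'version': '2.0'},
#                'releases': {'1.0': [], '1.5': [], '2.0': []}}
#   get_latest_applicable_version(pypi_data, '<2.0')  # -> '1.5'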
if __name__ == '__main__':
paths = sys.argv[1:] or sys.stdin.read().splitlines()
bundled_libs = get_bundled_libs(paths)
files_with_bundled_metadata = get_files_with_bundled_metadata(paths)
for filename in files_with_bundled_metadata.difference(bundled_libs):
print('{0}: ERROR: File contains _BUNDLED_METADATA but needs to be added to'
' test/sanity/code-smell/update-bundled.py'.format(filename))
for filename in bundled_libs:
try:
metadata = get_bundled_metadata(filename)
except ValueError as e:
print('{0}: ERROR: {1}'.format(filename, e))
continue
except (IOError, OSError) as e:
    if e.errno == 2:
        print('{0}: ERROR: {1}. Perhaps the bundled library has been removed'
              ' or moved and the bundled library test needs to be modified as'
              ' well?'.format(filename, e))
        # Skip this file; falling through would reuse stale metadata
        continue
    raise
pypi_fh = open_url('https://pypi.org/pypi/{0}/json'.format(metadata['pypi_name']))
pypi_data = json.loads(pypi_fh.read().decode('utf-8'))
constraints = metadata.get('version_constraints', None)
latest_version = get_latest_applicable_version(pypi_data, constraints)
if LooseVersion(metadata['version']) < LooseVersion(latest_version):
print('{0}: UPDATE {1} from {2} to {3} {4}'.format(
filename,
metadata['pypi_name'],
metadata['version'],
latest_version,
'https://pypi.org/pypi/{0}/json'.format(metadata['pypi_name'])))
| gpl-3.0 |
iwob/pysv | pysv/parsers/ply/yacc.py | 44 | 137322 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2017
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what one might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
import re
import types
import sys
import os.path
import inspect
import base64
import warnings
__version__ = '3.10'
__tabversion__ = '3.10'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
# 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
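# Illustrative usage (user code, not part of this module): a PlyLogger can be
# handed to parse() for verbose tracing, e.g.
#   parser.parse(data, debug=PlyLogger(sys.stderr))
# Passing debug=1 also works; parse() upgrades a plain int to a PlyLogger on
# stderr, as shown in the parse() method below.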
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
warnings.warn(_warnmsg)
return _errok()
def restart():
warnings.warn(_warnmsg)
return _restart()
def token():
warnings.warn(_warnmsg)
return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
global _errok, _token, _restart
_errok = parser.errok
_token = parser.token
_restart = parser.restart
r = errorfunc(token)
try:
del _errok, _token, _restart
except NameError:
pass
return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
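# Illustrative sketch (user code, not part of this module): inside a grammar
# rule the YaccProduction instance arrives as the argument 'p', so indexing
# reads and writes the .value of the underlying symbols:
#   def p_expr_plus(p):
#       'expr : expr PLUS term'
#       p[0] = p[1] + p[3]   # p[0] is the left-hand side, p[1..3] the right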
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
# For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
def disable_defaulted_states(self):
self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parsedebug-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
#--! DEBUG
debug.info('PLY: PARSE DEBUG START')
#--! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
#--! DEBUG
debug.debug('')
debug.debug('State : %s', state)
#--! DEBUG
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
#--! DEBUG
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
#--! DEBUG
#--! DEBUG
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
#--! DEBUG
debug.debug('Action : Shift and goto state %s', t)
#--! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
#--! DEBUG
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
#--! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
#--! DEBUG
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
#--! DEBUG
return result
if t is None:
#--! DEBUG
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-notrack-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - List of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# This class serves as a minimal stand-in for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self, str, name, len, func, file, line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return 'MiniProduction(%s)' % self.str
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next - Next LR item. For example, if we are at 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
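# Worked example (illustrative): scanning right to left with terminals {'PLUS'},
#   rightmost_terminal(['expr', 'PLUS', 'term'], {'PLUS': []})  # -> 'PLUS'
# since 'term' is a nonterminal and is skipped.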
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
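# Minimal construction sketch (illustrative; yacc() normally drives this class):
#   g = Grammar(['PLUS', 'NUMBER'])
#   g.set_precedence('PLUS', 'left', 1)   # must precede add_production()
#   g.add_production('expr', ['expr', 'PLUS', 'expr'])
#   g.add_production('expr', ['NUMBER'])
#   g.set_start('expr')                   # installs S' -> expr as rule 0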
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
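# Standalone sketch (illustrative, not part of PLY): the same fixed-point
# termination check over a hypothetical grammar dict. 'A' can derive a string
# of terminals, but 'B' only derives itself, so 'B' is flagged as infinite.
def _demo_infinite(prodnames, terminals):
    terminates = {t: True for t in terminals}
    for n in prodnames:
        terminates[n] = False
    changed = True
    while changed:
        changed = False
        for n, productions in prodnames.items():
            for rhs in productions:
                if all(terminates.get(s, False) for s in rhs):
                    if not terminates[n]:
                        terminates[n] = changed = True
                    break
    return [s for s, ok in terminates.items() if not ok]
# _demo_infinite({'A': [['x'], ['A', 'x']], 'B': [['B']]}, {'x'})  -> ['B']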
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of the unused terminal symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
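# Standalone sketch (illustrative, not part of PLY): _first() over a symbol
# string, assuming a hypothetical precomputed dict of FIRST sets.
def _demo_first(first, beta):
    result = []
    for x in beta:
        result += [f for f in first[x] if f != '<empty>' and f not in result]
        if '<empty>' not in first[x]:
            break
    else:
        result.append('<empty>')  # every symbol in beta can vanish
    return result
# With A -> 'a' | <empty> and B -> 'b':
# _demo_first({'A': ['a', '<empty>'], 'B': ['b']}, ('A', 'B'))  -> ['a', 'b']
# because A can vanish, exposing B's FIRST set.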
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of Follow(p.name) to Follow(B)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
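# Worked example (illustrative): for the toy grammar
#     S -> A 'b' ;  A -> 'a'
# the loop above yields Follow(S) = {'$end'} (S is the start symbol) and
# Follow(A) = {'b'}, since FIRST of the suffix after A is {'b'}; the suffix
# cannot derive <empty>, so nothing from Follow(S) propagates down to A.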
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
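# Usage sketch (illustrative, not part of PLY; relies on the module-level
# MAXINT that traverse() uses): each node contributes itself via FP, and the
# hypothetical relation a R b, b R c propagates sets back along R.
def _demo_digraph():
    X = ['a', 'b', 'c']
    edges = {'a': ['b'], 'b': ['c'], 'c': []}
    return digraph(X, R=lambda x: edges[x], FP=lambda x: [x])
    # -> {'a': ['a', 'b', 'c'], 'b': ['b', 'c'], 'c': ['c']}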
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
if method not in ['SLR', 'LALR']:
raise LALRError('Unsupported method %s' % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbol
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
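# Illustrative trace (not part of PLY): for the single grammar rule S -> 'a',
# C starts as [closure([S' -> . S])] = [{S' -> . S, S -> . 'a'}]. A goto on S
# and a goto on 'a' each yield one new item set, so lr0_items() returns three
# LR(0) states in total.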
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
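# Standalone sketch (illustrative, not part of PLY): the same nullable fixed
# point over hypothetical (name, rhs) production pairs.
def _demo_nullable(productions):
    nullable = set()
    while True:
        before = len(nullable)
        for name, rhs in productions:
            if all(sym in nullable for sym in rhs):
                nullable.add(name)  # an empty rhs is trivially nullable
        if len(nullable) == before:
            return nullable
# A -> <empty>, B -> A A, C -> 'c':
# _demo_nullable([('A', []), ('B', ['A', 'A']), ('C', ['c'])])  -> {'A', 'B'}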
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
dr_set = {}
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation;
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No, forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
# This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
log.info('Parsing method: %s', self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from rule being reduced (p)
rprec, rlevel = Productions[p.number].prec
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from the rule that could have been reduced
rprec, rlevel = Productions[st_actionp[a].number].prec
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a, m) in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
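# Worked example (illustrative): with precedence = [('left', 'PLUS')] and the
# classic rule expr -> expr PLUS expr, seeing lookahead PLUS while that rule
# can be reduced hits the shift/reduce logic above with slevel == rlevel and
# 'left' associativity, so the table records a reduce and PLUS ends up
# left-associative.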
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
if isinstance(tabmodule, types.ModuleType):
raise IOError("Won't overwrite existing tabmodule")
basemodulename = tabmodule.split('.')[-1]
filename = os.path.join(outputdir, basemodulename) + '.py'
try:
f = open(filename, 'w')
f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = {}
for s, nd in self.lr_action.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_action_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
''')
else:
f.write('\n_lr_action = { ')
for k, v in self.lr_action.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
if smaller:
# Factor out names to try and make smaller
items = {}
for s, nd in self.lr_goto.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_goto_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
''')
else:
f.write('\n_lr_goto = { ')
for k, v in self.lr_goto.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
# Write production table
f.write('_lr_productions = [\n')
for p in self.lr_productions:
if p.func:
f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
p.func, os.path.basename(p.file), p.line))
else:
f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
f.write(']\n')
f.close()
except IOError as e:
raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(filename, 'wb') as outf:
pickle.dump(__tabversion__, outf, pickle_protocol)
pickle.dump(self.lr_method, outf, pickle_protocol)
pickle.dump(signature, outf, pickle_protocol)
pickle.dump(self.lr_action, outf, pickle_protocol)
pickle.dump(self.lr_goto, outf, pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
else:
outp.append((str(p), p.name, p.len, None, None, None))
pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
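# Usage sketch (illustrative): calling get_caller_module_dict(2) from inside
# yacc() resolves to the frame of whoever called yacc(), so grammar objects
# defined at that call site can be discovered without an explicit module=.
def _demo_caller_dict():
    marker = 'hypothetical'        # a local in this (the caller's) frame
    d = get_caller_module_dict(1)  # levels=1 resolves to this frame itself
    return d['marker']             # -> 'hypothetical'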
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
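# Usage sketch (illustrative): parse_grammar() turns a rule docstring into
# (file, line, prodname, syms) tuples. For a hypothetical two-line rule:
# parse_grammar("expr : expr PLUS term\n     | term", 'demo.py', 10)
# -> [('demo.py', 11, 'expr', ['expr', 'PLUS', 'term']),
#     ('demo.py', 12, 'expr', ['term'])]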
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
parts = []
try:
if self.start:
parts.append(self.start)
if self.prec:
parts.append(''.join([''.join(p) for p in self.prec]))
if self.tokens:
parts.append(' '.join(self.tokens))
for f in self.pfuncs:
if f[3]:
parts.append(f[3])
except (TypeError, ValueError):
pass
return ''.join(parts)
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
continue
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, string_types):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, string_types):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, string_types):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
outputdir=None, debuglog=None, errorlog=None, picklefile=None):
if tabmodule is None:
tabmodule = tab_module
# Reference to the parsing method of the last built parser
global parse
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
else:
pdict = get_caller_module_dict(2)
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If tabmodule specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(tabmodule, types.ModuleType):
srcfile = tabmodule.__file__
else:
if '.' not in tabmodule:
srcfile = pdict['__file__']
else:
parts = tabmodule.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
# Determine if the module providing the grammar is part of a package.
# If so, fix the tabmodule setting so that tables load correctly
pkg = pdict.get('__package__')
if pkg and isinstance(tabmodule, str):
if '.' not in tabmodule:
tabmodule = pkg + '.' + tabmodule
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
except Exception as e:
errorlog.warning('There was a problem loading the table file: %r', e)
except VersionError as e:
errorlog.warning(str(e))
except ImportError:
pass
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug('Generating %s tables', method)
lr = LRGeneratedTable(grammar, method, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
try:
lr.write_table(tabmodule, outputdir, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
# Write a pickled version of the tables
if picklefile:
try:
lr.pickle_table(picklefile, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
| mit |
ukanga/SickRage | lib/requests/packages/chardet/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
def filter_high_bit_only(self, aBuf):
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
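# Minimal subclass sketch (illustrative, not part of chardet; assumes the
# eDetecting/eNotMe states defined in the constants module imported above):
# a concrete prober only needs to track state and report a name/confidence.
class _DemoASCIIProber(CharSetProber):
    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def get_charset_name(self):
        return 'ascii'

    def feed(self, aBuf):
        # Any byte above 0x7F rules out pure ASCII.
        if any(b > 0x7F for b in bytearray(aBuf)):
            self._mState = constants.eNotMe
        return self._mState

    def get_confidence(self):
        return 0.01 if self._mState == constants.eDetecting else 0.0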
| gpl-3.0 |
pfhayes/boto | boto/directconnect/__init__.py | 145 | 1679 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the AWS DirectConnect service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.directconnect.layer1 import DirectConnectConnection
return get_regions('directconnect', connection_cls=DirectConnectConnection)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
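# Usage sketch (illustrative; 'us-west-2' is only an example region name):
#
#     conn = connect_to_region('us-west-2')
#     if conn is None:
#         raise ValueError('unknown DirectConnect region')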
| mit |
barca/Nano_Tournament | tournament.py | 1 | 5742 | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
# connects to the database
def connect():
    """Connect to the PostgreSQL database. Returns a database connection."""
    return psycopg2.connect("dbname=tournament")

# deletes match records from the database
def deleteMatches():
    """Remove all the match records from the database."""
    conn = connect()
    c = conn.cursor()
    c.execute("DELETE FROM history;")
    c.execute("UPDATE matches SET wins = 0;")
    conn.commit()

# removes just the players
def deletePlayers():
    """Remove all the player records from the database."""
    conn = connect()
    c = conn.cursor()
    c.execute("DELETE FROM matches")
    c.execute("DELETE FROM history")
    conn.commit()
# returns the number of players
def countPlayers():
    """Returns the number of players currently registered."""
    conn = connect()
    c = conn.cursor()
    c.execute("SELECT COUNT (*) FROM matches;")
    count = c.fetchall()
    conn.commit()
    return count[0][0]
# adds a player to the database, with 0 wins and 0 matches already played
def registerPlayer(name):
    """Adds a player to the tournament database.

    The database assigns a unique serial id number for the player. (This
    should be handled by your SQL database schema, not in your Python code.)

    Args:
      name: the player's full name (need not be unique).
    """
    conn = connect()
    c = conn.cursor()
    c.execute("INSERT INTO matches (name, wins, matches) VALUES (%s, %s, %s)", (name, 0, 0))
    conn.commit()
# returns a player list, sorted by wins in descending order
def playerStandings():
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player in first place, or a player
    tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played
    """
    conn = connect()
    c = conn.cursor()
    c.execute("SELECT id, name, wins, matches FROM matches ORDER BY wins DESC")
    players = c.fetchall()
    conn.commit()
    return players
# follows spec: records the match winner and loser and increments each player's match count
def reportMatch(winner, loser):
    """Records the outcome of a single match between two players.

    Args:
      winner: the id number of the player who won
      loser: the id number of the player who lost
    """
    conn = connect()
    c = conn.cursor()
    c.execute("SELECT wins FROM matches WHERE id = ('%s')", (winner,))
    wins = c.fetchall()
    wins = wins[0][0] + 1
    c.execute("SELECT matches FROM matches WHERE id = ('%s')", (winner,))
    matches = c.fetchall()
    matches = matches[0][0] + 1
    c.execute("UPDATE matches set wins = ('%s') WHERE id = ('%s')", (wins, winner,))
    c.execute("UPDATE matches set matches = ('%s') WHERE id = ('%s')", (matches, winner,))
    c.execute("SELECT matches FROM matches WHERE id = ('%s')", (loser,))
    matches = c.fetchall()
    matches = matches[0][0] + 1
    c.execute("UPDATE matches set matches = ('%s') WHERE id = ('%s')", (matches, loser,))
    if winner < loser:
        c.execute("INSERT into history (player1Id, player2Id) VALUES (%s, %s)", (winner, loser,))
    else:
        c.execute("INSERT into history (player1Id, player2Id) VALUES (%s, %s)", (loser, winner,))
    conn.commit()
# two players can be paired provided they have no match history stored in the database
# and neither has already been paired this round
def can_pair(player_pair, rtn):
    player1Id = player_pair[0]
    player2Id = player_pair[2]
    conn = connect()
    c = conn.cursor()
    if player1Id < player2Id:
        c.execute("SELECT * FROM history where player1Id = ('%s') and player2Id = ('%s')", (player1Id, player2Id,))
    else:
        c.execute("SELECT * FROM history where player1Id = ('%s') and player2Id = ('%s')", (player2Id, player1Id,))
    matches = c.fetchall()
    return (matches == [] and player1Id not in rtn and player2Id not in rtn)
# the Swiss pairings take the players' match history into account and never pair anyone who has already met
def swissPairings():
    """Returns a list of pairs of players for the next round of a match.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings. Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player adjacent
    to him or her in the standings.

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2)
        id1: the first player's unique id
        name1: the first player's name
        id2: the second player's unique id
        name2: the second player's name
    """
    conn = connect()
    c = conn.cursor()
    c.execute("SELECT id, name FROM matches ORDER BY wins DESC")
    to_pair = c.fetchall()
    rtn = []
    history = []
    for each in range(0, len(to_pair) - 1):
        if can_pair((to_pair[each] + to_pair[each + 1]), history):
            rtn.append(to_pair[each] + to_pair[each + 1])
            history.append(to_pair[each][0])
            history.append(to_pair[each + 1][0])
        else:
            for sub_check in range(each + 1, len(to_pair) - 1):
                if can_pair((to_pair[each] + to_pair[sub_check]), history):
                    rtn.append(to_pair[each] + to_pair[sub_check])
                    history.append(to_pair[each][0])
                    history.append(to_pair[sub_check][0])
                    break
    return rtn
| mit |
ICTU/quality-time | components/collector/tests/source_collectors/quality_time/test_missing_metrics.py | 1 | 3549 | """Unit tests for the Quality-time missing metrics collector."""
from .base import QualityTimeTestCase
class QualityTimeMissingMetricsTest(QualityTimeTestCase):
"""Unit tests for the Quality-time missing metrics collector."""
METRIC_TYPE = "missing_metrics"
def setUp(self):
"""Set up test data."""
super().setUp()
self.set_source_parameter("reports", ["r1", "r2"])
self.expected_software_metrics = str(len(self.data_model["subjects"]["software"]["metrics"]))
self.reports["reports"].append(
dict(
title="R2",
report_uuid="r2",
subjects=dict(
s2=dict(
type="software",
name="S2",
metrics=dict(
m21=dict(
tags=["security"],
scale="count",
type="violations",
target="1",
sources=dict(s1=dict(type="sonarqube")),
),
m22=dict(
tags=["security"],
scale="count",
type="loc",
target="1",
sources=dict(s1=dict(type="sonarqube")),
),
m23=dict(
tags=["security"],
scale="count",
type="accessibility",
target="1",
sources=dict(s1=dict(type="sonarqube")),
),
),
)
),
),
)
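        # The expected missing metrics: every metric type listed for the
        # "software" subject in the data model, except the three types the
        # report above already covers (violations, loc, accessibility).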
self.entities = [
dict(
key=metric_type,
metric_type=self.data_model["metrics"][metric_type]["name"],
reports="R1, R2",
subject_type="Software",
)
for metric_type in self.data_model["subjects"]["software"]["metrics"]
if metric_type not in ["violations", "accessibility", "loc"]
]
async def test_nr_of_metrics(self):
"""Test that the number of missing_metrics is returned."""
response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])
self.assert_measurement(
response,
value=str(len(self.entities)),
total=self.expected_software_metrics,
entities=self.entities,
)
async def test_nr_of_missing_metrics_without_reports(self):
"""Test that no reports in the parameter equals all reports."""
self.set_source_parameter("reports", [])
response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])
self.assert_measurement(
response, value=str(len(self.entities)), total=self.expected_software_metrics, entities=self.entities
)
async def test_nr_of_missing_metrics_without_correct_report(self):
"""Test that an error is thrown for reports that don't exist."""
self.reports["reports"] = []
response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])
self.assert_measurement(response, value=None, parse_error="No reports found with title or id", entities=[])
| apache-2.0 |
nttks/edx-platform | common/djangoapps/util/keyword_substitution.py | 148 | 2343 | """
keyword_substitution.py
Contains utility functions to help substitute keywords in a text body with
the appropriate user / course data.
Supported:
LMS:
- %%USER_ID%% => anonymous user id
- %%USER_FULLNAME%% => User's full name
- %%COURSE_DISPLAY_NAME%% => display name of the course
- %%COURSE_END_DATE%% => end date of the course
Usage:
Call substitute_keywords_with_data where substitution is
needed. Currently called in:
- LMS: Announcements + Bulk emails
- CMS: Not called
"""
from django.contrib.auth.models import User
from student.models import anonymous_id_for_user
def anonymous_id_from_user_id(user_id):
"""
Gets a user's anonymous id from their user id
"""
user = User.objects.get(id=user_id)
return anonymous_id_for_user(user, None)
def substitute_keywords(string, user_id, context):
"""
Replaces all %%-encoded words using KEYWORD_FUNCTION_MAP mapping functions
Iterates through all keywords that must be substituted and replaces
them by calling the corresponding functions stored in KEYWORD_FUNCTION_MAP.
Functions stored in KEYWORD_FUNCTION_MAP must return a replacement string.
"""
# do this lazily to avoid unneeded database hits
KEYWORD_FUNCTION_MAP = {
'%%USER_ID%%': lambda: anonymous_id_from_user_id(user_id),
'%%USER_FULLNAME%%': lambda: context.get('name'),
'%%COURSE_DISPLAY_NAME%%': lambda: context.get('course_title'),
'%%COURSE_END_DATE%%': lambda: context.get('course_end_date'),
}
for key in KEYWORD_FUNCTION_MAP.keys():
if key in string:
substitutor = KEYWORD_FUNCTION_MAP[key]
string = string.replace(key, substitutor())
return string
def substitute_keywords_with_data(string, context):
"""
Given an email context, replaces all %%-encoded words in the given string
`context` is a dictionary that should include `user_id` and `course_title`
keys
"""
# Do not proceed without parameters: Compatibility check with existing tests
# that do not supply these parameters
user_id = context.get('user_id')
course_title = context.get('course_title')
if user_id is None or course_title is None:
return string
return substitute_keywords(string, user_id, context)
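

# A minimal sketch of the substitution, assuming a caller-built context
# (the user_id value here is a hypothetical primary key):
#
#   context = {'user_id': 42, 'course_title': 'Intro to CS',
#              'name': 'Jane Doe', 'course_end_date': '2015-06-01'}
#   substitute_keywords_with_data(
#       'Dear %%USER_FULLNAME%%, %%COURSE_DISPLAY_NAME%% ends on '
#       '%%COURSE_END_DATE%%.', context)
#   # -> 'Dear Jane Doe, Intro to CS ends on 2015-06-01.'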
| agpl-3.0 |
kingvuplus/Test-OBH | lib/python/Components/Element.py | 47 | 2938 | from Tools.CList import CList
# down up
# Render Converter Converter Source
# a bidirectional connection
def cached(f):
name = f.__name__
def wrapper(self):
cache = self.cache
if cache is None:
return f(self)
if name not in cache:
cache[name] = (True, f(self))
return cache[name][1]
return wrapper
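
# A minimal sketch of how @cached is meant to be used, assuming an Element
# subclass with an expensive getter (the names below are illustrative):
#
#   class Clock(Element):
#       @cached
#       def getTime(self):
#           return expensive_lookup()
#
# While self.cache is a dict (i.e. for the duration of changed()), repeated
# calls return the memoized value; otherwise the getter runs every time.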
class ElementError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return self.msg
class Element(object):
CHANGED_DEFAULT = 0 # initial "pull" state
CHANGED_ALL = 1 # really everything changed
CHANGED_CLEAR = 2 # we're expecting a real update soon. don't bother polling NOW, but clear data.
CHANGED_SPECIFIC = 3 # second tuple will specify what exactly changed
CHANGED_POLL = 4 # a timer expired
SINGLE_SOURCE = True
def __init__(self):
self.downstream_elements = CList()
self.master = None
self.sources = [ ]
self.source = None
self.__suspended = True
self.cache = None
def connectDownstream(self, downstream):
self.downstream_elements.append(downstream)
if self.master is None:
self.master = downstream
def connectUpstream(self, upstream):
assert not self.SINGLE_SOURCE or self.source is None
self.sources.append(upstream)
# self.source always refers to the last recent source added.
self.source = upstream
self.changed((self.CHANGED_DEFAULT,))
def connect(self, upstream):
self.connectUpstream(upstream)
upstream.connectDownstream(self)
# we disconnect from down to up
def disconnectAll(self):
# we should not disconnect from upstream if
# there are still elements depending on us.
assert len(self.downstream_elements) == 0, "there are still downstream elements left"
# Sources don't have a source themselves. don't do anything here.
for s in self.sources:
s.disconnectDownstream(self)
if self.source:
# sources are owned by the Screen, so don't destroy them here.
self.destroy()
self.source = None
self.sources = [ ]
def disconnectDownstream(self, downstream):
self.downstream_elements.remove(downstream)
if self.master == downstream:
self.master = None
if len(self.downstream_elements) == 0:
self.disconnectAll()
# default action: push downstream
def changed(self, *args, **kwargs):
self.cache = { }
self.downstream_elements.changed(*args, **kwargs)
self.cache = None
def setSuspend(self, suspended):
changed = self.__suspended != suspended
if not self.__suspended and suspended:
self.doSuspend(1)
elif self.__suspended and not suspended:
self.doSuspend(0)
self.__suspended = suspended
if changed:
for s in self.sources:
s.checkSuspend()
suspended = property(lambda self: self.__suspended, setSuspend)
def checkSuspend(self):
self.suspended = reduce(lambda x, y: x and y.__suspended, self.downstream_elements, True)
def doSuspend(self, suspend):
pass
def destroy(self):
pass
| gpl-2.0 |
switchboardOp/ansible | test/integration/cleanup_ec2.py | 25 | 6920 | '''
Find and delete AWS resources matching the provided --match string. Unless
--yes|-y is provided, you will be prompted for confirmation prior to deleting
resources. Please use caution: you can easily delete your *ENTIRE* EC2
infrastructure.
'''
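
# Example invocation (a sketch; credentials may instead come from
# credentials.yml or the EC2_ACCESS_KEY/EC2_SECRET_KEY environment variables):
#
#   python cleanup_ec2.py --match '^ansible-testing-' --yes \
#       --access AKIA... --secret ...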
import boto
import boto.ec2.elb
import optparse
import os
import os.path
import re
import sys
import time
import yaml
def delete_aws_resources(get_func, attr, opts):
for item in get_func():
val = getattr(item, attr)
if re.search(opts.match_re, val):
prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes)
def delete_autoscaling_group(get_func, attr, opts):
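    # Scale the first matching ASG down to zero, wait for its instances to
    # drain, then delete the (now empty) group itself.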
assumeyes = opts.assumeyes
group_name = None
for item in get_func():
group_name = getattr(item, attr)
if re.search(opts.match_re, group_name):
if not opts.assumeyes:
                assumeyes = raw_input("Delete matching %s? [y/n]: " % (item,)).lower() == 'y'
break
if assumeyes and group_name:
groups = asg.get_all_groups(names=[group_name])
if groups:
group = groups[0]
group.max_size = 0
group.min_size = 0
group.desired_capacity = 0
group.update()
instances = True
while instances:
tmp_groups = asg.get_all_groups(names=[group_name])
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)
group.delete()
while len(asg.get_all_groups(names=[group_name])):
time.sleep(5)
print("Terminated ASG: %s" % group_name)
def delete_aws_eips(get_func, attr, opts):
# the file might not be there if the integration test wasn't run
try:
eip_log = open(opts.eip_log, 'r').read().splitlines()
except IOError:
print('%s not found.' % opts.eip_log)
return
for item in get_func():
val = getattr(item, attr)
if val in eip_log:
prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes)
def delete_aws_instances(reservation, opts):
for list in reservation:
for item in list.instances:
prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes)
def prompt_and_delete(item, prompt, assumeyes):
if not assumeyes:
assumeyes = raw_input(prompt).lower() == 'y'
assert hasattr(item, 'delete') or hasattr(item, 'terminate'), "Class <%s> has no delete or terminate attribute" % item.__class__
if assumeyes:
if hasattr(item, 'delete'):
item.delete()
print("Deleted %s" % item)
if hasattr(item, 'terminate'):
item.terminate()
print("Terminated %s" % item)
def parse_args():
# Load details from credentials.yml
default_aws_access_key = os.environ.get('AWS_ACCESS_KEY', None)
default_aws_secret_key = os.environ.get('AWS_SECRET_KEY', None)
if os.path.isfile('credentials.yml'):
credentials = yaml.load(open('credentials.yml', 'r'))
if default_aws_access_key is None:
default_aws_access_key = credentials['ec2_access_key']
if default_aws_secret_key is None:
default_aws_secret_key = credentials['ec2_secret_key']
parser = optparse.OptionParser(
usage="%s [options]" % (sys.argv[0], ),
description=__doc__
)
parser.add_option(
"--access",
action="store", dest="ec2_access_key",
default=default_aws_access_key,
help="Amazon ec2 access id. Can use EC2_ACCESS_KEY environment variable, or a values from credentials.yml."
)
parser.add_option(
"--secret",
action="store", dest="ec2_secret_key",
default=default_aws_secret_key,
help="Amazon ec2 secret key. Can use EC2_SECRET_KEY environment variable, or a values from credentials.yml."
)
parser.add_option(
"--eip-log",
action="store", dest="eip_log",
default=None,
help="Path to log of EIPs created during test."
)
parser.add_option(
"--integration-config",
action="store", dest="int_config",
default="integration_config.yml",
help="path to integration config"
)
parser.add_option(
"--credentials", "-c",
action="store", dest="credential_file",
default="credentials.yml",
help="YAML file to read cloud credentials (default: %default)"
)
parser.add_option(
"--yes", "-y",
action="store_true", dest="assumeyes",
default=False,
help="Don't prompt for confirmation"
)
parser.add_option(
"--match",
action="store", dest="match_re",
default="^ansible-testing-",
help="Regular expression used to find AWS resources (default: %default)"
)
(opts, args) = parser.parse_args()
for required in ['ec2_access_key', 'ec2_secret_key']:
if getattr(opts, required) is None:
parser.error("Missing required parameter: --%s" % required)
return (opts, args)
if __name__ == '__main__':
(opts, args) = parse_args()
int_config = yaml.load(open(opts.int_config).read())
if not opts.eip_log:
output_dir = os.path.expanduser(int_config["output_dir"])
opts.eip_log = output_dir + '/' + opts.match_re.replace('^', '') + '-eip_integration_tests.log'
# Connect to AWS
aws = boto.connect_ec2(aws_access_key_id=opts.ec2_access_key,
aws_secret_access_key=opts.ec2_secret_key)
elb = boto.connect_elb(aws_access_key_id=opts.ec2_access_key,
aws_secret_access_key=opts.ec2_secret_key)
asg = boto.connect_autoscale(aws_access_key_id=opts.ec2_access_key,
aws_secret_access_key=opts.ec2_secret_key)
try:
# Delete matching keys
delete_aws_resources(aws.get_all_key_pairs, 'name', opts)
# Delete matching security groups
delete_aws_resources(aws.get_all_security_groups, 'name', opts)
# Delete matching ASGs
delete_autoscaling_group(asg.get_all_groups, 'name', opts)
# Delete matching launch configs
delete_aws_resources(asg.get_all_launch_configurations, 'name', opts)
# Delete ELBs
delete_aws_resources(elb.get_all_load_balancers, 'name', opts)
# Delete recorded EIPs
delete_aws_eips(aws.get_all_addresses, 'public_ip', opts)
# Delete temporary instances
filters = {"tag:Name": opts.match_re.replace('^', ''), "instance-state-name": ['running', 'pending', 'stopped']}
delete_aws_instances(aws.get_all_instances(filters=filters), opts)
except KeyboardInterrupt as e:
print("\nExiting on user command.")
| gpl-3.0 |
arante/pyloc | microblog/flask/lib/python3.5/site-packages/flask/__init__.py | 47 | 1673 | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.12.2'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed, before_render_template
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that Flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
| gpl-3.0 |
wisner23/serenata-de-amor | src/xml2csv.py | 1 | 2314 | import json
import sys
from csv import DictWriter
from datetime import datetime
from io import StringIO
from bs4 import BeautifulSoup
from lxml.etree import iterparse
XML_FILE_PATH = sys.argv[1]
CSV_FILE_PATH = sys.argv[2]
HTML_FILE_PATH = 'data/2016-08-08-datasets-format.html'
def output(*args, **kwargs):
"""Helper to print messages with a date/time marker"""
now = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
return print(now, *args, **kwargs)
def xml_parser(xml_path, tag='DESPESA'):
"""
Generator that parses the XML yielding a StringIO object for each record
found. The StringIO holds the data in JSON format.
"""
for event, element in iterparse(xml_path, tag=tag):
# get data
fields = {c.tag: c.text for c in element.iter() if c.tag != tag}
element.clear()
# export in JSON format
yield StringIO(json.dumps(fields))
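
# A minimal sketch of consuming the generator (the path is hypothetical):
#
#   for record in xml_parser('data/expenses.xml'):
#       fields = json.loads(record.getvalue())  # one expense row as a dict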
def csv_header(html_path):
"""
Generator that yields the CSV headers reading them from a HTML file (e.g.
datasets-format.html).
"""
yield 'ideDocumento' # this field is missing from the reference
with open(html_path, 'rb') as file_handler:
parsed = BeautifulSoup(file_handler.read(), 'lxml')
for row in parsed.select('.tabela-2 tr'):
try:
yield row.select('td')[0].text.strip()
except IndexError:
pass
def create_csv(csv_path, headers):
"""Creates the CSV file with the headers (must be a list)"""
with open(csv_path, 'w') as csv_file:
writer = DictWriter(csv_file, fieldnames=headers)
writer.writeheader()
output('Creating the CSV file')
headers = list(csv_header(HTML_FILE_PATH))
create_csv(CSV_FILE_PATH, headers)
output('Reading the XML file')
count = 1
for json_io in xml_parser(XML_FILE_PATH):
# convert json to csv
csv_io = StringIO()
writer = DictWriter(csv_io, fieldnames=headers)
writer.writerow(json.loads(json_io.getvalue()))
output('Writing record #{:,} to the CSV'.format(count), end='\r')
with open(CSV_FILE_PATH, 'a') as csv_file:
print(csv_io.getvalue(), file=csv_file)
    csv_io.close()
    json_io.close()
count += 1
print('') # clean the last output (the one with end='\r')
output('Done!')
| mit |
ichuang/sympy | sympy/physics/tests/test_gaussopt.py | 3 | 3686 | from sympy import atan2, factor, Float, I, Matrix, N, oo, pi, sqrt, symbols
from sympy.physics.gaussopt import (BeamParameter, CurvedMirror,
CurvedRefraction, FlatMirror, FlatRefraction, FreeSpace, GeometricRay,
RayTransferMatrix, ThinLens, conjugate_gauss_beams,
    gaussian_conj, geometric_conj_ab, geometric_conj_af, geometric_conj_bf,
rayleigh2waist, waist2rayleigh)
def streq(a, b):
return str(a) == str(b)
def test_gauss_opt():
mat = RayTransferMatrix(1,2,3,4)
assert mat == Matrix([[1, 2],[3, 4]])
assert mat == RayTransferMatrix( Matrix([[1,2],[3,4]]) )
assert [mat.A, mat.B, mat.C, mat.D] == [1, 2, 3, 4]
d, f, h, n1, n2, R = symbols('d f h n1 n2 R')
lens = ThinLens(f)
assert lens == Matrix([[ 1, 0], [-1/f, 1]])
assert lens.C == -1/f
assert FreeSpace(d) == Matrix([[ 1, d], [0, 1]])
assert FlatRefraction(n1, n2) == Matrix([[1, 0], [0, n1/n2]])
assert CurvedRefraction(R, n1, n2) == Matrix([[1, 0], [(n1 - n2)/(R*n2), n1/n2]])
assert FlatMirror() == Matrix([[1, 0], [0, 1]])
assert CurvedMirror(R) == Matrix([[ 1, 0], [-2/R, 1]])
assert ThinLens(f) == Matrix([[ 1, 0], [-1/f, 1]])
mul = CurvedMirror(R)*FreeSpace(d)
mul_mat = Matrix([[ 1, 0], [-2/R, 1]])*Matrix([[ 1, d], [0, 1]])
assert mul.A == mul_mat[0,0]
assert mul.B == mul_mat[0,1]
assert mul.C == mul_mat[1,0]
assert mul.D == mul_mat[1,1]
angle = symbols('angle')
assert GeometricRay(h,angle) == Matrix([[ h], [angle]])
assert FreeSpace(d)*GeometricRay(h,angle) == Matrix([[angle*d + h], [angle]])
assert GeometricRay( Matrix( ((h,),(angle,)) ) ) == Matrix([[h], [angle]])
assert (FreeSpace(d)*GeometricRay(h,angle)).height == angle*d + h
assert (FreeSpace(d)*GeometricRay(h,angle)).angle == angle
p = BeamParameter(530e-9, 1, w=1e-3)
assert streq(p.q, 1 + 1.88679245283019*I*pi)
assert streq(N(p.q), 1.0 + 5.92753330865999*I)
assert streq(N(p.w_0), Float(0.00100000000000000))
assert streq(N(p.z_r), Float(5.92753330865999))
fs = FreeSpace(10)
p1 = fs*p
assert streq(N(p.w), Float(0.00101413072159615))
assert streq(N(p1.w), Float(0.00210803120913829))
w, wavelen = symbols('w wavelen')
assert waist2rayleigh(w, wavelen) == pi*w**2/wavelen
z_r, wavelen = symbols('z_r wavelen')
assert rayleigh2waist(z_r, wavelen) == sqrt(wavelen*z_r)/sqrt(pi)
a, b, f = symbols('a b f')
assert geometric_conj_ab(a, b) == a*b/(a + b)
assert geometric_conj_af(a, f) == a*f/(a - f)
assert geometric_conj_bf(b, f) == b*f/(b - f)
assert geometric_conj_ab(oo, b) == b
assert geometric_conj_ab(a, oo) == a
s_in, z_r_in, f = symbols('s_in z_r_in f')
assert gaussian_conj(s_in, z_r_in, f)[0] == 1/(-1/(s_in + z_r_in**2/(-f + s_in)) + 1/f)
assert gaussian_conj(s_in, z_r_in, f)[1] == z_r_in/(1 - s_in**2/f**2 + z_r_in**2/f**2)
assert gaussian_conj(s_in, z_r_in, f)[2] == 1/sqrt(1 - s_in**2/f**2 + z_r_in**2/f**2)
l, w_i, w_o, f = symbols('l w_i w_o f')
assert conjugate_gauss_beams(l, w_i, w_o, f=f)[0] == f*(-sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)) + 1)
assert factor(conjugate_gauss_beams(l, w_i, w_o, f=f)[1]) == f*w_o**2*(w_i**2/w_o**2 - sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)))/w_i**2
assert conjugate_gauss_beams(l, w_i, w_o, f=f)[2] == f
    z, l, w = symbols('z l w', positive=True)
p = BeamParameter(l, z, w=w)
assert p.radius == z*(l**2*z**2/(pi**2*w**4) + 1)
assert p.w == w*sqrt(l**2*z**2/(pi**2*w**4) + 1)
assert p.w_0 == w
assert p.divergence == l/(pi*w)
assert p.gouy == atan2(z, pi*w**2/l)
assert p.waist_approximation_limit == 2*l/pi
| bsd-3-clause |
transitland/mapzen-gtfs | mzgtfs/serviceperiod.py | 3 | 2661 | """GTFS ServicePeriod entity."""
import datetime
import entity
import geom
import util
import validation
class ServicePeriod(entity.Entity):
KEY = 'service_id'
REQUIRED = [
'service_id',
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday',
'start_date',
'end_date'
]
OPTIONAL = [
]
def start(self):
return datetime.datetime.strptime(self.get('start_date'), '%Y%m%d')
def end(self):
return datetime.datetime.strptime(self.get('end_date'), '%Y%m%d')
def validate(self, validator=None):
validator = super(ServicePeriod, self).validate(validator)
with validator(self):
assert self.get('service_id'), "Required: service_id"
with validator(self):
assert validation.valid_bool(self.get('monday')), "Required: monday"
with validator(self):
assert validation.valid_bool(self.get('tuesday')), "Required: tuesday"
with validator(self):
assert validation.valid_bool(self.get('wednesday')), "Required: wednesday"
with validator(self):
assert validation.valid_bool(self.get('thursday')), "Required: thursday"
with validator(self):
assert validation.valid_bool(self.get('friday')), "Required: friday"
with validator(self):
assert validation.valid_bool(self.get('saturday')), "Required: saturday"
with validator(self):
assert validation.valid_bool(self.get('sunday')), "Required: sunday"
with validator(self):
assert validation.valid_date(self.get('start_date'), empty=True), "Invalid start_date"
with validator(self):
      assert validation.valid_date(self.get('end_date'), empty=True), "Invalid end_date"
with validator(self):
assert self.end() >= self.start(), \
"Invalid end_date, must be at least start_date"
# TODO: Warnings
# - no days of the week
return validator
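
# A minimal sketch of the validator protocol assumed by validate():
# validator(entity) must return a context manager that records
# AssertionErrors rather than raising them (the report class name below is
# an assumption, not necessarily this package's actual name):
#
#   report = validation.ValidationReport()
#   service_period.validate(report)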
class ServiceDate(entity.Entity):
REQUIRED = [
'service_id',
'date',
'exception_type'
]
def validate(self, validator=None):
validator = super(ServiceDate, self).validate(validator)
with validator(self):
assert self.get('service_id'), "Required: service_id"
with validator(self):
assert self.get('date'), "Required: date"
with validator(self):
assert validation.valid_int(self.get('exception_type'), vmin=1, vmax=2), \
"Invalid exception_type"
return validator
def validate_feed(self, validator=None):
validator = super(ServiceDate, self).validate_feed(validator)
with validator(self):
assert self._feed.service_period(self.get('service_id')), \
"Unknown service_id"
return validator
| mit |
onceuponatimeforever/oh-mainline | mysite/search/migrations/0031_cache_contributor_count.py | 17 | 4562 | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Adding field 'Project.cached_contributor_count'
db.add_column('search_project', 'cached_contributor_count', orm['search.project:cached_contributor_count'])
def backwards(self, orm):
# Deleting field 'Project.cached_contributor_count'
db.delete_column('search_project', 'cached_contributor_count')
models = {
'search.bug': {
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.hitcountcache': {
'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'hit_count': ('django.db.models.fields.IntegerField', [], {})
},
'search.project': {
'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['search']
| agpl-3.0 |
AOKP/external_chromium_org | content/browser/tracing/generate_trace_viewer_grd.py | 48 | 2407 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a grd file for packaging the trace-viewer files.
This file is modified from the devtools generate_devtools_grd.py file.
"""
import errno
import os
import shutil
import sys
from xml.dom import minidom
kTracingResourcePrefix = 'IDR_TRACING_'
kGrdTemplate = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="0" current_release="1">
<outputs>
<output filename="grit/tracing_resources.h" type="rc_header">
<emit emit_type='prepend'></emit>
</output>
<output filename="tracing_resources.pak" type="data_package" />
<output filename="tracing_resources.rc" type="rc_all" />
</outputs>
<release seq="1">
<includes>
<if expr="not is_android"></if>
</includes>
</release>
</grit>
'''
class ParsedArgs:
def __init__(self, source_files, output_filename):
self.source_files = source_files
self.output_filename = output_filename
def parse_args(argv):
output_position = argv.index('--output')
source_files = argv[:output_position]
return ParsedArgs(source_files, argv[output_position + 1])
def make_name_from_filename(filename):
return kTracingResourcePrefix + (os.path.splitext(filename)[1][1:]).upper()
def add_file_to_grd(grd_doc, filename):
includes_node = grd_doc.getElementsByTagName('if')[0]
includes_node.appendChild(grd_doc.createTextNode('\n '))
new_include_node = grd_doc.createElement('include')
new_include_node.setAttribute('name', make_name_from_filename(filename))
new_include_node.setAttribute('file', filename)
new_include_node.setAttribute('type', 'BINDATA')
new_include_node.setAttribute('flattenhtml', 'true')
if filename.endswith('.html'):
new_include_node.setAttribute('allowexternalscript', 'true')
includes_node.appendChild(new_include_node)
def main(argv):
parsed_args = parse_args(argv[1:])
output_directory = os.path.dirname(parsed_args.output_filename)
doc = minidom.parseString(kGrdTemplate)
for filename in parsed_args.source_files:
add_file_to_grd(doc, os.path.basename(filename))
with open(parsed_args.output_filename, 'w') as output_file:
output_file.write(doc.toxml(encoding='UTF-8'))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
ahaym/eden | languages/ne.py | 7 | 327294 | # -*- coding: utf-8 -*-
{
'# of International Staff': '# अन्तराष्ट्रिय स्टाफ',
'# of National Staff': '# राष्ट्रिय स्टाफ',
'# selected': '# छानियो',
'%(app)s not installed. Ask the Server Administrator to install on Server.': '%(app)s इन्स्टल हुन सकेन । सर्भरलाई इन्स्टल गर्नको निम्ति सर्भरको एडमिनिस्ट्राटरलाई सोध्नुहोस् ।',
'%(count)s Roles of the user removed': '%(count)s प्रयोगकर्ताको भूमिकाहरू रद्द गरियो ।',
'%(count)s Users removed from Role': '%(count)s प्रयोगकर्ताहरूलाई भूमिकाबाट निकालियो ।',
'%(label)s contains %(values)s': '%(label)s मा %(values)s',
'%(label)s contains any of %(values)s': '%(label)s कुनै मा %(values)s',
'%(label)s does not contain %(values)s': '%(label)s छैन %(values)s',
'%(label)s is %(values)s': '%(label)s %(values)s हो',
'%(label)s like %(values)s': '%(label)s जस्तै %(values)s',
'%(label)s not like %(values)s': '%(label)s अमिल्दो %(values)s',
'%(module)s not installed': '%(module)s इन्स्टल भएको छैन ।',
'%(pe)s in %(location)s': '%(pe)s मा %(location)s',
'%(proj4js)s definition': '%(proj4js)s परिभाषा',
'%(resource)s Filter': '%(resource)s फिल्टर',
'%(site_label)s Status': '%(site_label)s स्टाटस्',
'%(site_label)s Status added': '%(site_label)s स्टाटस् संचित भयो',
'%(site_label)s Status deleted': '%(site_label)s स्टाटस् हटाइयो ',
'%(site_label)s Status updated': '%(site_label)s स्टाटस् परिमार्जन गरियो ',
'%(system_name)s - New User Registered': '%(system_name)s - नयाँ प्रयोगकर्ता दर्ता भयो ',
'%(system_name)s - New User Registration Approval Pending': '%(system_name)s - नयाँ प्रयोगकर्ता दर्ताको प्रमाणिकरण हुन बाँकी',
'%(system_name)s has sent an email to %(email)s to verify your email address.\\nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters.': '%(system_name)s ले एउटा इमेल %(email)s लाई तपाईँको इमेल ठेगाना प्रमाणित गर्नको निम्ती पठाएको छ । \\n कृपया यो ठेगानालाई प्रमाणित गर्नको निम्ति तपाईंको इमेल हेर्नुहोस् । तपाईंले यो इमेल प्राप्त गर्नु भएन भने कृपय जंक इमेलवा स्पाम फिल्टरमा हेर्नुहोला ।',
'%s and %s': '%s र %s',
'%s AND %s': '%s र %s',
'%s or %s': '%s अथवा %s',
'%s OR %s': '%s अथवा %s',
'& then click on the map below to adjust the Lat/Lon fields': 'त्यसपछी Lat/Lon फिल्डहरूलाई मिलाउनको निम्ति तलको नक्सामा क्लीक गर्नुहोस् ।',
'(filtered from _MAX_ total entries)': '(जम्मा भर्नाहरूबाट_धेरैबाट_ छानिएको)',
'* Required Fields': '* आवश्यक ठाउँहरू',
'...or add a new bin': '...वा नयाँ बिन राख्नुहोस् ।',
'1 location, shorter time, can contain multiple Tasks': '१ स्थान, छोटो समय, मा बहु कार्यहरू हुनसक्छन् ।',
'1. Fill the necessary fields in BLOCK CAPITAL letters.': '१. ठूलो अक्षर प्रयोग गरि दिएको खालि ठाउँ भर्नुहोस् ।',
'2. Always use one box per letter and leave one box space to separate words.': '२. एउटा अक्षरको निम्ति एउटा कोठा प्रयोग गर्नुहोस् र प्रत्येक शब्द पछि एउटा कोठा खालि छाड्नुहोस् ।',
'3. Fill in the circles completely.': '३. गोलाकारमा पूर्णरूपले भर्नुहोस् ।',
'3W': '३ डब्लू',
'3W Report': '३ डब्लू रिपोर्ट',
'A brief description of the group (optional)': 'समूहको संक्षिप्त विवरण (एच्छिक) ',
'A file in GPX format taken from a GPS.': 'जि.पि.एस. बाट जि.पि.एक्स्. फाइल लिइयो ।',
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "यो क्षेत्रको भौगोलिक क्षेत्रलाई देखाउने स्थान । यो एउटा स्थान बनावटबाटको ठाउँ, वा 'समूह स्थान', वा स्थान जस्को क्षेत्रको निम्ति घेरा हुन सक्दछ ।",
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'विशेषता समूहमा राखिएको चिन्हलाई पुन:लेखन गर्नु परेमा कुनै स्थानमा प्रयोग गरिएको चिन्ह राखिनेछ ।',
'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.': 'एउटा परियोजना उद्देश्य चिन्हको पात्रोमा मुख्य मिति हुँदछ जस्ले सम्पूर्ण लक्षमा गरिएको प्रअन्तिमि विवरण देखाउँदछ ।',
'A strict location hierarchy cannot have gaps.': 'बाक्लो स्थान बनावटमा खालि ठाउँ हुँदैन',
'A task is a piece of work that an individual or team can do in 1-2 days.': 'कुनै पनि सानो काम ठुलो कामको टुक्रा हो, जसलाई एकजना ब्यक्तिले तथा समूहले १-२ दिनमा पुरा गर्न सक्दछ ।',
"A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year": 'यदि कुनै स्वयम्-सेवकले अन्तिम वर्ष ८ वा सो भन्दा बढि समय (घण्टा) प्रति महिना सम्म कार्यक्रम वा तालिमहरूमा भाग लिएको छ भने उसलाई सकृय भनेर परिभाषित गर्न सकिन्छ ।',
'Abbreviation': 'संक्षेप',
'About': 'बारेमा',
'About Us': 'हाम्रो बारेमा',
'Academic': 'शैक्षिक',
'Access denied': 'प्रकृया रोकावट गरिएको छ ',
'Account Registered - Please Check Your Email': 'एकाउन्ट रजिष्टर भएको छ- कृपया तपाईंको इमेल हेर्नुहोस् ।',
'Acronym': 'छोटकरी रुप',
"Acronym of the organization's name, eg. IFRC.": 'संस्थाको नामको छोटकारी शब्द, जस्तै. आइ.एफ.आर.सी.',
'ACTION REQUIRED': 'कामको आवश्यकता पर्छ',
'Activate': 'सुचारू',
'activate to sort column ascending': 'छोटो कोलममा सानो देखि ठूलो मिलाउन सक्रिय गर्नुहोस्',
'activate to sort column descending': 'छोटो कोलममा ठूलो देखि सानो मिलाउन सक्रिय गर्नुहोस्',
'Active': 'सुचारित',
'Active Missions': 'सुचारु मिस्सनहरू',
'Active?': 'सुचारु?',
'Activities': 'कृयाकलापहरू',
'Activities matching Assessments': 'निर्धारण गरिएकोसँग मिल्दो कृयाकलापहरू',
'Activity': 'कृयाकलाप ',
'Activity Added': 'कृयाकलाप राखियो',
'Activity Deleted': 'कृयाकलाप हटाइयो ',
'Activity Details': 'कृयाकलाप विवरण',
'Activity Organization': 'कृयाकलाप वनावट',
'Activity Organization Added': 'कृयाकलाप वनावट राखियो ',
'Activity Organization Deleted': 'कृयाकलाप वनावट हटाइयो ',
'Activity Organization Updated': 'कृयाकलाप वनावट परिमार्जन गरियो ',
'Activity Organizations': 'कृयाकलाप वनावटहरू',
'Activity Report': 'कृयाकलाप प्रतिवेदन',
'Activity Type': 'कृयाकलापको प्रकार',
'Activity Type Added': 'कृयाकलापको प्रकार राखियो',
'Activity Type added to Activity': 'कृयाकलापमा, कृयाकलापको प्रकार राखियो ',
'Activity Type added to Project Location': 'परियोजनाको स्थानमा कृयाकलापको प्रकार राखियो ',
'Activity Type Deleted': 'कृयाकलापको प्रकार हटाइयो',
'Activity Type removed from Activity': 'कृयाकलापबाट, कृयाकलापको प्रकार हटाइयो ',
'Activity Type removed from Project Location': 'परियोजनाको स्थानबाट, कृयाकलापको प्रकार हटाइयो ',
'Activity Type Updated': 'कृयाकलापको प्रकार परिमार्जन गरियो',
'Activity Types': 'कृयाकलापका प्रकारहरू',
'Activity Updated': 'कृयाकलाप परिमार्जन गरियो ',
'Add': 'थप्ने',
'Add %(site_label)s Status': 'थप्ने %(site_label)s अवस्था',
'Add a new certificate to the catalog.': 'तालिकामा नयाँ प्रमाणपत्र राख्नुहोस्',
'Add a new competency rating to the catalog.': 'तालिकामा नयाँ प्रतिस्पर्धा स्तर राख्नुहोस्',
'Add a new membership type to the catalog.': 'तालिकामा नयाँ सदस्यता प्रकार राख्नुहोस्',
'Add a new program to the catalog.': 'तालिकामा नयाँ कार्यक्रम राख्नुहोस्',
'Add a new skill type to the catalog.': 'तालिकामा नयाँ सिप प्रकार राख्नुहोस्',
'Add a Person': 'ब्यक्ति राख्नुहोस्',
'Add Activity Type': 'नयाँ कृयाकलाप प्रकार',
'Add Activity Type to Activity': 'कृयाकलापमा, कृयाकलापको प्रकार राख्नुहोस्',
'Add Activity Type to Project Location': 'परियोजनाको स्थानमा कृयाकलापको प्रकार राख्नुहोस् ',
'Add Address': 'ठेगाना राख्नुहोस् ',
'Add Affiliation': 'स्विकृती राख्नुहोस् ',
'Add all organizations which are involved in different roles in this project': 'यस परियोजनामा फरक-फरक भूमिका निभाउने संस्थाहरू राख्नुहोस्',
'Add Annual Budget': 'नयाँ वार्षिक बजेट',
'Add Appraisal': 'मुल्यङकन राख्नुहोस् ',
'Add Award': 'पुरस्कार राख्नुहोस् ',
'Add Beneficiaries': 'भागिदारहरू राख्नुहोस् ',
'Add Branch Organization': 'शाखा संघ राख्नुहोस् ',
'Add Certificate for Course': 'पाठ्यक्रम प्रमाणपत्र राख्नुहोस् ',
'Add Certification': 'प्रमाणिकरण राख्नुहोस् ',
'Add Contact': 'सम्पर्क राख्नुहोस् ',
'Add Contact Information': 'सम्पर्क जानकारी राख्नुहोस् ',
'Add Credential': 'कागजात राख्नुहोस् ',
'Add Data to Theme Layer': 'स्वरूप (थिम) को तहमा आंकडा राख्नुहोस् ',
'Add Deployment': 'विकास राख्नुहोस् ',
'Add Education Detail': 'शैक्षिक विवरण राख्नुहोस् ',
'Add Group Member': 'समूह सदस्य राख्नुहोस् ',
'Add Hazard to Project': 'परियोजनामा खतरा राख्नुहोस्',
'Add Hours': 'घण्टा राख्नुहोस्',
'Add Identity': 'परिचय राख्नुहोस्',
'Add Image': 'तस्बिर राख्नुहोस् ',
'Add Keyword': 'मुख्यशब्द राख्नुहोस् ',
'Add Layer from Catalog': 'तालिकाबाट सतह राख्नुहोस् ',
'Add Layer to this Profile': 'यो प्रोफाइलमा सतह राख्नुहोस् ',
'Add Line': 'धर्का राख्नुहोस् ',
'Add Location to Organization': 'संस्थामा स्थान राख्नुहोस् ',
'Add Log Entry': 'तालिका प्रवेश राख्नुहोस् ',
'Add Member': 'सदस्य राख्नुहोस् ',
'Add Membership': 'सदस्यता राख्नुहोस्',
'Add new and manage existing members.': 'नयाँ थप र भैरहेको सदस्यहरुलाई व्यवस्थापन गर्न',
'Add new and manage existing staff.': 'नयाँ थप र भैरहेको कर्मचारीहरुलाई व्यवस्थापन गर्न',
'Add new and manage existing volunteers.': 'नयाँ थप र भैरहेको स्वयंसेवकहरुलाई व्यवस्थापन गर्न',
'Add New Address': 'नयाँ ठेगाना राख्नुहोस्',
'Add New Affiliation': 'नयाँ स्वीकृती राख्नुहोस्',
'Add New Appraisal': 'नयाँ मुल्याङ्कन राख्नुहोस्',
'Add New Award': 'नयाँ पुरस्कार राख्नुहोस्',
'Add New Beneficiaries': 'नयां भागिदारहरू राख्नुहोस् ',
'Add New Beneficiary Type': 'नयाँ भागिदारको प्रकार राख्नुहोस् ',
'Add New Branch': 'नयाँ शाखा राख्नुहोस् ',
'Add New Branch Organization': 'नयाँ शाखा संस्था राख्नुहोस् ',
'Add New Campaign': 'नयाँ क्याम्पिन राख्नुहोस्',
'Add New Certificate': 'नयाँ प्रमाणपत्र राख्नुहोस्',
'Add New Certification': 'नयाँ प्रमाणिकरण राख्नुहोस्',
'Add New Cluster': 'नयाँ समूह राख्नुहोस्',
'Add New Coalition': 'नयाँ संघ राख्नुहोस्',
'Add New Community': 'नयाँ समूदाय राख्नुहोस्',
'Add New Competency Rating': 'नयाँ प्रतिस्पर्धाको स्तर राख्नुहोस्',
'Add New Contact': 'नयाँ संम्पर्क राख्नुहोस्',
'Add New Course': 'नयाँ पाठ्यक्रम राख्नुहोस्',
'Add New Course Certificate': 'नहाँ पाठ्यक्रम प्रमाणपत्र राख्नुहोस्',
'Add New Credential': 'नयाँ कागजात राख्नुहोस्',
'Add New Data to Theme Layer': 'स्वरूपको तहमा नयाँ आंकडा राख्नुहोस्',
'Add New Department': 'नयाँ मन्त्रालय राख्नुहोस्',
'Add New Deployment': 'नयाँ कार्य राख्नुहोस्',
'Add New Donor': 'नयाँ दाता राख्नुहोस्',
'Add New Entry': 'नयाँ प्रवेश राख्नुहोस्',
'Add New Facility': 'नयाँ सूविधा राख्नुहोस्',
'Add New Facility Type': 'नयाँ सूविधाको प्रकार राख्नुहोस्',
'Add New Feature Layer': 'नयाँ विशेषता तह राख्नुहोस्',
'Add New Group': 'नयाँ समूह राख्नुहोस्',
'Add New Hazard': 'नयाँ खतरा राख्नुहोस्',
'Add New Hours': 'नयाँ घण्टाहरू राख्नुहोस्',
'Add New Identity': 'नयाँ परिचय राख्नुहोस्',
'Add New Image': 'नयाँ तस्विर राख्नुहोस्',
'Add New Job Title': 'नयाँ कामको पद राख्नुहोस्',
'Add New Keyword': 'नयाँ मुख्यशब्द राख्नुहोस्',
'Add New Layer': 'नयाँ तह राख्नुहोस्',
'Add New Layer to Symbology': 'चिन्हतामा नयाँ तह राख्नुहोस्',
'Add New Location': 'नयाँ स्थान राख्नुहोस्',
'Add New Location Hierarchy': 'नयाँ स्थान बनावट राख्नुहोस्',
'Add New Log Entry': 'नयाँ प्रवेश तालिका राख्नुहोस्',
'Add New Mailing List': 'नयाँ ठेगाना तालिका राख्नुहोस्',
'Add New Map Profile': 'नयाँ नक्सा बनावट राख्नुहोस्',
'Add New Marker': 'नयाँ चिन्ह राख्नुहोस्',
'Add New Member': 'नयाँ सदस्य राख्नुहोस्',
'Add New Membership': 'नयाँ सदस्यता राख्नुहोस्',
'Add New Membership Type': 'नयाँ सदस्यता प्रकार राख्नुहोस्',
'Add New Milestone': 'नयाँ उद्देश्य राख्नुहोस्',
'Add New Network': 'नयाँ नेटवर्क राख्नुहोस्',
'Add New Office': 'नयाँ कार्यलय राख्नुहोस्',
'Add New Office Type': 'नयाँ कार्यलय प्रकार राख्नुहोस्',
'Add New Organization': 'नयाँ संस्था राख्नुहोस्',
'Add New Organization Type': 'नयाँ संस्थाको प्रकार राख्नुहोस्',
'Add New Output': 'नयाँ नतिजा राख्नुहोस्',
'Add New Participant': 'नयाँ सहभागी राख्नुहोस्',
"Add New Person's Details": 'नयाँ ब्यक्तिको विवरण राख्नुहोस्',
'Add New PoI Type': 'नयाँ पोलको प्रकार राख्नुहोस्',
'Add New Point of Interest': 'नयाँ रुचीको बुँदा राख्नहोस्',
'Add New Policy or Strategy': 'नयाँ नियम तथा लक्ष राख्नुहोस्',
'Add New Professional Experience': 'नयाँ ब्यक्तिअन्तिम अनुभव राख्नुहोस्',
'Add New Profile Configuration': 'नयाँ प्रोफाइल बनावट राख्नुहोस्',
'Add New Program': 'नयाँ कार्यक्रम राख्नुहोस्',
'Add New Project': 'नयाँ परियोजना राख्नुहोस्',
'Add New Projection': 'नयाँ योजना राख्नुहोस्',
'Add New Record': 'नयाँ विवरण राख्नुहोस्',
'Add New Region': 'नया क्षेत्र राख्नुहोस',
'Add New Resource': 'नयाँ स्रोत राख्नुहोस्',
'Add New Response Summary': 'नयाँ प्रतिकृया संक्षेप राख्नुहोस्',
'Add New Role': 'नयाँ भूमिका राख्नुहोस्',
'Add New Room': 'नयाँ कोठा राख्नुहोस्',
'Add New Sector': 'नयाँ क्षेत्र राख्नुहोस्',
'Add New Service': 'नयाँ सेवा राख्नुहोस्',
'Add New Skill': 'नयाँ सिप राख्नुहोस्',
'Add New Skill Equivalence': 'नयाँ सिप सरह राख्नुहोस्',
'Add New Skill Type': 'नयाँ सिपको प्रकार राख्नुहोस्',
'Add New Staff Assignment': 'नयाँ कर्मचारीको काम राख्नुहोस्',
'Add New Staff Member': 'नयाँ कर्मचारी सदस्य राख्नुहोस्',
'Add New Status': 'नयाँ अवस्था राख्नुहोस्',
'Add New Symbology': 'नयाँ चिन्हता राख्नुहोस्',
'Add New Symbology for Layer': 'तहको लागि नयाँ चिन्हता राख्नुहोस्',
'Add New Task': 'नयाँ काम राख्नुहोस्',
'Add New Team': 'नयाँ समूह राख्नुहोस्',
'Add New Team Member': 'नयाँ समूह सदस्य राख्नुहोस्',
'Add New Theme': 'नयाँ स्वरूप राख्नुहोस्',
'Add New Training': 'नयाँ तालिम राख्नुहोस्',
'Add New Training Event': 'नयाँ तालिम कार्यक्रम राख्नुहोस्',
'Add New Volunteer': 'नयाँ स्वयम सेवक राख्नुहोस्',
'Add New Volunteer Cluster': 'नयाँ स्वयम सेवक कागजात राख्नुहोस्',
'Add New Volunteer Cluster Position': 'नयाँ स्वयम सेवकको पद कागजात राख्नुहोस्',
'Add New Volunteer Cluster Type': 'नयाँ स्वयम सेवक कागजातको प्रकार राख्नुहोस्',
'Add New Volunteer Role': 'नयाँ स्वयम सेवक भूमिका राख्नुहोस्',
'Add Office': 'कार्यलय राख्नुहोस्',
'Add Organization': 'संस्था राख्नुहोस्',
'Add Organization to Activity': 'कृयाकलापको बनावट राख्नुहोस्',
'Add Organization to Project': 'परियोजनामा संस्था राख्नुहोस्',
'Add Participant': 'सहभागी राख्नुहोस्',
'Add Person': 'ब्यक्ति राख्नुहोस्',
"Add Person's Details": 'ब्यक्तिको विवरण राख्नुहोस्',
'Add PoI Type': 'पोलको प्रकार राख्नुहोस्',
'Add Point': 'बुँदा राख्नुहोस्',
'Add Point of Interest': 'रूचीको बँदा राख्नुहोस्',
'Add Policy or Strategy': 'नियम तथा लक्ष राख्नुहोस्',
'Add Polygon': 'बहुभुजा राख्नुहोस्',
'Add Professional Experience': 'व्यबसायिक अनुभव राख्नुहोस्',
'Add Profile Configuration': 'प्रोफाइल बनावट राख्नुहोस्',
'Add Profile Configuration for this Layer': 'यो तहको लागि प्रोफाइल बनावट राख्नुहोस्',
'Add Project': 'परियोजना राख्नुहोस्',
'Add Response Summary': 'प्रतिकृया संक्षेप राख्नुहोस्',
'Add Role': 'भूमिका राख्नुहोस्',
'Add Room': 'कोठा राख्नुहोस्',
'Add saved search': 'संचित खोजी राख्नुहोस्',
'Add search': 'खोजी राख्नुहोस्',
'Add Sector': 'क्षेत्र राख्नुहोस्',
'Add Sector to Organization': 'संस्थामा क्षेत्र राख्नुहोस्',
'Add Sector to Project': 'परियोजनामा क्षेत्र राख्नुहोस्',
'Add Sector to Theme': 'स्वरूपमा क्षेत्र राख्नुहोस्',
'Add Service': 'सेवा राख्नुहोस्',
'Add Service to Organization': 'संस्थामा सेवा राख्नुहोस्',
'Add Skill': 'सिप राख्नुहोस्',
'Add Skill Equivalence': 'सिप सरह राख्नुहोस्',
'Add Skill Type': 'सिपको प्रकार राख्नुहोस्',
'Add Staff Assignment': 'कर्मचारीको काम राख्नुहोस्',
'Add Staff Member to Project': 'परियोजनामा कर्मचारी सदस्य राख्नुहोस्',
'Add Status': 'अवस्था राख्नुहोस्',
'Add Symbology': 'चिन्हता राख्नुहोस्',
'Add Symbology for Layer': 'तहको लागि चिन्हता राख्नुहोस्',
'Add Task': 'काम राख्नुहोस् ',
'Add Team': 'समूह राख्नुहोस् ',
'Add Team Member': 'समूह सदस्य राख्नुहोस् ',
'Add Theme': 'स्वरूप राख्नुहोस्',
'Add Theme to Activity': 'कृयाकलापमा स्वरूप राख्नुहोस्',
'Add Theme to Project': 'परियोजनामा स्वरूप राख्नुहोस्',
'Add Theme to Project Location': 'परियोजना स्थानमा स्वरूप राख्नुहोस्',
'Add this entry': 'यो प्रवेश राख्नुहोस',
'Add to a Team': 'समूहमा राख्नुहोस्',
'Add Training': 'तालिम राख्नुहोस्',
'Add...': 'राख्नुहोस्…',
'Address': 'ठेगाना',
'Address added': 'ठेगाना संचित गरियो',
'Address deleted': 'ठेगाना हटाइयो',
'Address Details': 'ठेगाना विवरण',
'Address Mapped': 'ठेगाना नक्सा',
'Address NOT Mapped': 'नक्सामा नदेखाइएको ठेगाना',
"Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": ' महत्वपूर्ण ब्यक्तिको निम्ति यो तहको लागि तस्विरको ठेगाना । सर्भरले जे देखाउँछ त्यसैलाई देखाउन (जुन जिओवेबकेचको माध्यमबाट कुनैपनि हालतमा काम गर्दैन) को साटो यस्ले नियन्त्रित स्थिर तस्विर प्रयोग गर्न सकिन्छ ।',
'Address Type': 'ठेगाना प्रकार',
'Address updated': 'ठेगाना परिमार्जन गरियो',
'Addresses': 'ठेगानाहरू',
'Adjust Stock Levels': 'भंडारको स्तर मिलाउनुहोस्',
'Admin': 'संचालक',
'Admin Assistant': 'संचालक सहयोगी',
'Administrador Database': 'संचालक तथा डाटाबेस',
'Administration': 'प्रशासन',
'Adolescent (12-20)': 'किशोर, किशोरी (१२-२०)',
'Adult (21-50)': 'जवान (२१-५०)',
'Advanced Search': 'बृहत खोजी',
'Advocacy': 'वकालत',
'Affiliation added': 'स्वीकृती संचित गरियो',
'Affiliation deleted': 'स्वीकृती हटाइयो',
'Affiliation Details': 'स्वीकृती विवरण',
'Affiliation updated': 'स्वीकृती परिमार्जन गरियो',
'Affiliations': 'स्वीकृतीहरू',
'Age': 'उमेर',
'Age Group': 'उमेर समूह',
'Airport': 'बिमान स्थल',
'Alerts': 'सचेतनाहरू',
'All': 'सबै',
'All Entities': 'सबै समूहहरू',
'All Open Tasks': 'सबै खुला कामहरू',
'All Records': 'सबै रेकर्डहरू',
'All selected': 'सबै छानियो',
'All Tasks': 'सबै कामहरू',
'Amount': 'मात्रा',
'Amount of the Project Budget spent at this location': 'यो स्थानमा खर्च गरिएको परियोजना बजेटको मात्रा',
'An error occured, please %(reload)s the page.': 'गल्ति भएको छ, कृपया पेजलाई %(reload)s गर्नुहोस् ।',
'An ESRI Shapefile (zipped)': 'ए.एस.आर.आइ. आकार फाइल (जिप गरिएको)',
'an individual/team to do in 1-2 days': '१-२ दिन(हरू)मा एक व्यक्ति/समूहले गर्नु पर्ने',
'and': 'र',
'Annual Budget': 'वार्षिक बजेट',
'Annual Budget deleted': 'वार्षिक बजेट हटाइएको छ',
'Annual Budget updated': 'वार्षिक बजेट परिमार्जन गरिएको छ',
'Annual Budgets': 'वार्षिक बजेटहरू',
'Anonymous': 'विविध',
'anonymous user': 'नामरहितको प्रयोगकर्ता',
'ANY': 'कुनैपनि',
'Any': 'कुनैपनि',
'Appeal Code': 'अपिल कोड',
'Applicable to projects in Pacific countries only': 'प्यसिफिक देशहरूको परियोजनामा मात्र लागु हुने',
'Application': 'लागु',
'Application Permissions': 'लागु अनुमतिहरू',
'Appraisal added': 'मुल्याङ्कन संचित गरियो',
'Appraisal deleted': 'मुल्याङ्कन हटाइयो',
'Appraisal Details': 'मुल्याङ्कन विवरण',
'Appraisal updated': 'मुल्याङ्कन परिमार्जन गरियो',
'Appraisals': 'मुल्याङ्कनहरू',
'Approve': 'प्रमाणित',
'Approver': 'प्रमाणित गर्ने',
'ArcGIS REST Layer': 'एर्क जि.आइ.एस. आर.इ.एस.टि. तह',
'Are you sure you want to delete this record?': 'तपाईं यो रेकर्ड हटाउने कुरामा निश्चित हुनुहुन्छ?',
'Assessment': 'लेखाजोखा',
'Assessment and Community/Beneficiary Identification': 'लेखाजोखा र सामुदायिक/ लाभान्वितहरुको पहिचान',
'Assessment Templates': 'लेखाजोखा फाराम',
'Assessments': 'लेखाजोखाहरु',
'Asset': 'सामाग्री',
'Asset Number': 'सामग्रीको संख्या',
'Assets': 'सामाग्रीहरु',
'Assign %(staff)s': 'काम %(staff)s',
'Assign another Role': 'अर्को भूमिका मुल्यङ्कन',
'Assign Asset': 'मुल्याङ्कन मा',
'Assign Role to a User': 'प्रयोगकर्ताको मुल्याङ्कन भूमिका',
'Assign Staff': 'मुल्याङ्कन कर्मचारी',
'Assigned': 'काममा सहभागी गरियो',
'Assigned To': 'को लागि सहभागी गरियो',
'Assigned to': 'को लागि सहभागी गरियो',
'Association': 'संघ',
'Attachments': 'अटाच्मेन्स्',
'Attributes': 'विशेषताहरू',
'Attribution': 'विशेषता',
'Australian Dollars': 'अष्ट्रेलियन डलर',
'Authentication Required': 'प्रमाणिकरण आवश्यक',
'Auxiliary Role': 'सहायक भूमिका',
'Availability': 'उपलब्धता',
'Available Forms': 'उपलब्ध फारम',
'Available in Viewer?': 'हेर्नेको लागि उपलब्ध?',
'Avalanche': 'हिमपहिरो',
'average': 'साधारण',
'Average Rating': 'दित्तिय भूमिका',
'Award': 'पुरस्कार',
'Award added': 'पुरस्कार संचित गरियो',
'Award deleted': 'पुरस्कार हटाइयो',
'Award updated': 'पुरस्कार परिमार्जन गरियो',
'Awards': 'पुरस्कारहरू',
'Awareness Raising': 'जनचेतना अभिवृद्धि',
'Back to Roles List': 'पछाडि भूमिका तालिकामा',
'Back to Top': 'पछाडि सिरानमा',
'Back to Users List': 'पछाडि प्रयोगकर्ता तालिकामा',
'Background Color': 'पृष्ठभूमी रंग',
'Bahai': 'बहाइ',
'Baldness': 'मोटाइ',
'Base Layer?': 'आधारभूत तह?',
'Base Layers': 'आधारभूत तहहरू',
'Base Location': 'आधारभुत स्थान',
'Basic Details': 'आधारभुत विवरण',
'Basic Search': 'आधारभुत खोजी',
'Bdrt (Branch Disaster Response Teams)': 'शाखा प्रकोप प्रतिकृया समूहहरू',
'Behaviour Change Communication': 'व्यवहार परिवर्तन संचार',
'Beneficiaries': 'भागिदारहरू',
'Beneficiaries Added': 'भागिदारहरू संचित गरियो',
'Beneficiaries Deleted': 'भागिदारहरू हटाइयो',
'Beneficiaries Details': 'भागिदारहरू विवरण',
'Beneficiaries Updated': 'भागिदारहरू परिमार्जन गरियो ',
'Beneficiary Report': 'भागिदार प्रतिवेदन',
'Beneficiary Type': 'भागिदार प्रकार',
'Beneficiary Type Added': 'भागिदार प्रकार राखियो',
'Beneficiary Type Deleted': 'भागिदार प्रकार हटाइयो',
'Beneficiary Type Updated': 'भागिदार प्रकार परिमार्जन गरियो',
'Beneficiary Types': 'भागिदार प्रकारहरू',
'Better Programming Initiative Guidance': 'उपयुक्त योजना पहल निर्देशन',
'Bilateral': 'सहकारी संस्था राख्नुहोस्',
'Bio data': 'बायोडाटा',
'Bing Layer': 'बिंग तह',
'black': 'कालो',
'Blocked': 'रोकावट गरिएको',
'blond': 'खैरो',
'Blood Banking': 'रअन्तिम बैंकिङ',
'Blood Donation and Services': 'रक्तदान सेवा',
'Blood Donor Recruitment': 'रक्तदाता नियुक्ती',
'Blood Type (AB0)': 'रअन्तिमको प्रकार (ए.बि.ओ.)',
'blue': 'निलो',
'Body': 'शरिर',
'Body Hair': 'शरिरीक रौं',
'Boq and Cost Estimation': 'बग तथा खर्च अडकल',
'Both': 'दुवै',
'Branch': 'शाखा',
'Branch Coordinator': 'शाखा सहकर्ता',
'Branch Organization added': 'शाखा संस्था संचित गरियो',
'Branch Organization deleted': 'शाखा संस्था हटाइयो ',
'Branch Organization Details': 'शाखा संस्था विवरणहरू',
'Branch Organization updated': 'शाखा संस्था परिमार्जन गरियो',
'Branch Organizations': 'शाखा संस्थाहरू',
'Branch Planning': 'शाखा योजना',
'Branches': 'शाखाहरू',
'Breakdown': 'फुट',
'brown': 'खैरो',
'Buddhist': 'बौद्दिस्ट',
'Budget': 'बजेट',
'Buffer': 'बफर',
'Building Name': 'भवन नाम',
'by': 'अनुसार',
'by %(person)s': '%(person)s अनुसार',
'By selecting this you agree that we may contact you.': 'यसलाइ छान्नुको अर्थ हामीले तपाईँलाई सम्पर्क गर्न सक्छौँ भन्नेमा सहमती हुनुभयो',
'Calendar': 'पात्रो',
'Camp': 'क्याम्प',
'Campaign': 'क्याम्पिन',
'Campaign Added': 'क्याम्पिन राखियो',
'Campaign Deleted': 'क्याम्पिन हटाइयो',
'Campaign Message': 'क्याम्पिन संदेश',
'Campaign Updated': 'क्याम्पिन परिमार्जन गरियो',
'Campaigns': 'क्याम्पिनहरू',
'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'खुलासडकनक्सा (.ओ.एस.एम.) वा बिकल्प बाट धुर्व पढ्न सक्छ ।',
'Canadian Dollars': 'क्यानडियन डलर',
'Cancel': 'रद्द',
'Cancel Crop': 'काटाइ रद्द',
'Cancel editing': 'बनाउन रद्द',
'Canceled': 'रद्द गरियो',
'cannot be deleted.': 'हटाउन सकिदैन',
'Cannot make an Organization a branch of itself!': 'शाखा आफैको संघ बनाउन सकिंदैन !',
'Cannot open created OSM file!': 'खुला रचना गरिएको खुलासडकनक्सा खुल्न सक्दैन ',
'Cannot read from file: %(filename)s': 'फाइलबाट खुल्नसकेन: %(filename)s',
'Capacity Building': 'क्षमता विकास',
'Capacity Building of Governance': 'अधिकारको क्षमता निर्माण हुँदै',
'Capacity Building of Management Staff': 'ब्यबस्थापन कर्मचारीको क्षमता निर्माण',
'Capacity Building of Staff': 'कर्मचारीको क्षमता विकास',
'Capacity Building of Volunteers': 'स्वयमसेवकहरुको क्षमता विकास',
'Capacity Development': 'क्षमता विकास',
'Catalogs': 'तालिकाहरू',
'Catchment Protection': 'अटाच्मेन्स् सुरक्षा',
'Category': 'वर्गिकरण',
'caucasoid': 'क्यास्कोसाइड',
'CDRT (Community Disaster Response Teams)': 'समूदाय प्रकोप प्रतिकृया समूहहरू',
'Cell Tower': 'सेल टावर',
'Central': 'केन्द्रीय',
'Certificate': 'प्रमाणपत्र',
'Certificate added': 'प्रमाणपत्र राखियो',
'Certificate Catalog': 'प्रमाणपत्र तालिका',
'Certificate deleted': 'प्रमाणपत्र हटाइयो',
'Certificate Details': 'प्रमाणपत्र विवरण',
'Certificate List': 'प्र्रमाणपत्र सूची',
'Certificate updated': 'प्रमाणपत्र परिमार्जन गरियो',
'Certificates': 'प्रमाणपत्रहरू',
'Certification added': 'प्रमाणीकरण संचित गरियो',
'Certification deleted': 'प्रमाणीकरण हटाइयो',
'Certification Details': 'प्रमाणीकरण विवरणहरू',
'Certification updated': 'प्रमाणीकरण परिमार्जन गरियो',
'Certifications': 'प्रमाणीकरणहरू',
'Certifying Organization': 'प्रमाणीत गर्ने संस्थाहरू',
'Chairman': 'अध्यक्ष',
'Change Password': 'पासवर्ड परिवर्तन',
'Chapter': 'अध्याय',
'Check all': 'सबैलाई छान्नुहोस्',
'Check this to make your search viewable by others.': 'तपाईंको खोजी अरूले हेर्न मिल्ने बनाउनको निम्ति यसलाई छान्नुहोस्',
'Check-In': 'प्रवेश',
'Check-Out': 'बाहिरिनु',
'Child (2-11)': 'बालबालिका (२-११)',
"Children's Education": 'वालवालिकाको शिक्षा',
'Choose Country': 'देश छान्नुहोस्',
'Christian': 'इसाइ',
'Civil Society/NGOs': 'नागरीक समाज/गैरसरकारी संस्थाहरु',
'Clean-Up Campaign': 'सफाइ क्याम्पिन',
'Cleaner': 'सफा गर्ने',
'Clear': 'सफाइ',
'clear': 'सफाइ',
'Clear All': 'सबै सफाई',
'Clear filter': 'क्लियर फिल्टर',
'Clear selection': 'छानिएको सफाइ',
'Click anywhere on the map for full functionality': 'पूर्ण कार्यप्रस्तुतिको लागि नक्साको जुनसुकै भागमा पनि सफाइ',
'click here': 'यहाँ क्लिक गर्नुहोस्',
'Click on the link': 'लिन्क छान्नुहोस्',
'Click on the slider to choose a value': 'छान्नको निम्ति स्लाइडरमा छान्नुहोस्',
'Click to edit': 'परिमार्जन गर्नको निम्ति क्लिक गर्नुहोस्',
'Click where you want to open Streetview': 'स्ट्रट भिउ खोल्न चहानुभएको जुनसुकै ठाउमा क्लिक गर्नुहोस्',
'Climate Change': 'जलवायु परिवर्तन',
'Climate Change Adaptation ': 'जलवायु परिर्वतन अनकुलता',
'Climate Change Mitigation': 'जलवायु परिर्वतन अल्पीकरण',
'Close': 'बन्द',
'Close map': 'नक्सा बन्द',
'Closed': 'वन्द',
'Club 25 / Pledge 25': 'संघ २५/सपथ२५',
'Cluster': 'समूह',
'Cluster added': 'समूह संचित गरियो',
'Cluster Attribute': 'समूह बनावट',
'Cluster deleted': 'समूह हटाइयो',
'Cluster Details': 'समूह विवरण',
'Cluster Distance': 'समूह दुरी',
'Cluster Threshold': 'समूह चावी',
'Cluster updated': 'समूह परिमार्जन गरियो',
'Clusters': 'समूहहरू',
'Coalition added': 'सहकारी संचित गरियो',
'Coalition Details': 'सहकारी विवरण',
'Coalition removed': 'सहकारी हटाइयो',
'Coalition updated': 'सहकारी परिमार्जन गरियो',
'Coalitions': 'सहकारीहरू',
'Coastal Conservation ': 'कानूनी संरक्षण',
'Code': 'कोड',
'Cold Wave': 'शित लहर',
'Comment': 'टिका टिप्पणी',
'Comments': 'टिका टिप्पणी',
'Commitments': 'समर्पणतहरू',
'Communicable Diseases': 'सरुवा रोगहरु',
'Communication': 'संचार',
'Communication Officer': 'संचार कर्मचारी',
'Communities': 'समूदायहरू',
'Community': 'समूदाय',
'Community Action Planning': 'सामुदायिक कार्य योजना र्तजुमा',
'Community Added': 'समूदाय थपिएको छ',
'Community-based DRR': 'समुदायमा आधारित विपद् जोखिम न्यूनीकरण',
'Community Based Health and First Aid (CBHFA)': 'समूदायमा आधारित स्वास्थ्य तथा प्राथमिक उपचार (सि.बि.एच.एफ.ए.)',
'Community Contacts': 'समूदाय सम्पर्कहरू',
'Community Deleted': 'समूदाय हटाइयो',
'Community Details': 'समूदाय विवरण',
'Community Disaster Awareness': 'समूदाय प्रकोप जागरण',
'Community Early Warning Systems': 'समूदाय पूर्वचेतावनी प्रणालीहरू',
'Community Health': 'सामुदायिक स्वास्थ्य',
'Community Health Committees': 'समूदाय स्वास्थ्य समाजहरू',
'Community Health Initiative/Projects': 'समूदाय स्वास्थ्य पहल/परियोजनाहरू',
'Community Health Risk Assessments': 'समूदाय स्वास्थ्य खतरा मुल्याङ्कनहरू',
'Community Mobilisation': 'सामुदायिक परिचालन',
'Community Mobilization': 'सामुदायिक परिचालन',
'Community Organisation': 'सामुदायिक संस्था',
'Community Organization': 'सामुदायिक संस्था',
'Community Preparedness': 'समूदाय पुर्वतयारी',
'Community Updated': 'समूदाय परिमार्जन गरियो',
'Company': 'कम्पनी',
'Competency': 'प्रतिस्पर्धा',
'Competency Rating': 'प्रतिस्पर्धा स्तर',
'Competency Rating added': 'प्रतिस्पर्धा स्तर संचित गरियो',
'Competency Rating Catalog': 'प्रतिस्पर्धा स्तर तालिका',
'Competency Rating deleted': 'प्रतिस्पर्धा स्तर हटाइयो',
'Competency Rating Details': 'प्रतिस्पर्धा स्तर विवरण',
'Competency Rating updated': 'प्रतिस्पर्धा स्तर परिमार्जन गरियो',
'Completed': 'पुरा भयो',
'Complex Emergency': 'जटिल आपत्काल',
'Complexion': 'छालाको रंग',
'Compromised': 'सम्झौता गरिएको',
'Config not found!': 'बनावट प्राप्त भएन!',
'Configuration': 'बनावट',
'Configure Layer for this Symbology': 'यो चिन्हताको लागि तह बनावट गर्नुहोस्',
'Confirmed': 'निश्चित गरियो',
'Confirming Organization': 'निश्चित गर्ने संस्था',
'Construction Activities': 'निर्माणकार्य सम्बन्धित कृयाकलापहरू',
'Construction of Transitional Shelter': 'संक्रमणकालिन आवासको निमार्ण',
'Construction of Water Supply Systems': 'खानेपानी आपूर्ति प्रणालीहरूको निर्माणकार्य',
'Contact': 'सम्पर्क',
'Contact added': 'सम्पर्क राखियो',
'Contact Added': 'सम्पर्क राखियो',
'Contact Data': 'सम्पर्क डाटा',
'Contact deleted': 'सम्पर्क हटाइयो',
'Contact Deleted': 'सम्पर्क हटाइयो',
'Contact Details': 'सम्पर्क विवरण',
'Contact Details updated': 'सम्पर्क विवरण परिमार्जन गरियो',
'Contact Info': 'सम्पर्क जानकारी',
'Contact Information': 'सम्पर्क जानकारी',
'Contact Information Added': 'सम्पर्क जानकारी राखियो',
'Contact Information Deleted': 'सम्पर्क जानकारी हटाइयो',
'Contact Information Updated': 'सम्पर्क जानकारी परिमार्जन गरियो',
'Contact Method': 'सम्पर्क तरिका',
'Contact People': 'सम्पर्क मानिसहरू',
'Contact Person': 'सम्पर्क ब्यक्ति',
'Contact Updated': 'सम्पर्क परिमार्जन गरियो',
'Contact Us': 'हामीलाई सम्पर्क गर्नुहोस्',
'Contact us': 'हामीलाई सम्पर्क गर्नुहोस्',
'Contacts': 'सम्पर्कहरू',
'Context': 'अवस्था',
'Contingency/Preparedness Planning': 'अपतकालिन पूर्वतयारी योजना',
'Contract End Date': 'सम्झौता सकिने मिति',
'Contractual Agreements (Community/Individual)': 'सम्झौता सहमतिहरू (समूदाय/ब्यक्तिगत)',
'Contractual Agreements (Governmental)': 'सम्झौता सहमतिहरू (सरकारी)',
'Controller': 'नियन्त्रक',
'Cook Islands': 'कुक आइस्ल्याण्ड',
'Coordinate Layer': 'निर्देशांक तह',
'Coordination and Partnerships': 'समन्वय र साझेदारी',
'Coordinator': 'संयोजक',
'COPY': 'कपी',
'Corporate Entity': 'कर्पोरेट अंग',
'Could not add person record': 'ब्यक्तिको विवरण राख्न सकिएन',
'Could not create record.': 'विवरण बन्न सकेन',
'Could not generate report': 'प्रतिवेदन तयार गर्न सकिएन',
'Could not merge records. (Internal Error: %s)': 'विवरणहरू एकिकृत गर्न सकिएन । (आन्तरिक कारण: %s)',
"Couldn't open %s!": 'खुल्न सकेन %s!',
'Country': 'देश',
'Country Code': 'देश कोड नम्बर',
'Country is required!': 'देश आवश्यक छ!',
'Course': 'पाठ्यक्रम',
'Course added': 'पाठ्यक्रम संचित गरियो',
'Course Catalog': 'पाठ्यक्रम तालिका',
'Course Certificate added': 'पाठ्यक्रम प्रमाण-पत्र संचित गरियो',
'Course Certificate deleted': 'पाठ्यक्रम प्रमाण-पत्र हटाइयो',
'Course Certificate Details': 'पाठ्यक्रम प्रमाण-पत्र विवरण',
'Course Certificate updated': 'पाठ्यक्रम प्रमाण-पत्र परिमार्जन गरियो',
'Course Certificates': 'पाठ्यक्रम प्रमाण-पत्रहरू',
'Course deleted': 'पाठ्यक्रम हटाइयो',
'Course Details': 'पाठ्यक्रम विवरण',
'Course updated': 'पाठ्यक्रम परिमार्जन गरियो',
'CREATE': 'बनाउनुहोस्',
'Create': 'बनाउनुहोस्',
"Create 'More Info'": "थप जानकारी' बनाउनुहोस्",
'Create a new facility or ensure that you have permissions for an existing facility.': 'नयाँ सुविधा बनाउनुहोस् वा हालको सुविधामा तपाईंलाई स्वीकृती छ भन्ने निश्चित गर्नुहोस्',
'Create a new Group.': 'नयाँ समूह बनाउनुहोस्',
'Create a new organization or ensure that you have permissions for an existing organization.': 'नयाँ संस्था बनाउनुहोस् वा हालको संस्थामा तपाईंलाई स्वीकृती छ भन्ने निश्चित गर्नुहोस्',
'Create a new Team.': 'नयाँ समूह बनाउनुहोस्',
'Create Activity': 'कृयाकलाप राख्नुहोस्',
'Create Activity Type': 'कृयाकलापको प्रकार राख्नुहोस्',
'Create Award': 'पुरस्कार राख्नुहोस्',
'Create Beneficiary Type': 'भागिदारको प्रकार राख्नुहोस्',
'Create Campaign': 'क्याम्पिन राख्नुहोस्',
'Create Certificate': 'प्रमाणपत्र राख्नुहोस्',
'Create Cluster': 'समूह राख्नुहोस्',
'Create Coalition': 'संघ राख्नुहोस्',
'Create Community': 'समूदाय राख्नुहोस्',
'Create Competency Rating': 'प्रतिस्पर्धाको स्तर राख्नुहोस्',
'Create Contact': 'सम्पर्क राख्नुहोस्',
'Create Course': 'पाठ्यक्रम राख्नुहोस्',
'Create Department': 'मन्त्रालय राख्नुहोस्',
'Create Facility': 'सुविधा राख्नुहोस्',
'Create Facility Type': 'सुविधाको प्रकार राख्नुहोस्',
'Create Feature Layer': 'विशेषता तह राख्नुहोस्',
'Create Group': 'समूह राख्नुहोस्',
'Create Hazard': 'खतरा राख्नुहोस्',
'Create Job': 'काम राख्नुहोस्',
'Create Job Title': 'कामको पद राख्नुहोस्',
'Create Layer': 'तह राख्नुहोस्',
'Create Location': 'स्थान राख्नुहोस्',
'Create Location Hierarchy': 'स्थानको बनावट राख्नुहोस्',
'Create Mailing List': 'ठेगाना तालिका राख्नुहोस्',
'Create Map Profile': 'नक्साको बनावट राख्नुहोस्',
'Create Marker': 'चिन्ह राख्नुहोस्',
'Create Member': 'सदस्य राख्नुहोस्',
'Create Membership Type': 'सदस्यताको प्रकार राख्नुहोस्',
'Create Milestone': 'उद्देश्य राख्नुहोस्',
'Create National Society': 'राष्ट्रिय समाज राख्नुहोस्',
'Create Network': 'नेटवर्क राख्नुहोस्',
'Create Office': 'कार्यालयको विवरण बनाउनुहोस्',
'Create Office Type': 'कार्यलयको प्रकार राख्नुहोस्',
'Create Organization Type': 'संस्थाको प्रकार राख्नुहोस्',
'Create Partner Organization': 'साझेदार संस्था राख्नुहोस्',
'Create Program': 'कार्यक्रम राख्नुहोस्',
'Create Project': 'परियोजनाहरु बनाउनुहोस्',
'Create Projection': 'प्रक्षेपण राख्नुहोस्',
'Create Record': 'रेकर्ड राख्नुहोस्',
'Create Region': 'क्षेत्र राख्नुहोस्',
'Create Resource': 'स्रोत राख्नुहोस्',
'Create Resource Type': 'स्रोत प्रकार राख्नुहोस्',
'Create Role': 'नयाँ भूमिका बनाउनुहोस्',
'Create Sector': 'क्षेत्र राख्नुहोस्',
'Create Staff Member': 'कर्मचारीको विवरण राख्नुहोस्',
'Create Team': 'समूह बनाउनुहोस्',
'Create Training Event': 'तालिम विवरण राख्नुहोस्',
'Create User': 'नयाँ प्रयोगकर्ता बनाउनुहोस्',
'Create Volunteer': 'स्वयम् सेवक राख्नुहोस्',
'Create Volunteer Cluster': 'स्वयम्-सेवक समूह राख्नुहोस्',
'Create Volunteer Cluster Position': 'स्वयम्-सेवक समूह पद राख्नुहोस्',
'Create Volunteer Cluster Type': 'स्वयम्-सेवक समूह प्रकार राख्नुहोस्',
'Create Volunteer Role': 'स्वयम सेवकको भूमिका राख्नुहोस्',
'Create Volunteer to Project': 'परियोजनामा स्वयम सेवक राख्नुहोस्',
'created': 'बनाइयो',
'Created By': 'द्वारा बनाइएको',
'Created on %s': '%s मा बनाइएको',
'Created on %s by %s': '%s मा %s द्वारा बनाइएको',
'Credential': 'कागजात',
'Credential added': 'कागजात संचित गरियो',
'Credential deleted': 'कागजात हटाइयो',
'Credential Details': 'कागजात विवरण',
'Credential updated': 'कागजात परिमार्जन गरियो',
'Credentialling Organization': 'कागजात व्यबस्थापन',
'Credentials': 'कागजातहरू',
'Critical Infrastructure': 'जोखिमयुक्त भौतिक पूर्वाधार',
'Crop Image': 'तस्विर काट्नुहोस्',
'curly': 'घुम्रिएको',
'Currency': 'मुद्रा',
'Current': 'हाल',
'current': 'वर्तमान',
'Current Home Address': 'हालको घरको ठेगाना',
'Current Location': 'हालको स्थान',
'Currently no Appraisals entered': 'हाल कुनै मुल्यांकन राखिएको छैन',
'Currently no Certifications registered': 'हाल कुनै प्रमाणीकरण राखिएको छैन',
'Currently no Course Certificates registered': 'हाल कुनैपनि पाठ्यक्रम प्रमाण-पत्रहरू दर्ता गरिएको छैन',
'Currently no Credentials registered': 'हाल कुनैपनि कागजातहरू दर्ता गरिएको छैन',
'Currently no entries in the catalog': 'हाल तालिकामा कुनैपनि कुरा राखिएको छैन',
'Currently no hours recorded for this volunteer': 'हाल यो स्वयम्-सेवकको कुनै पनि समय राखिएको छैन',
'Currently no Participants registered': 'हाल कुनैपनि सहभागीहरू दर्ता गरिएको छैन',
'Currently no Professional Experience entered': 'हाल कुनैपनि ब्यबसायिक अनुभव राखिएको छैन',
'Currently no programs registered': 'हाल कुनैपनि कार्यक्रम दर्ता गरिएको छैन',
'Currently no Skill Equivalences registered': 'हाल कुनैपनि सिप हरह दर्ता गरिएको छैन',
'Currently no Skills registered': 'हाल कुनैपनि सिपहरू दर्ता गरिएको छैन',
'Currently no staff assigned': 'हाल कुनैपनि कर्मचारीलाई काममा लगाइएको छैन',
'Currently no training events registered': 'हाल कुनैपनि तालिम कार्यक्रम दर्ता गरिएको छैन',
'Currently no Trainings registered': 'हाल कुनैपनि तालिहरू दर्ता गरिएको छैन',
'CV': 'बायोडाटा',
'Cyclone': 'भूमरी',
'Daily': 'दैनिक',
'Daily Work': 'दैनिक कार्य',
'dark': 'अँध्यारो',
'Data': 'आंकडा',
'Data added to Theme Layer': 'स्वरूप तहमा आँकडा संचित गरियो',
'Data import error': 'आँकडा राख्नु गल्ती',
'Data Type': 'आँकडा प्रकार',
'Data uploaded': 'आँकडा संचित गरियो',
'Database': 'डाटाबेस',
'Database Development': 'डाटावेश विकास',
'Date': 'मिति',
'Date Created': 'बनाइएको मिति',
'Date Due': 'म्याद मिति',
'Date Joined': 'प्रवेश मिति',
'Date Modified': 'मिति परिवर्तन गरियो',
'Date must be %(max)s or earlier!': 'मिति %(max)s वा अघिको हुनैपर्छ!',
'Date must be %(min)s or later!': 'मिति %(min)s वा पछिको हुनैपर्छ!',
'Date must be between %(min)s and %(max)s!': 'मिति %(min)s र %(max)s को बिचमा हुनैपर्छ !',
'Date of Birth': 'जन्म मिति',
'Date Printed': 'प्रिन्ट गरिएको मिति',
'Date Received': 'मिति प्राप्त गरियो',
'Date resigned': 'छोडेको मिति',
'Date/Time': 'मिति/समय',
'Day': 'दिन',
'De-duplicate': 'नक्कल प्रति हटाउनुहोस्',
'De-duplicate Records': 'विवरणका नक्कल प्रतिहरू हटाउनुहोस्',
'Dead Body': 'मृत शरिर',
'deceased': 'मृत',
'Deceased': 'मृत',
'Decision': 'निर्णय',
'Default': 'स्वचलानमा रहेको',
'Default Base layer?': 'स्वचलानमा रहेको आधारभुत तह?',
'Default Location': 'स्वचलानमा रहेको स्थान',
'Default Marker': 'स्वचलानमा रहेको चिन्ह',
'Default Realm': 'स्वचलानमा रहेको क्षेत्र',
'Default Realm = All Entities the User is a Staff Member of': 'स्वचलानमा रहेको क्षेत्र = प्रयोगकर्ता कर्मचारी सदस्य रहेका सबै अंगहरू',
'Default?': 'स्वचलानमा रहेको?',
'Defines the icon used for display of features on handheld GPS.': 'हस्त निर्देशित जि.पि.एस.मा डिस्प्ले दिने कार्यको निम्ति प्रयोग गरिएको आइकनलाई परिभाषित गर्दछ ।',
'Defines the icon used for display of features on interactive map & KML exports.': 'इन्टर्याक्टिभ नक्सा तथा के.एम.एल. विवरणमा डिस्प्ले दिने कार्यको निम्ति प्रयोग गरिएको आइकनलाई परिभाषित गर्दछ ।',
'Degrees in a latitude must be between -90 to 90.': 'अक्षांशमा प्रयोग गरिएको डिग्री -९० देखि ९० मध्येमा हुनुपर्छ ।',
'Degrees in a longitude must be between -180 to 180.': 'देशान्तरमा प्रयोग गरिएको डिग्री -१८० देखि १८० मध्येमा हुनुपर्छ ।',
'Degrees must be a number.': 'कोणहरू अंकमा नै हुनुपर्छ ।',
'DELETE': 'डि.इ.एल.इ.टि.इ.',
'Delete': 'हटाउनुहोस्',
'Delete Affiliation': 'स्वीकृति हटाउनुहोस्',
'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'अपलोड भन्दा पहिले प्रयोगकर्तासँग स्वीकृती रहेको यो प्रकारको सम्पूर्ण आँकडा हटाउनुहोस्. अफलाइनमा आँकडा तयार गरि पढ्नको लागि मात्र संचित गर्नको लागि यो बनाइएको हो ।',
'Delete Appraisal': 'मुल्यंकन हटाउनुहोस्',
'Delete Award': 'पुरस्कार हटाउनुहोस्',
'Delete Branch': 'शाखा हटाउनुहोस्',
'Delete Certificate': 'प्रमाण-पत्र हटाउनुहोस्',
'Delete Certification': 'प्रमाणीकरण हटाउनुहोस्',
'Delete Cluster': 'समूह हटाउनुहोस्',
'Delete Competency Rating': 'प्रतिस्पर्धा स्तर हटाउनुहोस्',
'Delete Contact': 'सम्पर्क हटाउनुहोस्',
'Delete Contact Information': 'सम्पर्क जानकारी हटाउनुहोस्',
'Delete Course': 'पाठ्यक्रम हटाउनुहोस्',
'Delete Course Certificate': 'पाठ्यक्रम प्रमाण-पत्र हटाउनुहोस्',
'Delete Credential': 'कागजात हटाउनुहोस्',
'Delete Data from Theme layer': 'स्वरूप तह बाट आँकडा हटाउनुहोस्',
'Delete Department': 'मन्त्रालय हटाउनुहोस्',
'Delete Deployment': 'परियोजन हटाउनुहोस्',
'Delete Donor': 'दाता हटाउनुहोस्',
'Delete Facility': 'सुविधा हटाउनुहोस्',
'Delete Facility Type': 'सुविधाको प्रकार हटाउनुहोस्',
'Delete Feature Layer': 'विशेषता तह हटाउनुहोस्',
'Delete Group': 'समूह हटाउनुहोस्',
'Delete Hazard': 'खतरा हटाउनुहोस्',
'Delete Hours': 'घण्टा हटाउनुहोस्',
'Delete Image': 'तस्विर हटाउनुहोस्',
'Delete Job Title': 'पद हटाउनुहोस्',
'Delete Layer': 'तह हटाउनुहोस्',
'Delete Location': 'स्थान हटाउनुहोस्',
'Delete Location Hierarchy': 'स्थान बनावट हटाउनुहोस्',
'Delete Mailing List': 'ठेगाना तालिका हटाउनुहोस्',
'Delete Map Profile': 'नक्सा बनावट हटाउनुहोस्',
'Delete Marker': 'चिन्ह हटाउनुहोस्',
'Delete Member': 'सदस्य हटाउनुहोस्',
'Delete Membership': 'सदस्यता हटाउनुहोस्',
'Delete Membership Type': 'सदस्यता प्रकार हटाउनुहोस्',
'Delete National Society': 'राष्ट्रिय समाज हटाउनुहोस्',
'Delete Office': 'कार्यलय हटाउनुहोस्',
'Delete Office Type': 'कार्यलय प्रकार हटाउनुहोस्',
'Delete Organization': 'संस्था हटाउनुहोस्',
'Delete Organization Type': 'संस्था प्रकार हटाउनुहोस्',
'Delete Participant': 'सहभागी हटाउनुहोस्',
'Delete Partner Organization': 'साझेदार संस्था हटाउनुहोस्',
'Delete Person': 'ब्यक्ति हटाउनुहोस्',
'Delete PoI Type': 'रूचीको बुँदा प्रकार हटाउनुहोस्',
'Delete Point of Interest': 'रूचीको बुँदा हटाउनुहोस्',
'Delete Professional Experience': 'ब्यबसायिक अनुभव हटाउनुहोस्',
'Delete Program': 'कार्यक्रम हटाउनुहोस्',
'Delete Project': 'परियोजना हटाउनुहोस्',
'Delete Projection': 'प्रक्षेपण हटाउनुहोस्',
'Delete Record': 'विवरण हटाउनुहोस्',
'Delete Region': 'क्षेत्र हटाउनुहोस्',
'Delete Resource': 'स्रोत हटाउनुहोस्',
'Delete Resource Type': 'स्रोत प्रकार हटाउनुहोस्',
'Delete Role': 'भूमिका हटाउनुहोस्',
'Delete Room': 'कोठा हटाउनुहोस्',
'Delete saved search': 'संचित खोजी हटाउनुहोस्',
'Delete Sector': 'क्षेत्र हटाउनुहोस्',
'Delete Service': 'सेवा हटाउनुहोस्',
'Delete Skill': 'सिप हटाउनुहोस्',
'Delete Skill Equivalence': 'सिप सरह हटाउनुहोस्',
'Delete Skill Type': 'सिप प्रकार हटाउनुहोस्',
'Delete Staff Assignment': 'कर्मचारीको काम हटाउनुहोस्',
'Delete Staff Member': 'कर्मचारी सदस्य हटाउनुहोस्',
'Delete Status': 'अवस्था हटाउनुहोस्',
'Delete Symbology': 'चिन्हता हटाउनुहोस्',
'Delete Theme': 'स्वरूप हटाउनुहोस्',
'Delete this Filter': 'यो फिल्टर हटाउनुहोस्',
'Delete Training': 'तालिम हटाउनुहोस्',
'Delete Training Event': 'तालिम कार्यक्रम हटाउनुहोस्',
'Delete Volunteer': 'स्वयम्-सेवक हटाउनुहोस्',
'Delete Volunteer Cluster': 'स्वयम्-सेवक समूह हटाउनुहोस्',
'Delete Volunteer Cluster Position': 'स्वयम्-सेवक समूह पद हटाउनुहोस्',
'Delete Volunteer Cluster Type': 'स्वयम्-सेवक समूह प्रकार हटाउनुहोस्',
'Delete Volunteer Role': 'स्वयम्-सेवक भूमिका हटाउनुहोस्',
'deleted': 'हटाइयो',
'Demographics': 'जनसांखिकिय',
'Department / Unit': 'विभाग/इकाई',
'Department added': 'मन्त्रालय संचित गरियो',
'Department Catalog': 'विभागीय तालिका',
'Department deleted': 'मन्त्रालय हटाइयो',
'Department Details': 'मन्त्रालय विवरण',
'Department updated': 'मन्त्रालय परिमार्जन गरियो',
'Deployed': 'परियोजन गरियो',
'Deploying NS': 'एन.एस. परियोजन',
'Deployment': 'परियोजन',
'Deployment added': 'परियोजन संचित गरियो',
'Deployment Alert': 'परियोजन सचेतक',
'Deployment Date': 'परियोजन मिति',
'Deployment deleted': 'परियोजन हटाइयो',
'Deployment Details': 'परियोजन विवरण',
'Deployment Details updated': 'परियोजन विवरण परिमार्जन गरियो',
'Deployment Location': 'परियोजन स्थान',
'Deployments': 'परियोजनहरू',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'प्रकृया परिभाषित गर्नुहोस् जसले यो विवरणलाई (जस्तै "मेडिकल परिक्षा") सँग सम्बन्धित गराउँदछ ।',
'Description': 'ब्याख्या',
'Design, deploy & analyze surveys.': 'सर्वेक्षणको ढाँचा तयारी, परिचालन र विश्लेषण',
'Designation': 'पद',
'Desluding ': 'लेदो हटाउने कार्य',
'Destination': 'गन्तब्य',
'Detailed Description/URL': 'विस्तृत ब्याख्या/यू.आर.एल.',
'Details': 'विवरण',
'Disable': 'निस्कृय',
'Disaster Law': 'विपद् कानून ',
'Disaster Management System Officer': 'प्रकोप ब्यबस्थापन प्रकृया कर्मचारी',
'Disaster Management Unit Assistant': 'प्रकोप ब्यबस्थापन इकाई सहयोग',
'Disaster Risk Management': 'विपद् जोखिम ब्यवस्थापन',
'Disaster Risk Reduction': 'विपद् जोखिम न्यूनिकरण',
'Disaster Type': 'प्रकोप प्रकार',
'Disease Prevention': 'रोग रोकथाम',
'diseased': 'रोग लागेको',
'displaced': 'विस्थापित',
'Display Polygons?': 'डिस्प्ले बहुभुज?',
'Display Routes?': 'डिस्प्ले मार्गहरू?',
'Display Tracks?': 'डिस्प्ले ट्र्याक?',
'Display Waypoints?': 'डिस्प्ले मार्ग बिन्दुहरू?',
'Distribution of Food': 'खाद्यान्न वितरण',
'Distribution of Non-Food Items': 'गैर खाद्य सामग्री वितरण',
'Distribution of Shelter Repair Kits': 'आवास मर्मत सामग्री वितरण',
'Diversifying Livelihoods': 'जीवीकोपार्जनमा विविधिकरण',
'divorced': 'पारपाचुके',
'DM / Relief': 'डि.एम./सहयोग',
'DM Planning': 'डि.एम योजना',
'Do you really want to approve this record?': 'के तपाईं यो विवरणलाई वास्तबमा नै स्वीकृत गर्न चाहानुहुन्छ?',
'Do you really want to delete these records?': 'के तपाईं यी विवरणहरूलाई वास्तबमा नै हटाउन चाहानुहुन्छ?',
'Do you really want to delete this record? (This action can not be reversed)': 'के तपाईं यो विवरणलाई हटाउन चाहानुहुन्छ? (यसलाई फर्काउन सकिँदैन)',
'Document Scan': 'कागजात स्क्यान',
'Documents': 'कागजातहरू',
'Domain': 'डोमेन',
'Donor': 'दाता',
'Donor added': 'दाता संचित गरियो',
'Donor deleted': 'दाता हटाइयो',
'Donor Details': 'दाता विवरण',
'Donor Driven Housing Reconstruction': 'दाता निर्देशित आवास पूर्ननिर्माण',
'Donor updated': 'दाता परिमार्जन गरियो',
'Donors': 'दाताहरू',
'Donors Report': 'दाताहरूको प्रतिबेदन',
'Download OCR-able PDF Form': 'ओ.सि.आर. भएको पि.डि.एफ. फारम डाउनलोड गर्नुहोस्',
'Download Template': 'टेम्प्लेट डाउनलोड गर्नुहोस्',
'Draft': 'खाका',
'Draft Features': 'खाका विशेषताहरू',
'Drag an image below to crop and scale it before uploading it:': 'अपलोड गर्न भन्दा पहिले तस्बिरलाई तल घिस्र्याएर काटेर आकार दिनुहोस्:',
'Drainage': 'ढल',
'Draw a square to limit the results to just those within the square.': 'नतिजाहरूलाई वर्गाकारभित्रका मात्र सीमित गर्नको लागि एउटा वर्गाकार खिच्नुहोस् ।',
'Driver': 'सवारी चालक',
'Driving License': 'सवारी चालक अनुमती पत्र',
'Drought': 'खडेरी',
'DRR': 'डि.आर.आर.',
'DRRPP Extensions': 'डि.आर.आर.पि.पि. थप कार्यक्रमहरू',
'Duplicate': 'नक्कल',
'Duplicate label selected': 'नक्कल स्तर छानियो',
'Duration': 'अवधी',
'Duration (months)': 'अवधी (महिनाहरू)',
'E-mail': 'इ-मेल',
'Early Warning': 'पूर्वचेतावनी',
'Early Warning Systems': 'पूर्वचेतावनी प्रणालीहरू',
'Earthquake': 'भूकम्प',
'Earthquakes': 'भूकम्प',
'Edit': 'परिवर्तन',
'Edit %(site_label)s Status': '%(site_label)s अवस्था परिवर्तन',
"Edit 'More Info'": "थप जानकारी' परिवर्तन",
'Edit Activity': 'कृयाकलाप परिवर्तन',
'Edit Activity Organization': 'कृयाकलाप ब्यबस्थापन परिवर्तन',
'Edit Activity Type': 'कृयाकलाप प्रकार परिवर्तन',
'Edit Address': 'ठेगाना परिवर्तन',
'Edit Affiliation': 'स्वीकृती परिवर्तन',
'Edit Annual Budget': 'वार्षिक बजेट परिवर्तन',
'Edit Appraisal': 'मुल्यांकन परिवर्तन',
'Edit Award': 'पुरस्कार परिवर्तन',
'Edit Beneficiaries': 'भागिदारहरू परिवर्तन',
'Edit Beneficiary Type': 'भागिदार प्रकार परिवर्तन',
'Edit Branch Organization': 'शाखा संस्था परिवर्तन',
'Edit Campaign': 'क्याम्पिन परिवर्तन',
'Edit Certificate': 'प्रमाण-पत्र परिवर्तन',
'Edit Certification': 'प्रमाणिकरण परिवर्तन',
'Edit Cluster': 'समूह परिवर्तन',
'Edit Community Details': 'समुदाय विवरण परिवर्तन',
'Edit Competency Rating': 'प्रतिस्पर्धा स्तर परिवर्तन',
'Edit Contact': 'सम्पर्क परिवर्तन',
'Edit Contact Details': 'सम्पर्क विवरण परिवर्तन',
'Edit Contact Information': 'सम्पर्क जानकारी परिवर्तन',
'Edit Course': 'पाठ्यक्रम परिवर्तन',
'Edit Course Certificate': 'पाठ्यक्रम प्रमाण-पत्र परिवर्तन',
'Edit Credential': 'कागजात परिवर्तन',
'Edit Department': 'मन्त्रालय परिवर्तन',
'Edit Deployment Details': 'परियोजन विवरण परिवर्तन',
'Edit Details': 'विवरण परिवर्तन',
'Edit Donor': 'दाता परिवर्तन',
'Edit DRRPP Extensions': 'डि. आर. आर. पि. पि. थप कार्यक्रमहरू परिवर्तन',
'Edit Education Details': 'शिक्षा विवरण परिवर्तन',
'Edit Entry': 'प्रवेश परिवर्तन',
'Edit Experience': 'अनुभव परिवर्तन',
'Edit Facility': 'सुविधा परिवर्तन',
'Edit Facility Type': 'सुविधा प्रकार परिवर्तन',
'Edit Feature Layer': 'विशेषता तह परिवर्तन',
'Edit Group': 'समूह परिवर्तन',
'Edit Hazard': 'खतरा परिवर्तन',
'Edit Hours': 'समय (घण्टा) परिवर्तन',
'Edit Identity': 'परिचय परिवर्तन',
'Edit Image Details': 'तस्विर विवरण परिवर्तन',
'Edit Job': 'काम परिवर्तन',
'Edit Job Title': 'पद परिवर्तन',
'Edit Keyword': 'मुख्यशब्द परिवर्तन',
'Edit Layer': 'तह परिवर्तन',
'Edit Level %d Locations?': 'स्तर %d स्थानहरू परिवर्तन?',
'Edit Location': 'स्थान परिवर्तन',
'Edit Location Details': 'स्थान विवरण परिवर्तन',
'Edit Location Hierarchy': 'स्थान बनावट परिवर्तन',
'Edit Log Entry': 'दर्ता प्रवेश परिवर्तन',
'Edit Logged Time': 'दर्ता गरिएको समय परिवर्तन',
'Edit Mailing List': 'ठेगाना तालिका परिवर्तन',
'Edit Map Profile': 'नक्सा बनावट परिवर्तन',
'Edit Marker': 'चिन्ह परिवर्तन',
'Edit Member': 'सदस्य परिवर्तन',
'Edit Membership': 'सदस्यता परिवर्तन',
'Edit Membership Type': 'सदस्यता प्रकार परिवर्तन',
'Edit Milestone': 'उद्देश्य परिवर्तन',
'Edit National Society': 'राष्ट्रिय समाज परिवर्तन',
'Edit Network': 'नेटवर्क परिवर्तन',
'Edit Office': 'कार्यलय परिवर्तन',
'Edit Office Type': 'कार्यलय प्रकार परिवर्तन',
'Edit Organization': 'संस्था परिवर्तन',
'Edit Organization Type': 'संस्था प्रकार परिवर्तन',
'Edit Output': 'नतिजा परिवर्तन',
'Edit Participant': 'सहभागी परिवर्तन',
'Edit Partner Organization': 'साझेदार संस्था परिवर्तन',
'Edit Permissions for %(role)s': '%(role)s को लागि स्वीकृतीहरू परिवर्तन',
'Edit Person Details': 'ब्यक्ति विवरण परिवर्तन',
"Edit Person's Details": 'ब्यक्तिको विवरण परिवर्तन',
'Edit PoI Type': 'रूचीको बुँदा प्रकार परिवर्तन',
'Edit Point of Interest': 'रूचीको बुँदा परिवर्तन',
'Edit Policy or Strategy': 'नियम तथा लक्ष परिवर्तन',
'Edit Professional Experience': 'ब्यबसायिक अनुभव परिवर्तन',
'Edit Profile Configuration': 'प्रोफाइल बनावट परिवर्तन',
'Edit Program': 'कार्यक्रम परिवर्तन',
'Edit Project': 'परियोजना परिवर्तन',
'Edit Project Organization': 'परियोजना संस्था परिवर्तन',
'Edit Projection': 'प्रक्षेपण परिवर्तन',
'Edit Record': 'विवरण परिवर्तन',
'Edit Region': 'क्षेत्र परिवर्तन',
'Edit Resource': 'स्रोत परिवर्तन',
'Edit Resource Type': 'स्रोत प्रकार परिवर्तन',
'Edit Response Summary': 'प्रतिकृया संक्षेप परिवर्तन',
'Edit Role': 'भूमिका परिवर्तन',
'Edit Room': 'कोठा परिवर्तन',
'Edit saved search': 'संचित खोजी परिवर्तन',
'Edit Sector': 'क्षेत्र परिवर्तन',
'Edit Service': 'सेवा परिवर्तन',
'Edit Skill': 'सिप परिवर्तन',
'Edit Skill Equivalence': 'सिप सरह परिवर्तन',
'Edit Skill Type': 'सिप प्रकार परिवर्तन',
'Edit Staff Assignment': 'कर्मचारी काम परिवर्तन',
'Edit Staff Member Details': 'कर्मचारी सदस्य विवरण परिवर्तन',
'Edit Status': 'अवस्था परिवर्तन',
'Edit Symbology': 'चिन्हता परिवर्तन',
'Edit Task': 'काम परिवर्तन',
'Edit Team': 'समूह परिवर्तन',
'Edit the OpenStreetMap data for this area': 'यो क्षेत्रको लागि खुलासडकनक्सा आँकडा परिवर्तन',
'Edit Theme': 'स्वरूप परिवर्तन',
'Edit Theme Data': 'स्वरूप आँकडा परिवर्तन',
'Edit this entry': 'यो प्रवेश परिवर्तन',
'Edit Training': 'तालिम परिवर्तन',
'Edit Training Event': 'तालिम कार्यक्रम परिवर्तन',
'Edit Volunteer Cluster': 'स्वयम्-सेवक समूह परिवर्तन',
'Edit Volunteer Cluster Position': 'स्वयम्-सेवक समूह पद परिवर्तन',
'Edit Volunteer Cluster Type': 'स्वयम्-सेवक समूह प्रकार परिवर्तन',
'Edit Volunteer Details': 'स्वयम्-सेवक विवरण परिवर्तन',
'Edit Volunteer Role': 'स्वयम्-सेवक भूमिका परिवर्तन',
'Education': 'शिक्षा',
'Education & Advocacy': 'शिक्षा र वकालत',
'Education & School Safety': 'शिक्षा र विद्यालय सुरक्षा',
'Education Details': 'शिक्षा विवरण',
'Education details added': 'शिक्षा विवरण संचित गरियो',
'Education details deleted': 'शिक्षा विवरण हटाइयो',
'Education details updated': 'शिक्षा विवरण परिमार्जन गरियो',
'Effort Report': 'सामर्थ्य प्रतिवेदन',
'Either a shelter or a location must be specified': 'बसोबास अथवा स्थानमध्ये कुनैपनि पहिचान गरिनै पर्छ',
'Either file upload or image URL required.': 'फाइल अपलोड वा तस्विर यू.आर.एल. आवश्यक पर्छ ।',
'Email': 'इमेल',
'Email Address': 'इमेल ठेगाना',
'Emergency Contacts': 'आपतकालिन सम्पर्कहरू',
'Emergency Health': 'आकस्मिक स्वास्थ्य',
'Emergency Householdwater Treatment and Storage': 'आपतकालिन गृह खानेपानि उपचार तथा भण्डार',
'Emergency Medical Technician': 'आकस्मिक मेडिकल प्राविधिक',
'Emergency Shelter': 'आपतकालिन वसोबास',
'Emergency Telecommunications': 'आपतकालिन टेलिफोन संचार',
'Emergency Water Supply': 'आपतकालिन पानी आपूर्ति',
'Emergency WASH': 'आकस्मिक खानेपानी तथा सरसफाई',
'Empty': 'खाली',
'Enable': 'सक्रिय',
'Enable in Default Config?': 'स्वचलानमा रहेको बनावटलाई सक्रिय गर्न चाहानुहुन्छ?',
'End Date': 'अन्तिम मिति',
"Enter a name to search for. You may use % as wildcard. Press 'Search' without input to list all items.": "खोजीको निम्ति नाम टाइप गर्नुहोस् । तपाईँले वाइल्डकार्डको रूपमा % प्रयोग गर्न सक्नुहुन्छ । सबै वस्तुहरूको तालिका इनपुट बिना नै 'खोजी' थिच्नुहोस् ।",
'Enter a valid email': 'मान्य इमेल राख्नुहोस्',
'Enter a valid phone number': 'मान्य फोन नम्बर राख्नुहोस्',
'enter a value': 'मान राख्नुहोस्',
'Enter a value carefully without spelling mistakes, this field needs to match existing data.': 'स्पेलिङ गल्ति नगरि टाइप गर्नुहोस्, यो क्षेत्र भइरहेको आँकडासँग मिल्नु पर्छ ।',
'enter date and time': 'मिति र समय राख्नुहोस्',
'enter date and time in range %(min)s %(max)s': 'मिति र समय %(min)s %(max)s भित्र राख्नुहोस्',
'enter date and time on or after %(min)s': ' %(min)s वा त्यस्पछि मिति र समय प्रवेश गर्नुहोस्',
'enter date and time on or before %(max)s': '%(max)s वा त्यस्अघि मिति र समय प्रवेश गर्नुहोस्',
'Enter some characters to bring up a list of possible matches': 'सम्भावित मेलहरूको तालिका निकाल्नको निम्ति केहि शब्दहरू प्रवेश गर्नुहोस् ।',
'Enter the same password as above': 'माथीको पासवर्ड पुन राख्नुहोस्',
'Enter your first name': 'तपाईंको पहिलो नाम राख्नुहोस्',
'Enter your organization': 'तपाईंको संस्था राख्नुहोस्',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'फोन नम्बर राख्नु भनेको स्वेच्छिक हो, तर सो राख्नुभएको खण्डमा तपाईले एस.एम.एस. संदेश प्राप्त गर्नुहुनेछ ।',
'Enterprise Development Training ': 'परियोजना विकास तालिम ',
'Entity': 'अंग',
'Environment': 'वातावरण',
'Epidemic': 'महामारी',
'Epidemic/Pandemic Preparedness': 'महामारी/ विश्वब्यापी महामारी पूर्वतयारी',
'Errors': 'गल्तीहरू',
'ESRI Shape File': 'इ.एस.आर.आइ. आकार फाइल',
'Essential Staff?': 'अतिआवश्यक कर्मचारी?',
'Estimated Reopening Date': 'पुन: खुला हुने अनुमानित मिति',
'Ethnicity': 'जातीयता',
'Euros': 'यूरो',
'Evacuating': 'खाली गर्नु',
'Evacuation Drills': 'खाली गर्ने अभ्यासहरू',
'Events': 'कार्यक्रमहरू',
'Excellent': 'उत्कृष्ट',
'Exercise': 'अभ्यास',
'Excreta Disposal': 'दिसा विसर्जन',
'Experience': 'अनुभव',
'expired': 'मिति समाप्त',
'Expiring Staff Contracts Report': 'म्याद सकिन लागेको कर्मचारीको सम्झौता प्रतिवेदन',
'Expiry (months)': 'म्याद सकिने (महिनाहरू)',
'Expiry Date': 'म्याद सकिने मिति',
'Export as': 'को रूपमा निर्यात',
'Export in %(format)s format': '%(format)s प्रकारमा निर्यात',
'Export in GPX format': 'जि.पि.एप्क्स.प्रकारमा निर्यात',
'Export in KML format': 'के.एम.एल. प्रकारमा निर्यात',
'Export in OSM format': 'ओ.एस.एम. प्रकारमा निर्यात',
'Eye Color': 'आँखाको रंग',
'Facial hair, color': 'अनुहारको रौं, रंग',
'Facial hair, comment': 'अनुहारको रौं, टिप्पणी',
'Facial hair, length': 'अनुहारको रौं, लम्बाइ',
'Facial hair, type': 'अनुहारको रौं, प्रकार',
'Facilities': 'सूबिधाहरू',
'Facility': 'सुविधा',
'Facility added': 'सुविधा संचित गरियो',
'Facility Contact': 'सुविधा सम्पर्क',
'Facility deleted': 'सुविधा हटाइयो',
'Facility Details': 'सुविधा विवरण',
'Facility Status': 'सुविधा अवस्था',
'Facility Type': 'सुविधा प्रकार',
'Facility Type added': 'सुविधा प्रकार संचित गरियो',
'Facility Type deleted': 'सुविधा प्रकार हटाइयो',
'Facility Type Details': 'सुविधा प्रकार विवरण',
'Facility Type updated': 'सुविधा प्रकार परिमार्जन गरियो',
'Facility Types': 'सुविधा प्रकारहरू',
'Facility updated': 'सुविधा परिमार्जन गरियो',
'Fail': 'असफल',
'Fair': 'उचित',
'Family': 'परिवार',
'fat': 'मोटो',
'Fax': 'फ्याक्स',
'Feature Info': 'विशेषता जानकारी',
'Feature Layer': 'विशेषता तह',
'Feature Layer added': 'विशेषता तह संचित गरियो',
'Feature Layer deleted': 'विशेषता तह हटाइयो',
'Feature Layer Details': 'विशेषता तह विवरण',
'Feature Layer updated': 'विशेषता तह परिमार्जन गरियो',
'Feature Layers': 'विशेषता तहहरू',
'Feature Namespace': 'विशेषता नाम स्थान',
'Feature Type': 'विशेषता प्रकार',
'Features Include': 'विशेषताहरूमा समावेश छन्',
'Feedback': 'प्रतिकृया',
'Feeding Programmes': 'खुवाउने कार्यक्रम',
'female': 'महिला',
'Field': 'क्षेत्र',
'File': 'फाइल',
'Files': 'फाइलहरू',
'fill in order: day(2) month(2) year(4)': 'खालि ठाउँ भर्नुहोस्: दिन(२) महिना(२) वर्ष(४)',
'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'खालि ठाउँ भर्नुहोस्: घण्टा(२) मिनेट(२) दिन(२) महिना(२) वर्ष(४)',
'fill in order: hour(2) min(2) month(2) day(2) year(4)': 'खालि ठाउँ भर्नुहोस्: घण्टा(२) मिनेट(२) महिना(२) दिन(२) वर्ष(४)',
'fill in order: month(2) day(2) year(4)': 'खालि ठाउँ भर्नुहोस्: महिना(२) दिन(२) वर्ष(४)',
'Filter': 'फिल्टर',
'Filter by Location': 'स्थान को आधारमा फिल्टर',
'Filter Options': 'फिल्टर विकल्पहरू',
'Filter type': 'फिल्टर प्रकार',
'Filter type ': 'फिल्टर प्रकार ',
'Finance / Admin': 'वित्तिय / संचालक',
'Finance Officer': 'वित्तिय कर्मचारी',
'Financial Risk Sharing ': 'वित्तिय खतरा बाँडफाँड',
'Financial Services': 'वित्तिय सेवाहरू',
'Financial System Development': 'वित्तिय प्रणाली विकास',
'Find more': 'थप प्राप्त गर्नुहोस्',
'Find on Map': 'नक्सामा प्राप्त गर्नुहोस्',
'Fingerprint': 'औँठाछाप',
'Fire': 'आगलागी',
'First': 'पहिलो',
'First Aid': 'प्राथमिक उपचार',
'First Name': 'पहिलो नाम',
'Fleet Manager': 'सवारी साधन व्यवस्थापक',
'Flood': 'बाढी',
'Focal Person': 'मुख्य ब्यक्ति',
'Folder': 'फोल्डर',
'Food Security': 'खाद्य सुरक्षा',
'Food Supplementation': 'पूरक आहार',
'For Entity': 'अंगको लागि',
'For live help from the Sahana community on using this application, go to': 'यो एप्लिकेशन प्रयोगको बारेमा साहाना समुदायबाट प्रत्यक्ष सहयोग प्राप्त गर्नको लागि, जानुहोस्',
'For more details on the Sahana Eden system, see the': 'साहाना इदेन प्रकृयाको बारेमा थप विवरण को लागि, हेर्नुहोस्',
'forehead': 'निधार',
'form data': 'फारम आँकडा',
'Form Settings': 'फारम सेटिङ',
'Format': 'नमुना',
'found': 'प्राप्त भयो',
'Frequency': 'फ्रीक्वेन्सी',
'Full beard': 'पूर्ण दारी',
'Fullscreen Map': 'पूर्णस्क्रिन नक्सा',
'Function': 'कार्य',
'Function Permissions': 'कार्य स्वीकृतीहरू',
'Funding': 'अनुदान कार्यक्रम',
'Funding Report': 'अनुदान कार्यक्रम प्रतिवेदन',
'Funds Contributed': 'दिइएको अनुदान',
'Gap Analysis Map': 'अन्तर विश्लेषण नक्सा',
'Gap Analysis Report': 'अन्तर विश्लेषण प्रतिवेदन',
'Gender': 'लिङ्ग',
'Generator': 'जेनेरेटर',
'Geocode': 'जिओ कोड',
'Geocoder Selection': 'जिओ कोड छान्ने',
'GeoJSON Layer': 'जिओ जे.एस.एन.तह',
'Geometry Name': 'ज्यामिती नाम',
'GeoRSS Layer': 'जिओ आर.एस.एस. तह',
'Get Feature Info': 'विशेषता जानकारी प्राप्त गर्नुहोस्',
'getting': 'नजिकिँदै',
'GIS & Mapping': 'भौगोलिक सूचना प्रणाली र नक्सांकन',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'तस्विरको विस्तृत विवरण दिनुहोस्, जस्तै तस्बिरको कहाँ के देख्न सकिन्छ (वैकल्पिक).',
'Go': 'जानुहोस्',
"Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": "%(url)s जानुहोस्, खोल्नुहोस र तपाईंको एप्लिकेसन दर्ता गर्नुहोस् । तपाईंले जुनसुकै यूआरएल राख्न सक्नुहुन्छ र तपाईंले खालि 'नक्सा परिवर्तन' स्वीकृती मा क्लिक गर्न सक्नुहुन्छ ।",
'Go to Functional Map': 'कार्यात्मक नक्सामा जानुहोस्',
'Goatee': 'गोटि',
'Good': 'राम्रो',
'Google Layer': 'गुगल तह',
'Google Maps': 'गुगल नक्सा(हरु)',
'Google Satellite': 'गुगल उपग्रह',
'Governance': 'कार्यकारिणी',
'Government': 'सरकार',
'GPS Marker': 'जि.पि.एस. चिन्ह',
'GPS Track': 'जि.पि.एस. ट्र्याक',
'GPS Track File': 'जि.पि.एस. ट्र्याक फाइल',
'GPX Layer': 'जि.पि.एक्स्. तह',
'Grade': 'कक्षा',
'Graph': 'ग्राफ',
'Great British Pounds': 'ब्रिटिस पाउण्ड',
'Greater than 10 matches. Please refine search further': '१० भन्दा धेरै मिल्यो । कृपया खोजीलाई अझ मिलाउनुहोस्',
'green': 'हरियो',
'grey': 'खैरो',
'Grid': 'ग्रीड',
'Group': 'समूह',
'Group added': 'समूह संचित गरियो',
'Group deleted': 'समूह हटाइयो',
'Group description': 'समूह ब्याख्या',
'Group Description': 'समूह ब्याख्या',
'Group Details': 'समूह विवरण',
'Group Head': 'समूह प्रमुख',
'Group Leader': 'समूह अगुवा',
'Group Member added': 'समूह सदस्य संचित गरियो',
'Group Members': 'समूह सदस्यहरू',
'Group Name': 'समूह नाम',
'Group Type': 'समूह प्रकार',
'Group updated': 'समूह परिमार्जन गरियो',
'Grouped by': 'अनुसार समूह निर्धारण गरियो',
'Groups': 'समूहहरू',
'Hair Color': 'रौं रंग',
'Hair Comments': 'रौं टिप्पणीहरू',
'Hair Length': 'रौं लम्बाइ',
'Hair Style': 'रौं बनावट',
'Hand Washing Facilities': 'हात धुने सूबिधाहरू',
'Hazard': 'खतरा',
'Hazard added': 'खतरा संचित गरियो',
'Hazard added to Project': 'परियोजनामा खतरा संचित गरियो ',
'Hazard deleted': 'खतरा हटाइयो',
'Hazard Details': 'खतरा विवरण',
'Hazard removed from Project': 'परियोजनाबाट खतरा हटाइयो',
'Hazard updated': 'खतरा परिमार्जन गरियो',
'Hazards': 'खतराहरु',
'Headquarters': 'प्रधान कार्यालयहरू',
'Health': 'स्वास्थ्य',
'Health & Health Facilities': 'स्वास्थ्य र स्वास्थ्य सुविधाहरु',
'Health Awareness, Promotion': 'स्वास्थ्य जनचेतना, बढुवा',
'Health Facilities - Construction and Operation': 'स्वास्थ्य सूबिधाहरू - निर्माण र कृयाकलाप',
'Health Policy, Strategy Development': 'स्वास्थ्य नियम, उद्देश्य विकास',
'Heat Wave': 'लु',
'Height': 'उचाइ',
'Height (cm)': 'उचाइ (सेमि)',
'Heliport': 'हेलिपोर्ट',
'Help': 'सहयोग',
'HFA': 'एच.एफ.ए.',
'HFA Priorities': 'एच.एफ.ए. प्रमुखताहरू',
'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': 'एच.एफ.ए.१: प्रकोप खतरा न्यूनिकरण एक राष्ट्रिय तथा स्थानिय प्रमुखता हो जस्लाई लागु गर्नको निम्ति बलियो संस्थागत आधार रहेको छ ।',
'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': 'एच.एफ.ए.२: प्रकोप खतराहरूको पहिचान, मुल्याङ्कन, नियन्त्रण र पुर्व चेतावनीलाई बृहत बनाउने ।',
'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.': 'एच.एफ.ए.३: सबै तहमा सहजता तथा सुरक्षाको वातावरण निर्माण गर्नको निम्ति ज्ञान, बुद्दि तथा शिक्षाको प्रयोग गर्नु ।',
'HFA4: Reduce the underlying risk factors.': 'एच.एफ.ए.४: हालको खतराका कारणहरूलाई कम गर्नु ।',
'HFA5: Strengthen disaster preparedness for effective response at all levels.': 'एच.एफ.ए.५: सबै तहमा प्रभावकारी प्रतिकृयाको निम्ति प्रकोप पुर्व तयारीलाई बलियो बनाउने ।',
'Hide': 'लुकाउनुहोस्',
'Hide Chart': 'तालिका लुकाउनुहोस्',
'Hide Pivot Table': 'वृत्त तालिका लुकाउनुहोस्',
'Hide Table': 'तालिका लुकाउनुहोस्',
'Hierarchy': 'बनावट',
'Hierarchy Level 1 Name (e.g. State or Province)': 'बनावट स्तर १ नाम (जस्तै, राज्य वा अंचल)',
'Hierarchy Level 2 Name (e.g. District or County)': 'बनावट स्तर २ नाम (जस्तै, जिल्ला वा क्षेत्र)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'बनावट स्तर ३ नाम (जस्तै, शहर / नगर / गाउँ)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'बनावट स्तर ४ नाम (जस्तै, छिमेक)',
'Hierarchy Level 5 Name': 'बनावट स्तर ५ नाम',
'High': 'उच्च',
'Highest Priority Open Requests': 'उच्च प्राथमिकता खुला अनुरोधहरू',
'Hindu': 'हिन्दु',
'Home Address': 'गृह ठेगाना',
'Home Country': 'गृह देश',
'Home Phone': 'गृह फोन',
'Honorary': 'मानार्थ',
'Hospital': 'अस्पताल',
'Hospitals': 'अस्पतालहरू',
'Host': 'संचालक',
'Host National Society': 'संचालक राष्ट्रिय सोसाइटी',
'Hour': 'घण्टा',
'Hourly': 'प्रति घण्टा',
'Hours': 'समय (घण्टा)',
'hours': 'समय (घण्टा)',
'Hours added': 'समय (घण्टा) संचित गरियो',
'Hours by Program Report': 'कार्यक्रम प्र्र्रतिवेदनमा समय (घण्टा)',
'Hours by Role Report': 'भूमिका प्रतिवेदनमा समय (घण्टा)',
'Hours deleted': 'समय (घण्टा) हटाइयो',
'Hours Details': 'समय (घण्टा) विवरण',
'Hours updated': 'समय (घण्टा) परिमार्जन गरियो',
'House Design': 'घर डिजाइन',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'कति विवरण देखियो । उच्च जुम स्तरको अर्थ विस्तृत विवरण, तर धेरै क्षेत्रको होइन । थोरै जुम गर्दा धेरै क्षेत्र देखिन्छ तर विस्तृत विवरण कम हुन्छ ।',
'How often you want to be notified. If there are no changes, no notification will be sent.': 'कति पटक तपाईंलाई जानकारि दिइएको चाहानुहुन्छ । कुनै परिवर्तन गरिएन भने, कुनै जानकारी पठाइने छैन ।',
'How you want to be notified.': 'तपाईं कसरी जानकारी प्राप्त गर्न चाहानुहुन्छ?',
'HTML': 'एच.टि.एम.एल.',
'Human Resource': 'मानव स्रोत',
'Human Resource Development': 'जनशक्ति विकास',
'Human Resources': 'मानव स्रोतहरू',
'Hygiene Promotion': 'स्वच्छता प्रबर्धन',
'Hyogo Framework for Action (HFA)': 'ह्योगो कार्यसंरचना (एच.एफ.ए.)',
'I agree to the %(terms_of_service)s': '%(terms_of_service)s मा सहमत छु ।',
'ICBRR Staff': 'आइ.सि.बि.आर.आर. कर्मचारी',
'ID': 'आइ.डि.',
'ID Tag Number': 'आइ.डि. ट्याग संख्या',
'ID type': 'आइ.डि. प्रकार',
'Identities': 'परिचयहरू',
'Identity': 'परिचय',
'Identity added': 'परिचय संचित गरियो',
'Identity deleted': 'परिचय हटाइयो',
'Identity Details': 'परिचय विवरण',
'Identity updated': 'परिचय परिमार्जन गरियो',
'IEC Materials': 'सूचना, शिक्षा र संचार सामग्री',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'यदि कुनै प्रयोगकर्ताले यो डोमेन भएको इमेल ठेगाना आफ्नै हो भनि प्रमाणित गर्छ भने, थप स्वीकृति आवश्यक छ कि छैन र कसबाट चाहिन्छ भन्ने निर्धारण गर्न स्वीकृतिकर्ता क्षेत्र प्रयोग गरिन्छ ।',
'If checked, the notification will contain all modified records. If not checked, a notification will be send for each modified record.': 'जाँच गरिएको खण्डमा जानकारीमा परिवर्तन गरिएको सम्पूर्ण विवरण हुनेछ । जाँच नगरिएको खण्डमा परिवर्तन गरिएको प्रत्येक विवरणको लागि छुट्टै जानकारी पठाइनेछ ।',
'If it is a URL leading to HTML, then this will downloaded.': 'यदि यो एच.टि.एम.एल.मा लैजाने यू.आर.एल. हो भने, यो डाउनलोड गरिनेछ ।',
'If neither are defined, then the Default Marker is used.': 'कुनै पनि परिभाषित भएन भने, स्वचलानमा रहेको चिन्ह प्रयोग हुनेछ ।',
'If not found, you can have a new location created.': 'यदि प्राप्त भएन भने, तपाईंले नयाँ क्षेत्र बनाईएको पाउनुहुनेछ ।',
'If the location is a geographic area, then state at what level here.': 'यदि स्थान एउटा भौगोलिक स्थान हो भने, यहाँ कुन स्तरमा छ बताउनुहोस् ।',
'If the person counts as essential staff when evacuating all non-essential staff.': 'सम्पूर्ण अनावश्यक कर्मचारीलाई निकाला गर्ने क्रममा, यदि ब्यक्ति एक अति आवश्यक कर्मचारीको रूपमा लिइन्छ भने ।',
'If there are multiple configs for a person, which should be their default?': 'यदि कुनै ब्यक्तिको लागि बहुमुखिय बनावटहरू छन भने, उनिहरूको स्वचलानमा रहेको कुन् हो?',
"If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'यदि यो बनावट जि.आइ.एस. बनावट मेनु मा देखाईयो भने, मेनुलाई प्रयोग गर्नको निम्ति एउटा नाम दिनुहोस् । ब्यक्तिअन्तिम नक्सा को नाम बनावट द्वारा स्चालितरूपमा प्रयोग कर्ताको नाम राख्नेछ ।',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'डोमेन नमिलेको खण्डमा बाहेक, यो क्षेत्रमा एकाउण्ट बनाउदा धेरै जनसंख्या चाप भएमा प्रयोग कर्ता जस्ले संस्थाको रूपमा आफूलाई प्रतिनिधित्व गरेको हुन्छ भने उसलाई संस्थाको कर्मचारीकोरूपमा कार्य दिइन्छ ।',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'यदि यो क्षेत्रमा चाप भएमा ब्यक्तिको डोमेनलाई आधार मानि स्वचालितरूपमा नै ब्यक्तिलाई यो संस्थाको कर्मचारीकोरूपमा काम दिइनेछ ।',
'If this record should be restricted then select which role is required to access the record here.': 'यदि यो विवरणलाई सुरक्षित गर्न पर्छ भने, यसमा पहुँचको लागि कुन भूमिका आवस्यक पर्छ छान्नुहोस् ।',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'यदि यो विवरणलाई सुरक्षित गर्न पर्छ भने, यसमा भएको विवरणमा पहुँचको लागि कुन-कुन् भूमिका(हरू) लाई स्वीकृति दिइएको छ छान्नुहोस् ।',
"If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ कृयाकलाप राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ कृयाकलाप राख्न सक्नुहुन्छ ।',
"If you don't see the beneficiary in the list, you can add a new one by clicking link 'Create Beneficiary'.": 'यदि तालिकामा भागिदार देख्नुहुन्न भने, तपाईं "नयाँ भागिदार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ भागिदार राख्न सक्नुहुन्छ ।',
"If you don't see the campaign in the list, you can add a new one by clicking link 'Create Campaign'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ क्याम्पिन राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ क्याम्पिन राख्न सक्नुहुन्छ ।',
"If you don't see the Cluster in the list, you can add a new one by clicking link 'Create Cluster'.": 'यदि तालिकामा समूह देख्नुहुन्न भने, तपाईं "नयाँ समूह राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ समूह राख्न सक्नुहुन्छ ।',
"If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ समूदाय राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ समूदाय राख्न सक्नुहुन्छ ।',
"If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.": 'यदि तालिकामा स्थान देख्नुहुन्न भने, तपाईं "नयाँ स्थान राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ स्थान राख्न सक्नुहुन्छ ।',
"If you don't see the milestone in the list, you can add a new one by clicking link 'Create Milestone'.": 'यदि तालिकामा उद्देश्य देख्नुहुन्न भने, तपाईं "नयाँ उद्देश्य राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ उद्देश्य राख्न सक्नुहुन्छ ।',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'यदि तालिकामा संस्था देख्नुहुन्न भने, तपाईं "नयाँ संस्था राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ संस्था राख्न सक्नुहुन्छ ।',
"If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.": 'यदि तालिकामा परियोजना देख्नुहुन्न भने, तपाईं "नयाँ परियोजना राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ परियोजना राख्न सक्नुहुन्छ ।',
"If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": 'यदि तालिकामा क्षेत्र देख्नुहुन्न भने, तपाईं "नयाँ क्षेत्र राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ क्षेत्र राख्न सक्नुहुन्छ ।',
"If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ कृयाकलाप प्रकार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ कृयाकलाप राख्न सक्नुहुन्छ ।',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ सुविधा प्रकार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ सूविधा राख्न सक्नुहुन्छ ।',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ संस्था प्रकार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ प्रकार राख्न सक्नुहुन्छ ।',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.": 'यदि तालिकामा संस्था देख्नुहुन्न भने, तपाईं "नयाँ संस्था प्रकार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ संस्था राख्न सक्नुहुन्छ ।',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Region'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ क्षेत्र राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ क्षेत्र राख्न सक्नुहुन्छ ।',
"If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'": "यदि तपाईं फोल्डरनाम राख्नुहुन्छ भने, उक्त फोल्डरमा नक्साको तह खुल्ने एउटा तह देखा पर्नेछ । '/' ले नाम छुट्याएर यसमा सहायोग फोल्डर बनाउन सकिनेछ ।",
'If you have any questions or need support, please see': 'यदि तपाईंसँग कुनै प्रश्न छ वा सहयोगको आवश्यकता छ भने, कृपया हेर्नुहोस्',
'If you would like to help, then please %(sign_up_now)s': 'यदि तपाईं सहयोग गर्न चाहानुहुन्छ भने कृपया %(sign_up_now)s',
'ignore': 'बेवास्ता गर्नुहोस्',
'Ignore Errors?': 'गल्तीहरूलाई बेवास्ता गर्नुहुन्छ?',
'Image': 'तस्विर',
'Image added': 'तस्विर संचित गरियो',
'Image deleted': 'तस्विर हटाइयो',
'Image Details': 'तस्विर विवरण',
'Image File(s), one image per page': 'तस्विर फाइल(हरू), प्रत्येक पेजको लागि एउटा तस्विर',
'Image Type': 'तस्विर प्रकार',
'Image updated': 'तस्विर परिमार्जन गरियो',
'Images': 'तस्विरहरू',
'Immediately': 'तत्कालै',
'Immunisation Campaigns': 'सूइहाल्ने क्याम्पिनहरू',
'Import': 'आयात',
'Import Activity Data': 'आयात कृयाकलाप आँकडा',
'Import Activity Type data': 'आयात कृयाकलाप प्रकार आँकडा',
'Import Annual Budget data': 'आयात वार्षिक बजेट आँकडा',
'Import Awards': 'आयात पुरस्कारहरू',
'Import Certificates': 'आयात प्रमाण-पत्रहरू',
'Import Community Data': 'आयात समुदाय आँकडा',
'Import Contacts': 'आयात सम्पर्क',
'Import Courses': 'आयात पाठ्यक्रम',
'Import Data': 'आयात आँकडा',
'Import Data for Theme Layer': 'स्वरूप तहको लागि आयात आँकडा',
'Import Departments': 'आयात मन्त्रालयहरू',
'Import Deployments': 'आयात परियोजन',
'Import Facilities': 'आयात सूबिधाहरू',
'Import Facility Types': 'आयात सुविधा प्रकार',
'Import from CSV': 'सि.एस.भि. बाटको आयात',
'Import from OpenStreetMap': 'खुलासडकनक्सा बाटको आयात',
'Import Hazard data': 'आयात खतरा आँकडा',
'Import Hazards': 'आयात खतरा',
'Import Hours': 'आयात समय ',
'Import Layers': 'आयात तह',
'Import Location Data': 'आयात स्थान आँकडा',
'Import Location data': 'आयात स्थान आँकडा',
'Import Locations': 'आयात स्थान',
'Import Logged Time data': 'आयात तालिका समय आँकडा',
'Import Members': 'आयात सदस्य',
'Import Membership Types': 'आयात सदस्यता प्रकार',
'Import Offices': 'आयात कार्यलय',
'Import Organizations': 'आयात संस्था',
'Import Participant List': 'सहभागीको सूची आयात',
'Import Participants': 'आयात सहभागी',
'Import Partner Organizations': 'आयात साझेदार संस्था',
'Import PoI Types': 'आयात रूचीको बुँदा प्रकार(हरू)',
'Import Points of Interest': 'आयात रूचीको बुँदा',
'Import Policies & Strategies': 'आयात नियम तथा उद्देश्य',
'Import Project Organizations': 'आयात परियोजना संस्था',
'Import Projects': 'आयात परियोजना',
'Import Red Cross & Red Crescent National Societies': 'आयात रेड क्रस तथा रेड क्रिसेन्ट राष्ट्रिय सोसाइटि',
'Import Resource Types': 'आयात स्रोत प्रकार(हरू)',
'Import Resources': 'आयात स्रोत',
'Import Sector data': 'आयात क्षेत्र आँकडा',
'Import Service data': 'आयात सेवा आँकडा',
'Import Services': 'आयात सेवा',
'Import Staff': 'कर्मचारीको फाईल आयात',
'Import Tasks': 'आयात काम',
'Import Theme data': 'आयात स्वरूप आँकडा',
'Import Training Events': 'आयात तालिम कार्यक्रम',
'Import Training Participants': 'तालिमका सहभागीहरुका सूची आयात गर्नुहोस्',
'Import Volunteer Cluster Positions': 'आयात स्वयम्-सेवक समूह पद',
'Import Volunteer Cluster Types': 'आयात स्वयम्-सेवक समूह प्रकार',
'Import Volunteer Clusters': 'आयात स्वयम्-सेवक समूह',
'Import Volunteers': 'आयात स्वयम्-सेवक',
'Improved Production Techniques': 'सुधारिएको उत्पादन उपाय',
'In error': 'गल्तीमा',
'In order to be able to edit OpenStreetMap data from within %(name_short)s, you need to register for an account on the OpenStreetMap server.': '%(name_short)s भित्रबाट खुलासडकनक्सा आँकडा परिवर्तन गर्न सक्नको निम्ति, तपाईंले खुलासडकनक्सा सर्भरमा एउटा एकाउन्ट दर्ता गर्नु पर्दछ ।',
'Inactive': 'निस्कृय',
'InBox': 'इनबक्स',
'Incident': 'घटना',
'Incident Categories': 'घटना प्रकारहरू',
'Incident Reports': 'घटनाहरुको प्रतिवेदन',
'Incidents': 'घटनाहरु',
'Incorrect parameters': 'गलत प्यारामिटर(हरू)',
'Infant (0-1)': 'नवालक (०-१)',
'Infant and Young Child Feeding': 'नवालक र ठूलो बच्चा खुवाउने',
'Information Management': 'सूचना व्यवस्थापन',
'Information Technology': 'सूचना प्रविधि',
'Infrastructure Development': 'पूर्वाधार विकास',
'Inherited?': 'भागिदारमा रहेको?',
'Initials': 'नामका प्रारम्भिक अक्षरहरू',
'injured': 'घाइते',
'input': 'इनपुट',
'Insect Infestation': 'किराबाट हुने संक्रमण',
'Installation of Rainwater Harvesting Systems': 'वर्षा पानि बटुल्ने प्रकृया(हरू) इन्सटलेसन्',
'Instructor': 'सिकाउने',
'insufficient number of pages provided': 'अपर्याप्त पृष्ठ संख्या प्रदान',
'Insufficient Privileges': 'अपर्याप्त मौका(हरू)',
'Insufficient vars: Need module, resource, jresource, instance': 'अपर्याप्त बारहरू: एकाइ, स्रोत, जेस्रोत, अवस्थाको आवश्यकता',
'Insurance ': 'बिमा',
'Integrity error: record can not be deleted while it is referenced by other records': 'अखण्डता गल्ती: अन्य विवरणहरूले उल्लेख गरेको अवस्थामा विवरण हटाउन सकिँदैन ।',
'Intergovernmental': 'सरकारको आन्तरिक',
'Invalid data: record %(id)s not accessible in table %(table)s': 'अमान्य आँकडा: तालिकामा विवरण %(id)s पहुँच हुन नसक्ने %(table)s',
'Invalid form (re-opened in another window?)': 'अमान्य फारम (अर्को विण्डोमा पुन खुला गरिएको छ?)',
'Invalid Location!': 'अमान्य स्थान!',
'Invalid phone number': 'अमान्य फोन नम्बर',
'Invalid phone number!': 'अमान्य फोन नम्बर!',
'Invalid request': 'अमान्य अनुरोध',
'Invalid Site!': 'अमान्य क्षेत्र!',
'Invalid source': 'अमान्य स्रोत',
'Inventory': 'लेखा विवरण',
'Inventory Items': 'लेखा विवरण वस्तु(हरू)',
'Irrigation and Watershed Management': 'सिँचाई र पानि बाँडफाँड ब्यबस्थापन',
'Is editing level L%d locations allowed?': 'के स्तर L%d स्थानहरू परिवर्तन गर्न अनुमति दिइएको छ?',
'Is this a strict hierarchy?': 'के यो कडा बनावट हो?',
'Issuing Authority': 'जारी गर्ने निकाय',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'सकृय भएको स्थानहरू मात्र होइन, यसले परियोजना क्षेत्रको पहुँचमा उपलब्ध गराइएको क्षेत्रको जानकारि पनि लिँदछ ।',
'IT Telecom': 'आइ.टि. टेलिकम',
'Item': 'वस्तु',
'Item Categories': 'वस्तु प्रकारहरू',
'Items': 'वस्तु(हरू)',
'Jewish': 'यहुदि',
'JNAP Priorities': 'जे.एन.ए.पि. प्राथमिकताहरू',
'JNAP-1: Strategic Area 1: Governance': 'जे.एन.ए.पि.-१: लक्षात्मक क्षेत्र १: शासन',
'JNAP-2: Strategic Area 2: Monitoring': 'जे.एन.ए.पि.-२: लक्षात्मक क्षेत्र २: अनुगमन',
'JNAP-3: Strategic Area 3: Disaster Management': 'जे.एन.ए.पि.-३: लक्षात्मक क्षेत्र ३: प्रकोप ब्यबस्थापन',
'JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation': 'जे.एन.ए.पि.-४: लक्षात्मक क्षेत्र ४: खतरा न्यूनिकरण र मौसम परिवर्तन लागु गर्ने कार्य',
'Job added': 'काम संचित गरियो',
'Job deleted': 'काम हटाइयो',
'Job Schedule': 'काम तालिका',
'Job Title': 'पद',
'Job Title added': 'पद संचित गरियो',
'Job Title Catalog': 'पदहरुको विवरण क्याटलग',
'Job Title deleted': 'पद हटाइयो',
'Job Title Details': 'पद विवरण',
'Job Title updated': 'पद परिमार्जन गरियो',
'Job Titles': 'पद',
'Job updated': 'काम परिमार्जन गरियो',
'Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. Applicable to Cook Islands only': ' प्रकोप खतरा ब्यबस्थापन र मौसम परिवर्तन लागु गर्ने कार्यको लागि संयुक्त राष्ट्रिय कार्य योजना । कुक आइस्ल्याण्डको लागि मात्र लागु हुने ।',
'Journal': 'लेख',
'Journal entry added': 'लेख प्रवेश संचित गरियो',
'Journal entry deleted': 'लेख प्रवेश हटाइयो',
'Journal Entry Details': 'लेख प्रवेश विवरण',
'Journal entry updated': 'लेख प्रवेश परिमार्जन गरियो',
'JS Layer': 'जे.एस. तह',
'Keep Duplicate': 'नक्कल प्रति राख्नुहोस्',
'Keep Original': 'सक्कल प्रति राख्नुहोस्',
'Key': 'चाबि',
'Key Value pairs': 'चाबि महत्व जोडी(हरू)',
'Keyword': 'मुख्यशब्द',
'Keyword Added': 'मुख्यशब्द संचित गरियो',
'Keyword Deleted': 'मुख्यशब्द हटाइयो',
'Keyword Updated': 'मुख्यशब्द परिमार्जन गरियो',
'Keywords': 'मुख्यशब्द(हरू)',
'Kit': 'किट',
'KML Layer': 'के.एम.एल. तह',
'Knowledge Management': 'ज्ञान व्यवस्थापन',
'Land Slide': 'पहिरो',
'Language': 'भाषा',
'Last': 'अन्तिम',
'Last Checked': 'अन्तिम जाँच',
'Last Contacted': 'अन्तिममा सम्पर्क गरिएको',
'Last known location': 'अन्तिम थाहा भएको स्थान',
"Last Month's Work": 'अन्तिम महिनाको काम',
'Last Name': 'अन्तिम नाम',
'Last run': 'अन्तिम प्रयोग',
'Last status': 'अन्तिम अवस्था',
'Last updated': 'अन्तिम परिमार्जन गरियो ',
"Last Week's Work": 'अन्तिम हप्ताको काम',
'Latitude': 'अक्षांश',
'Latitude & Longitude': 'अक्षांश र देशान्तर',
'Latitude and Longitude are required': 'अक्षांश र देशान्तर आवश्यक पर्ने',
'Latitude is Invalid!': 'अक्षांश अमान्य!',
'Latitude is North - South (Up-Down).': 'अक्षांश उत्तर - दक्षिण (माथी-तल)।',
'Latitude is North-South (Up-Down).': 'अक्षांश उत्तर-दक्षिण (माथी-तल)।',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'इक्वेटरमा अक्षांश सुन्य र उत्तरी गोलार्धमा सकरात्मक र दक्षिणि गोलार्धमा नकरात्मक',
'Latitude must be between -90 and 90.': 'अक्षांश -९0 र ९0 मध्येमा हुनुपर्छ ।',
'Latitude of far northern end of the region of interest.': 'उत्तरको अन्त्य रूचीको क्षेत्रको अक्षांश',
'Latitude of far southern end of the region of interest.': 'दक्षिणको अन्त्य रूचीको क्षेत्रको अक्षांश',
'Latitude of Map Center': 'मध्य नक्साको अक्षांश ',
'Latitude should be between': 'अक्षांश मध्ये हुनुपर्छ',
'Latrine Construction': 'चर्पि निर्माण',
'Layer': 'तह',
'Layer added': 'तह संचित गरियो',
'Layer deleted': 'तह हटाइयो',
'Layer Details': 'तह विवरण',
'Layer has been Disabled': 'तह निस्कृय गरियो',
'Layer has been Enabled': 'तह सकृय गरियो',
'Layer Name': 'तह नाम',
'Layer Properties': 'तह प्रपटिज्',
'Layer removed from Symbology': 'तह चिन्हताबाट हटाइयो',
'Layer updated': 'तह परिमार्जन गरियो',
'Layers': 'तह(हरू)',
'Lead Implementer': 'मुख्य लागुकर्ता',
'Lead Implementer for this project is already set, please choose another role.': 'यो परियोजनाको मुख्य लागुकर्ता पहिले नै राखिएको छ, कृपय अर्को भूमिका छान्नुहोस्',
'Leader': 'अगुवा',
'Left-side is fully transparent (0), right-side is opaque (1.0).': 'बायाँ-तर्फ पूर्ण पारदर्शी (०), दायाँ-तर्फ अपारदर्शी (१.०) ।',
'Legal Approvals': 'कानूनी प्रमाणिकरणहरू',
'Legend': 'संकेत सूची',
'Legend URL': 'संकेत सूची यू.आर.एल.',
'less': 'थोरै',
'Less Options': 'कम्ति विकल्पहरू',
'Level': 'स्तर',
"Level is higher than parent's": 'परिवारको भन्दा स्तर माथि',
'Level of Award': 'पुरस्कारको स्तर',
'Level of competency this person has with this skill.': 'यो ब्यक्तिसँग भएको सिपको आधारमा प्रतिस्पर्धा को स्तर ।',
'License Number': 'अनुमति-पत्र संख्या',
'light': 'लाइट',
'Link to this result': 'यो नतिजामा जोड्नुहोस्',
'List': 'तालिका',
'List %(site_label)s Status': 'तालिका %(site_label)s अवस्था',
'List Activities': 'कृयाकलापहरूलाई तालिकामा राख्नुहोस्',
'List Activity Organizations': 'कृयाकलाप ब्यबस्थानहरूलाई तालिकामा राख्नुहोस्',
'List Activity Types': 'कृयाकलाप प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Addresses': 'ठेगाना(हरू)लाई तालिकामा राख्नुहोस्',
'List Affiliations': 'स्वीकृती(हरू)लाई तालिकामा राख्नुहोस्',
'List All': 'सबैलाई तालिकामा राख्नुहोस्',
'List All Community Contacts': 'सम्पूर्ण समुदाय सम्पर्कहरूलाई तालिकामा राख्नुहोस्',
'List Annual Budgets': 'वार्षिक बजेटहरूलाई तालिकामा राख्नुहोस्',
'List Awards': 'पुरस्कारहरूलाई तालिकामा राख्नुहोस्',
'List Beneficiaries': 'भागिदारहरूलाई तालिकामा राख्नुहोस्',
'List Beneficiary Types': 'भागिदार प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Branch Organizations': 'शाखा संस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Campaigns': 'क्याम्पिन(हरू)लाई तालिकामा राख्नुहोस्',
'List Certificates': 'प्रमाण-पत्र(हरू)लाई तालिकामा राख्नुहोस्',
'List Certifications': 'प्रमाणिकरण(हरू)लाई तालिकामा राख्नुहोस्',
'List Clusters': 'समूह(हरू)लाई तालिकामा राख्नुहोस्',
'List Coalitions': 'संघहरूलाई तालिकामा राख्नुहोस्',
'List Communities': 'समुदाय(हरू)लाई तालिकामा राख्नुहोस्',
'List Competency Ratings': 'प्रतिस्पर्धा स्तर(हरू)लाई तालिकामा राख्नुहोस्',
'List Contact Information': 'सम्पर्क जानकारीलाई तालिकामा राख्नुहोस्',
'List Contacts': 'सम्पर्क(हरू)लाई तालिकामा राख्नुहोस्',
'List Course Certificates': 'पाठ्यक्रम प्रमाण-पत्र(हरू)लाई तालिकामा राख्नुहोस्',
'List Courses': 'पाठ्यक्रम(हरू)लाई तालिकामा राख्नुहोस्',
'List Credentials': 'कागजात(हरू)लाई तालिकामा राख्नुहोस्',
'List Data in Theme Layer': 'स्वरूप तहको आँकडालाई तालिकामा राख्नुहोस्',
'List Departments': 'मन्त्रालय(हरू)लाई तालिकामा राख्नुहोस्',
'List Deployments': 'परियोजनहरूलाई तालिकामा राख्नुहोस्',
'List Donors': 'दाता(हरू)लाई तालिकामा राख्नुहोस्',
'List Education Details': 'शिक्षा विवरणलाई तालिकामा राख्नुहोस्',
'List Facilities': 'सूबिधाहरूलाई तालिकामा राख्नुहोस्',
'List Facility Types': 'सुविधा प्रकारहरूलाई तालिकामा राख्नुहोस्',
'List Feature Layers': 'विशेषता तहज(हरू)लाई तालिकामा राख्नुहोस्',
'List Groups': 'समूह(हरू)लाई तालिकामा राख्नुहोस्',
'List Hazards': 'खतराहरूलाई तालिकामा राख्नुहोस्',
'List Hours': 'समय (घण्टा)लाई तालिकामा राख्नुहोस्',
'List Identities': 'परिचयहरूलाई तालिकामा राख्नुहोस्',
'List Images': 'तस्विर(हरू)लाई तालिकामा राख्नुहोस्',
'List Job Titles': 'पद लाई तालिकामा राख्नुहोस्',
'List Jobs': 'काम लाई तालिकामा राख्नुहोस्',
'List Keywords': 'मुख्यशब्द(हरू)लाई तालिकामा राख्नुहोस्',
'List Layers': 'तह(हरू)लाई तालिकामा राख्नुहोस्',
'List Layers in Profile': 'प्रोफाइलको तहहरूलाई तालिकामा राख्नुहोस्',
'List Layers in Symbology': 'चिन्हताको तहहरूलाई तालिकामा राख्नुहोस्',
'List Location Hierarchies': 'स्थान संरचनाहरूलाई तालिकामा राख्नुहोस्',
'List Locations': 'स्थान(हरू)लाई तालिकामा राख्नुहोस्',
'List Log Entries': 'दर्ता भर्नालाई तालिकामा राख्नुहोस्',
'List Logged Time': 'समय विवरणहरूलाई तालिकामा राख्नुहोस्',
'List Mailing Lists': 'ठेगाना सूचीहरूलाई तालिकामा राख्नुहोस्',
'List Map Profiles': 'नक्सा बनावटहरूलाई तालिकामा राख्नुहोस्',
'List Markers': 'चिन्हहरूलाई तालिकामा राख्नुहोस्',
'List Members': 'सदस्यहरूलाई तालिकामा राख्नुहोस्',
'List Membership Types': 'सदस्यता प्रकारहरूलाई तालिकामा राख्नुहोस्',
'List Memberships': 'सदस्यताहरूलाई तालिकामा राख्नुहोस्',
'List Milestones': 'कोशेढुङ्गा(हरू)लाई तालिकामा राख्नुहोस्',
'List Networks': 'नेटवर्क(हरू)लाई तालिकामा राख्नुहोस्',
'List of Appraisals': 'मुल्यांकन(हरू)को तालिका',
'List of Facilities': 'सूबिधाहरूको तालिका',
'List of Professional Experience': 'ब्यबसायिक अनुभवको तालिका',
'List of Roles': 'भूमिका(हरू)को तालिका',
'List Office Types': 'कार्यालय प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Offices': 'कार्यालय(हरू)लाई तालिकामा राख्नुहोस्',
'List Organization Types': 'संस्था प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Organizations': 'संस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Outputs': 'नतिजा(हरू)लाई तालिकामा राख्नुहोस्',
'List Participants': 'सहभागी(हरू)लाई तालिकामा राख्नुहोस्',
'List Partner Organizations': 'साझेदार संस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Persons': 'ब्यक्ति(हरू)लाई तालिकामा राख्नुहोस्',
"List Persons' Details": 'ब्यक्तिहरूको विवरणलाई तालिकामा राख्नुहोस्',
'List PoI Types': 'धुर्व प्रकारहरूलाई तालिकामा राख्नुहोस्',
'List Points of Interest': 'रूचीको बुँदालाई तालिकामा राख्नुहोस्',
'List Policies & Strategies': 'नियम तथा उद्देश्य(हरू)लाई तालिकामा राख्नुहोस्',
'List Profiles configured for this Layer': 'यो तहको लागि प्रोफाइलहरूको बनावटलाई तालिकामा राख्नुहोस्',
'List Programs': 'कार्यक्रम(हरू)लाई तालिकामा राख्नुहोस्',
'List Project Organizations': 'परियोजना संस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Projections': 'योजना(हरू)लाई तालिकामा राख्नुहोस्',
'List Projects': 'परियोजना(हरू)लाई तालिकामा राख्नुहोस्',
'List Records': 'विवरण(हरू)लाई तालिकामा राख्नुहोस्',
'List Red Cross & Red Crescent National Societies': 'रेड क्रस र रेड क्रिसेन्ट राष्ट्रिय सोसाइटि(हरू)लाई तालिकामा राख्नुहोस्',
'List Regions': 'क्षेत्र(हरू)लाई तालिकामा राख्नुहोस्',
'List Response Summaries': 'प्रतिकृया संक्षेप(हरू)लाई तालिकामा राख्नुहोस्',
'List Roles': 'भूमिका(हरू)लाई तालिकामा राख्नुहोस्',
'List Rooms': 'कोठा(हरू)लाई तालिकामा राख्नुहोस्',
'List saved searches': 'संचित खोजीहरूको तालिका',
'List Sectors': 'क्षेत्र(हरू)लाई तालिकामा राख्नुहोस्',
'List Services': 'सेवा(हरू)लाई तालिकामा राख्नुहोस्',
'List Skill Equivalences': 'सिप सरह(हरू)लाई तालिकामा राख्नुहोस्',
'List Skill Types': 'सिप प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Skills': 'सिप(हरू)लाई तालिकामा राख्नुहोस्',
'List Staff & Volunteers': 'कर्मचारी तथा स्वयम्-सेवक(हरू)लाई तालिकामा राख्नुहोस्',
'List Staff Assignments': 'कर्मचारी काम(हरू)लाई तालिकामा राख्नुहोस्',
'List Staff Members': 'कर्मचारी सदस्य(हरू)लाई तालिकामा राख्नुहोस्',
'List Statuses': 'अवस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Symbologies': 'चिन्हताहरूलाई तालिकामा राख्नुहोस्',
'List Symbologies for Layer': 'तहको चिन्हता(हरू)लाई तालिकामा राख्नुहोस्',
'List Tasks': 'काम(हरू)लाई तालिकामा राख्नुहोस्',
'List Teams': 'समूह(हरू)लाई तालिकामा राख्नुहोस्',
'List Themes': 'स्वरूप(हरू)लाई तालिकामा राख्नुहोस्',
'List Training Events': 'तालिम कार्यक्रम(हरू)लाई तालिकामा राख्नुहोस्',
'List Trainings': 'तालिम(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteer Cluster Positions': 'स्वयम्-सेवक समूह पद(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteer Cluster Types': 'स्वयम्-सेवक समूह प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteer Clusters': 'स्वयम्-सेवक समूह(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteer Roles': 'स्वयम्-सेवक भूमिका(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteers': 'स्वयम्-सेवक(हरू)लाई तालिकामा राख्नुहोस्',
'Live Help': 'प्रत्यक्ष सहयोग',
'Livelihood / CTP': 'जिविका / सि.टि.पि.',
'Livelihood Manager': 'जिविका व्यवस्थापक',
'Livelihoods': 'जिविका(हरू)',
'Load': 'लोड गर्नुहोस्',
'Load Cleaned Data into Database': 'सफा गरिएको आँकडालाई डाटाबेसमा लोड गर्नुहोस्',
'Load Raw File into Grid': 'कच्चा फाइललाई ग्रिडमा लोड गर्नुहोस्',
'Loading': 'लोड हुँदैछ',
'Local Currency': 'स्थानिय मुद्रा',
'Local Name': 'स्थानिय नाम',
'Local Names': 'स्थानिय नाम(हरू)',
'Location': 'स्थान',
'Location added': 'स्थान संचित गरियो',
'Location Added': 'स्थान संचित गरियो',
'Location added to Organization': 'संस्थामा स्थान संचित गरियो',
'Location deleted': 'स्थान हटाइयो',
'Location Deleted': 'स्थान हटाइयो',
'Location Detail': 'स्थानको अक्षांश, देशान्तर',
'Location Details': 'स्थान विवरण',
'Location Found': 'स्थान भेटियो',
'Location Group': 'स्थान समूह',
'Location Hierarchies': 'स्थान संरचनाहरू',
'Location Hierarchy': 'स्थान बनावट',
'Location Hierarchy added': 'स्थान बनावट संचित गरियो',
'Location Hierarchy deleted': 'स्थान बनावट हटाइयो',
'Location Hierarchy Level 1 Name': 'स्थान बनावट स्तर १ नाम',
'Location Hierarchy Level 2 Name': 'स्थान बनावट स्तर २ नाम',
'Location Hierarchy Level 3 Name': 'स्थान बनावट स्तर ३ नाम',
'Location Hierarchy Level 4 Name': 'स्थान बनावट स्तर ४ नाम',
'Location Hierarchy Level 5 Name': 'स्थान बनावट स्तर ५ नाम',
'Location Hierarchy updated': 'स्थान बनावट परिमार्जन गरियो',
'Location is Required!': 'स्थान आवश्यक छ!',
'Location needs to have WKT!': 'स्थानमा डब्लु.के.टि. आवश्यक छ!',
'Location NOT Found': 'स्थान भेटिएन',
'Location removed from Organization': 'संस्थाबाट स्थान हटाइयो ',
'Location updated': 'स्थान परिमार्जन गरियो',
'Locations': 'स्थान(हरू)',
'Locations of this level need to have a parent of level': 'यो स्तरका स्थान(हरू)को अभिभावक यस स्तरको हुन आवश्यक छ',
'Log entry added': 'दर्ताप्रवेश संचित गरियो',
'Log Entry Deleted': 'दर्ताप्रवेश हटाइयो',
'Log Entry Details': 'दर्ताप्रवेश विवरण',
'Log entry updated': 'दर्ताप्रवेश परिमार्जन गरियो',
'Log New Time': 'नयाँ समय दर्ता गर्नुहोस्',
'Log Time Spent': 'खर्च गरिएको समय दर्ता गर्नुहोस्',
'Logged Time': 'समय तालिका',
'Logged Time Details': 'समय तालिका विवरण',
'Login': 'लग-इन',
'login': 'लगिन',
'Login using Facebook account': 'फेसबुक एकाउन्ट प्रयोग गरि लग-इन गर्नुहोस्',
'Login using Google account': 'गुगल एकाउन्ट प्रयोग गरि लग-इन गर्नुहोस्',
'Login with Facebook': 'फेसबुक एकाउन्टद्वारा लग-इन गर्नुहोस्',
'Login with Google': 'गुगल एकाउन्टद्वारा लग-इन गर्नुहोस्',
'Logistics & Warehouses': 'वन्दोवस्ती र गोदामघर',
'Logo': 'लोगो',
'Logout': 'लग-आउट',
'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'संस्थाको लोगो । यो png वा jpeg फाइल हुनुपर्छ र ४००x४०० भन्दा ठूलो हुनुहुँदैन ।',
'long': 'लामो',
'Long Name': 'लामो नाम',
'long>12cm': 'लामो>१२ से.मी.',
'Longitude': 'देशान्तर',
'Longitude is Invalid!': 'देशान्तर अमान्य!',
'Longitude is West - East (sideways).': 'देशान्तर पश्चिम-पूर्व (साइडवेज्).',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'केन्द्रबिन्दुमा (ग्रिन्वीच, बेलायत) देशान्तर सून्य हुन्छ र पूर्व तर्फ, यूरोप र एसियामा सकरात्मक हुन्छ । पश्चिमतर्फ एथ्लान्टिक र अमेरीका तर्फ देशान्तर नकरात्मक हुन्छ ।',
'Longitude must be between -180 and 180.': 'देशान्तर -१८० र १८० भित्र हुनुपर्छ ।',
'Longitude of far eastern end of the region of interest.': 'चाहेको पुर्विय क्षेत्रको देशान्तर',
'Longitude of far western end of the region of interest.': 'चाहेको पश्चिमी क्षेत्रको देशान्तर',
'Longitude of Map Center': 'नक्साको केन्द्रबिन्दुको देशान्तर',
'Longitude should be between': 'देशान्तर को मध्येमा हुनुपर्छ',
'Lost': 'हरायो',
'Lost Password': 'पासवर्ड हरायो',
'Low': 'तल्लो',
'Mailing list': 'ठेगाना तालिका',
'Mailing list added': 'ठेगाना तालिका संचित गरियो',
'Mailing list deleted': 'ठेगाना तालिका हटाइयो',
'Mailing List Details': 'ठेगाना तालिका विवरण',
'Mailing List Name': 'ठेगाना तालिका नाम',
'Mailing list updated': 'ठेगाना तालिका परिमार्जन गरियो',
'Mailing Lists': 'ठेगाना तालिकाहरू',
'Main Duties': 'मुख्य जिम्मेवारी(हरू)',
'Main?': 'मुख्य?',
'Mainstreaming DRR': 'विपद् जोखिम न्यूनीकरण मूलप्रवाहीकरण',
'Major': 'प्रमुख',
'male': 'पुरुष',
'Manage Layers in Catalog': 'तालिकाको तह ब्यबस्थापन',
'Manage National Society Data': 'राष्ट्रिय सोसाइटीको तथ्यांक व्यवस्थापन',
'Manage Offices Data': 'कार्यालयहरुको तथ्यांक व्यवस्थापन',
'Manage office inventories and assets.': 'कार्यालय सामग्रीहरुको व्यवस्थापन',
'Manage Staff Data': 'कर्मचारीको तथ्यांक ब्यवस्थापन',
'Manage Teams Data': 'समूहको तथ्यांक व्यवस्थापन',
'Manage Your Facilities': 'तपाईंको सूबिधाहरू ब्यबस्थापन गर्नुहोस्',
'Managing material and human resources together to better prepare for future hazards and vulnerabilities.': 'भविष्यमा हुने जोखिम र संकटासन्नताको लागि तयार हुन मानव स्रोत तथा सामग्रीहरुको व्यवस्थापन',
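# Translator note (assumption: the example URLs in the strings below are copied
# verbatim into user configuration): leave URLs such as
# http://host.domain/geoserver/wfs? and identifiers like 'getCapabilities' or
# 'FeatureType' untranslated and unmodified.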
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'आवश्यक । जिओ सर्भरमा, यो तहको नाम हो । WFS getCapabilities भित्र, यो FeatureType नामको कोलन(:) पछिको भाग हो ।',
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?': 'आवश्यक. सेवामा पहुँचको निम्ति आधारभुत यू.आर.एल. जस्तै, http://host.domain/geoserver/wfs?',
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?': 'आवश्यक. सेवामा पहुँचको निम्ति आधारभुत यू.आर.एल. जस्तै, http://host.domain/geoserver/wms?',
'Map': 'नक्सा',
'Map cannot display without prepop data!': 'आँकडाबिना नक्सा देखाउन सकिँदैन !',
'Map Center Latitude': 'नक्सा केन्द्रिय अक्षांश',
'Map Center Longitude': 'नक्सा केन्द्रिय देशान्तर',
'Map Profile': 'नक्सा बनावट',
'Map Profile added': 'नक्सा बनावट संचित गरियो',
'Map Profile deleted': 'नक्सा बनावट हटाइयो',
'Map Profile updated': 'नक्सा बनावट परिमार्जन गरियो',
'Map Profiles': 'नक्सा बनावट(हरू)',
'Map has been copied and set as Default': 'नक्सा कपि गरिएको छ र स्वचलानमा रहेको छ',
'Map has been set as Default': 'नक्सा स्वचलानमा रहेको छ',
'Map is already your Default': 'नक्सा पहिलेनै स्वचलानमा रहेको छ',
'Map not available: Cannot write projection file - %s': 'नक्सा उपलब्ध छैन: योजना फाइल राख्न सकिँदैन- %s',
'Map not available: No Projection configured': 'नक्सा उपलब्ध छैन: कुनैपनि योजना बनावट छैन',
'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'नक्सा उपलब्ध छैन: योजना %(projection)s मिलेन - कृपया %(path)s मा परिभाषा राख्नुहोस्',
'Map of Communities': 'समुदाय(हरू)को नक्सा',
'Map of Facilities': 'सूबिधाहरूको नक्सा',
'Map of Offices': 'कार्यालयहरूको नक्सा',
'Map of Projects': 'परियोजनाहरूको नक्सा',
'Map of Resources': 'स्रोत(हरू)को नक्सा',
'Map Settings': 'नक्सा सेटिङ(हरू)',
'Map Viewing Client': 'नक्सा हेर्ने प्रयोगकर्ता',
'Map Zoom': 'नक्सा जुम',
'Marital Status': 'वैवाहिक अवस्था',
'Mark as duplicate': 'नक्कल प्रतिकोरूपमा चिन्ह लगाउनुहोस्',
'Marker': 'चिन्ह',
'Marker added': 'चिन्ह संचित गरियो',
'Marker deleted': 'चिन्ह हटाइयो',
'Marker Details': 'चिन्ह विवरण',
'Marker updated': 'चिन्ह परिमार्जन गरियो',
'Markers': 'चिन्ह(हरू)',
'Markets/Marketing Analysis, Linkages and Support': 'बजार(हरू)/बजारिकरण अनुसन्धान, सम्पर्कहरू र सहयोग',
'married': 'विवाहित',
'Matching Records': 'मिलेको विवरण(हरू)',
'Max': 'बढीमा',
'Maximum Extent': 'बढी मात्रा',
'Maximum Location Latitude': 'बढी स्थान अक्षांश',
'Maximum Location Longitude': 'बढी स्थान देशान्तर',
'Maximum must be greater than minimum': 'बढि, कम्तिभन्दा धेरै हुनुपर्छ',
'Measure Area: Click the points around the polygon & end with a double-click': 'नाप क्षेत्र: बहुभुजा वरिपरिको बिन्दुहरूमा क्लिक गर्नुहोस् र दूइपटक क्लीक गरेर अन्त्य गर्नुहोस्',
'Measure Length: Click the points along the path & end with a double-click': 'नाप लम्बाइ: बाटोको वरिपरिको बिन्दुहरूमा क्लिक गर्नुहोस् र दूइपटक क्लीक गरेर अन्त्य गर्नुहोस्',
'Medical Conditions': 'मेडिकल अवस्था(हरू)',
'Medical Services': 'चिकित्सा सेवा',
'Medical Supplies and Equipment': 'मेडिकल पुर्ती(हरू) र सामाग्री',
'Media': 'सञ्चार माध्यम',
'medium': 'मध्य',
'Medium': 'मध्य',
'medium<12cm': 'मध्य<१२ से.मी.',
'Member': 'सदस्य',
'Member added': 'सदस्य संचित गरियो',
'Member deleted': 'सदस्य हटाइयो',
'Member Details': 'सदस्य विवरण',
'Member ID': 'सदस्य आइ.डि.',
'Member Organizations': 'सदस्य संस्थाहरू',
'Member updated': 'सदस्य परिमार्जन गरियो',
'Members': 'सदस्यहरू',
'Membership': 'सदस्यता',
'Membership added': 'सदस्यता संचित गरियो',
'Membership Approved': 'सदस्यता स्वीकृत गरियो',
'Membership deleted': 'सदस्यता हटाइयो',
'Membership Details': 'सदस्यता विवरण',
'Membership Fee': 'सदस्यता शुल्क',
'Membership Type added': 'सदस्यता प्रकार संचित गरियो',
'Membership Type deleted': 'सदस्यता प्रकार हटाइयो',
'Membership Type Details': 'सदस्यता प्रकार विवरण',
'Membership Type updated': 'सदस्यता प्रकार परिमार्जन गरियो',
'Membership Types': 'सदस्यता प्रकार(हरू)',
'Membership updated': 'सदस्यता परिमार्जन गरियो',
'Memberships': 'सदस्यता(हरू)',
'Menu': 'मेनु',
'Merge': 'एकै गर्नुहोस्',
'Merge records': 'विवरण(हरू) एकै गर्नुहोस्',
'Message': 'सन्देश',
'Method disabled': 'शैली निस्कृय गरियो',
'MGRS Layer': 'एम.जि.आर.एस. तह',
'Middle Name': 'बीचको नाम',
'Milestone': 'कोशेढुङ्गा',
'Milestone Added': 'कोशेढुङ्गा संचित गरियो',
'Milestone Deleted': 'कोशेढुङ्गा हटाइयो',
'Milestone Details': 'कोशेढुङ्गा विवरण',
'Milestone Updated': 'कोशेढुङ्गा परिमार्जन गरियो',
'Milestones': 'कोशेढुङ्गा(हरू)',
'Military': 'सैनिक',
'Min': 'कम्ति',
'Minimum Location Latitude': 'कम्ति स्थान अक्षांश',
'Minimum Location Longitude': 'कम्ति स्थान देशान्तर',
'Minute': 'मिनेट',
'Minutes must be a number.': 'मिनेट संख्यामा नै हुनुपर्छ ।',
'Minutes must be less than 60.': 'मिनेट ६० भन्दा कम हुनुपर्छ ।',
'Missing': 'हराइरहेको',
'missing': 'हराएको',
'Mission': 'मिसन',
'Missions': 'मिसन(हरू)',
'Mobile': 'मोबाइल',
'Mobile Health Units': 'मोबाइल स्वास्थ्य इकाई(हरू)',
'Mobile Phone': 'मोबाइल फोन',
'Mobile Phone Number': 'मोबाइल फोन नम्बर',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'विशेषता परिवर्तन: परिवर्तन गर्न चाहानुभएको फारमलाई छान्नुहोस् र बिन्दुमध्येबाट एउटालाई घिस्याएर तपाईँले चाहे अनुसार राख्नुहोस् ।',
'mongoloid': 'मंगोलोइड',
'Monitoring and Evaluation': 'अनुगमन तथा मूल्यांकन',
'Month': 'महिना',
'Monthly': 'महिनावरी',
'more': 'थप',
'More Info': 'थप जानकारी',
'More Options': 'थप विकल्प(हरू)',
'more...': 'थप...',
'Morgue': 'मुर्दाघर',
'Moustache': 'जुँगा',
'Move Feature: Drag feature to desired location': 'विशेषता सार्नुहोस्: विशेषतालाई चाहेको स्थानमा घिसार्नुहोस्',
'Multiple': 'बहुमुखी',
'Muslim': 'मुस्लिम',
'Must a location have a parent location?': 'के स्थानको अभिभावक स्थान हुनुपर्छ?',
'My Logged Hours': 'मेरो समय (घण्टा) तालिका',
'My Maps': 'मेरो नक्साहरू',
'My Open Tasks': 'मेरो खुला कामहरू',
'My Profile': 'मेरो प्रोफाइल',
'My Tasks': 'मेरो कामहरू',
'Name': 'नाम',
'Name and/or ID': 'नाम र/वा आइ.डि.',
'Name field is required!': 'नाम क्षेत्र आवश्यक छ!',
'Name of a programme or another project which this project is implemented as part of': 'कार्यक्रमको नाम वा अर्को परियोजना जस्मा यो परियोजना एउटा भागको रूपमा समावेस छ',
'Name of Award': 'पुरस्कारको नाम',
'Name of Father': 'बाबुको नाम',
'Name of Institute': 'शैक्षिक संस्थाको नाम',
'Name of Map': 'नक्साको नाम',
'Name of Mother': 'आमाको नाम',
'Name of the person in local language and script (optional).': 'स्थानिय भाषा तथा लिपिमा ब्यक्तिको नाम (वैकल्पिक) ।',
'National': 'राष्ट्रिय',
'National ID Card': 'राष्ट्रिय आइ.डि. कार्ड',
'National Societies': 'राष्ट्रिय सोसाइटी(हरू)',
'National Society': 'राष्ट्रिय सोसाइटी',
'National Society / Branch': 'राष्ट्रिय सोसाइटी / शाखा',
'National Society added': 'राष्ट्रिय सोसाइटी संचित गरियो',
'National Society deleted': 'राष्ट्रिय सोसाइटी हटाइयो',
'National Society Details': 'राष्ट्रिय सोसाइटी विवरण',
'National Society updated': 'राष्ट्रिय सोसाइटी परिमार्जन गरियो',
'Nationality': 'राष्ट्रियता',
'Nationality of the person.': 'ब्यक्तिको राष्ट्रियता',
'NDRT (National Disaster Response Teams)': 'राष्ट्रिय विपद् प्रतिकार्य समूहहरु',
"Need a 'url' argument!": "'url' आर्गुमेन्ट आवश्यक छ!",
'Needs': 'आवश्यकताहरू',
'negroid': 'नेग्रोइड',
'Network added': 'नेटवर्क संचित गरियो',
'Network Details': 'नेटवर्क विवरण',
'Network removed': 'नेटवर्क हटाइयो',
'Network updated': 'नेटवर्क परिमार्जन गरियो',
'Networks': 'नेटवर्क(हरू)',
'Never': 'कहिल्यै पनि होइन',
'New': 'नयाँ',
'new ACL': 'नयाँ एसिएल',
'New Annual Budget created': 'नयाँ वार्षिक बजेट बनाइयो',
'New Deployment': 'नयाँ परिचालन',
'New Entry': 'नयाँ प्रवेश',
'New Hazard': 'नयाँ खतरा',
'New Location': 'नयाँ स्थान',
'New Organization': 'नयाँ संस्था',
'Add Output': 'नतिजा थप्नुहोस्',
'New Post': 'नयाँ लेख',
'New Records': 'नयाँ विवरण(हरू)',
'New Role': 'नयाँ भूमिका',
'New Sector': 'नयाँ क्षेत्र',
'New Service': 'नयाँ सेवा',
'New Theme': 'नयाँ स्वरूप',
'New updates are available.': 'नयाँ परिमार्जनहरू उपलब्ध छन्',
'News': 'समाचारहरू',
'Next': 'अर्को',
'Next run': 'अर्को सञ्चालन',
'Next View': 'अर्को दृश्य',
'NGO': 'गैर-सरकारी संस्था',
'no': 'छैन/होइन',
'No': 'छैन/हुँदैन/पर्दैन',
'No access to this record!': 'यो विवरणमा कुनै पहुँच छैन!',
'No Activities Found': 'कुनै कृयाकलापहरू प्राप्त भएन',
'No Activity Organizations Found': 'कुनैपनि कृयाकलाप संस्थाहरू प्राप्त भएन',
'No Activity Types Found': 'कुनैपनि कृयाकलाप प्रकारहरू प्राप्त भएन',
'No Activity Types found for this Activity': 'यसको लागि कुनैपनि कृयाकलाप प्रकारहरू प्राप्त भएन',
'No Activity Types found for this Project Location': 'यो परियोजना स्थानको लागि कुनैपनि कृयाकलाप प्रकारहरू प्राप्त भएन',
'No Affiliations defined': 'कुनैपनि आबद्धता परिभाषित गरिएको छैन',
'No annual budgets found': 'कुनैपनि वार्षिक बजेट(हरू) प्राप्त भएन',
'No Appraisals found': 'कुनैपनि मुल्यांकनहरू प्राप्त भएन',
'No Awards found': 'कुनैपनि परस्कारहरू प्राप्त भएन',
'No Base Layer': 'कुनैपनि आधारभुत तह छैन',
'No Beneficiaries Found': 'कुनैपनि भागिदारहरू प्राप्त भएन',
'No Beneficiary Types Found': 'कुनैपनि भागिदार प्रकारहरू प्राप्त भएन',
'No Branch Organizations currently registered': 'कुनैपनि शाखा संस्थाहरू हाल दर्ता गरिएको छैन',
'No Campaigns Found': 'कुनैपनि अभियानहरू प्राप्त भएन',
'No Clusters currently registered': 'कुनैपनि समूहहरू हाल दर्ता गरिएको छैन',
'No Coalitions currently recorded': 'हाल कुनैपनि गठबन्धनहरूको विवरण राखिएको छैन',
'No Communities Found': 'कुनैपनि समुदाय(हरू) प्राप्त भएन',
'No contact information available': 'कुनैपनि सम्पर्क जानकारी उपलब्ध छैनन्',
'No contact method found': 'कुनैपनि सम्पर्क शैली प्राप्त भएन',
'No Contacts currently registered': 'हाल कुनैपनि सम्पर्कहरू दर्ता गरिएको छैन',
'No Contacts Found': 'कुनैपनि सम्पर्कहरू प्राप्त भएन',
'No data available': 'कुनैपनि आँकडा उपलब्ध छैन',
'No data available in table': 'तालिकामा कुनैपनि आँकडा उपलब्ध छैन',
'No Data currently defined for this Theme Layer': 'यो स्वरूप समूहको लागि कुनैपनि आँकडाहारू छैनन्',
'No Deployments currently registered': 'हाल कुनैपनि परिचालनहरू दर्ता गरिएको छैन',
'No Donors currently registered': 'हाल कुनैपनि दाताहरू दर्ता गरिएको छैन',
'No education details currently registered': 'हाल कुनैपनि शिक्षा विवरण हाल दर्ता गरिएको छैन',
'No entries currently available': 'हाल कुनैपनि डाटा छैन',
'No entries found': 'कुनैपनि डाटा प्राप्त भएन',
'No entry available': 'कुनैपनि प्रवेश उपलब्ध छैनन्',
'No Facilities currently registered': 'हाल कुनैपनि सूबिधाहरू दर्ता गरिएको छैन',
'No Facility Types currently registered': 'हाल कुनैपनि सुविधा प्रकारहरू दर्ता गरिएको छैन',
'No Feature Layers currently defined': 'हाल कुनैपनि विशेषता तहहरू हाल परिभाषित गरिएको छैन',
'No forms to the corresponding resource have been downloaded yet.': 'अहिलेसम्म सम्बन्धित स्रोतको कुनैपनि फारमहरू डाउनलोड गरिएको छैन',
'No further users can be assigned.': 'थप प्रयोगकर्ता समावेस गर्न सकिँदैन',
'No Groups currently registered': 'हाल कुनैपनि समूहहरू हाल दर्ता गरिएको छैन',
'No Hazards currently registered': 'हाल कुनैपनि खतराहरू हाल दर्ता गरिएको छैन',
'No Hazards found for this Project': 'हाल यस परियोजनाको लागि कुनैपनि खतराहरू छैनन्',
'No Identities currently registered': 'हाल कुनैपनि परिचयहरू दर्ता गरिएको छैन',
'No Images currently registered': 'हाल कुनैपनि तस्विरहरू दर्ता गरिएको छैन',
'No jobs configured': 'कुनैपनि कामहरू मिलाइएको छैन',
'No jobs configured yet': 'हालसम्म कुनैपनि कामहरू मिलाइएको छैन',
'No Keywords Found': 'कुनैपनि मुख्यशब्द(हरू) प्राप्त भएन ',
'No Layers currently configured in this Profile': 'हाल यो प्रोफाइलको लागि हाल कुनैपनि तहहरू बनावट रहेको छैन',
'No Layers currently defined': 'हाल कुनैपनि तहहरू परिभाषित गरिएको छैन',
'No Layers currently defined in this Symbology': 'यो चिन्हताको लागि हाल कुनैपनि तहहरू परिभाषित गरिएको छैन',
'No Location Hierarchies currently defined': 'हाल कुनैपनि स्थान संरचनाहरू परिभाषित गरिएको छैन',
'No location information defined!': 'कुनैपनि स्थान जानकारी परिभाषित गरिएको छैन!',
'No Locations currently available': 'हाल कुनैपनि स्थानहरू उपलब्ध हुन सकेन',
'No Locations Found': 'कुनैपनि स्थानहरू प्राप्त भएन',
'No Locations found for this Organization': 'यो संस्थाको लागि कुनैपनि स्थानहरू प्राप्त भएन',
'No Mailing List currently established': 'हाल कुनैपनि ठेगाना तालिका राखिएको छैन',
'No Map Profiles currently defined': 'हाल कुनैपनि नक्सा बनावटहरू परिभाषित गरिएको छैन',
'No Markers currently available': 'हाल कुनैपनि चिन्हहरू उपलब्ध छैन',
'No match': 'कुनै मिलेन',
'No matching element found in the data source': 'आँकडा स्रोतमा कुनैपनि मिल्ने कुरा प्राप्त भएको छैन',
'No Matching Records': 'कुनैपनि मिल्दो विवरणहरू छैनन्',
'No matching records found': 'कुनैपनि मिल्ने विवरण(हरू) प्राप्त भएको छैन',
'No Members currently registered': 'हाल कुनैपनि सदस्यहरू दर्ता गरिएको छैन',
'No members currently registered': 'हाल कुनैपनि सदस्यहरू दर्ता गरिएको छैन',
'No membership types currently registered': 'हाल कुनैपनि सदस्यता प्रकार(हरू) दर्ता गरिएको छैन',
'No Memberships currently registered': 'हाल कुनैपनि सदस्यताहरू दर्ता गरिएको छैन',
'No Milestones Found': 'कुनैपनि कोशेढुङ्गाहरू प्राप्त भएन',
'No Networks currently recorded': 'हाल कुनैपनि नेटवर्कहरूका विवरण दिइएको छैन',
'No Office Types currently registered': 'हाल कुनैपनि कार्यालय प्रकारहरू दर्ता गरिएको छैन',
'No Offices currently registered': 'हाल कुनैपनि कार्यालयहरू दर्ता गरिएको छैन',
'No Open Tasks for %(project)s': '%(project)s को लागि हाल कुनैपनि खुला कामहरू छैनन्',
'No options available': 'कुनैपनि विकल्पहरू उपलब्ध छैनन्',
'no options available': 'कुनैपनि विकल्पहरू उपलब्ध छैनन्',
'No options currently available': 'हाल कुनैपनि विकल्पहरू उपलब्ध छैनन्',
'No Organization Types currently registered': 'हाल कुनैपनि संस्था प्रकारहरू दर्ता गरिएको छैन',
'No Organizations currently registered': 'हाल कुनैपनि संस्थाहरू दर्ता गरिएको छैन',
'No Organizations for Project(s)': 'परियोजना(हरू)को निम्ति कुनैपनि संस्थाहरू छैनन्',
'No Organizations found for this Policy/Strategy': 'यो नियम/उद्देश्यको निम्ति कुनैपनि संस्था(हरू) प्राप्त भएन',
'No outputs defined': 'कुनैपनि नतिजाहरू प्राप्त भएन',
'No Partner Organizations currently registered': 'हाल कुनैपनि साझेदार संस्था(हरू) दर्ता गरिएको छैन',
'No Persons currently registered': 'हाल कुनैपनि ब्यक्तिहरू दर्ता गरिएको छैन',
'No PoI Types currently available': 'हाल कुनैपनि धुर्व प्रकारहरू उपलब्ध छैनन्',
'No Points of Interest currently available': 'हाल कुनैपनि रूचीको बुँदा उपलब्ध छैनन्',
'No PoIs available.': 'कुनै धुर्वहरू उपलब्ध छैनन् ।',
'No Policies or Strategies found': 'कुनैपनि नियम तथा लक्षहरू प्राप्त भएन',
'No Presence Log Entries currently registered': 'हाल कुनैपनि उपस्थिति दर्ताहरू दर्ता गरिएको छैन',
'No Professional Experience found': 'कुनैपनि ब्यबसायिक अनुभव प्राप्त भएन',
'No Profiles currently have Configurations for this Layer': 'यो तहको लागि हाल कुनैपनि प्रोफाइलहरूको बनावट छैनन्',
'No Projections currently defined': 'हाल कुनैपनि योजनाहरू परिभाषित गरिएको छैन',
'No Projects currently registered': 'हाल कुनैपनि परियोजनाहरू दर्ता गरिएको छैन',
'No Ratings for Skill Type': 'सिप प्रकारको लागि कुनैपनि स्तरहरू छैनन्',
'No Records currently available': 'हाल कुनैपनि विवरणहरू उपलब्ध छैनन्',
'No records found': 'अभिलेख उपलब्ध नभएको',
'No records in this resource': 'यो स्रोतमा कुनैपनि विवरणहरू छैनन्',
'No records in this resource. Add one more records manually and then retry.': 'यो स्रोतमा कुनैपनि विवरण छैनन् । विस्तृतरूपमा थप विवरण राख्नुहोस् र त्यसपछि पुन: प्रयास गर्नुहोस्',
'No records to review': 'पुर्न अवलोकनको लागि कुनै विवरण(हरू) छैनन्',
'No Red Cross & Red Crescent National Societies currently registered': 'हाल कुनैपनि रेड क्रस तथा रेड क्रिसेन्ट राष्ट्रिय सोसाइटि(हरू) दर्ता गरिएको छैन',
'No Regions currently registered': 'हाल कुनैपनि क्षेत्रहरू दर्ता गरिएको छैन',
'No report specified.': 'कुनैपनि प्रतिवेदन उल्लेख गरिएको छैन',
'No Resource Types defined': 'कुनैपनि स्रोत प्रकारहहरू परिभाषित गरिएको छैन',
'No Resources in Inventory': 'लेखा विवरणमा कुनैपनि स्रोतहरू छैनन्',
'No Response': 'कुनै प्रतिकृया छैन',
'No Response Summaries Found': 'कुनैपनि प्रतिकृया संक्षिप्त प्राप्त भएन',
'No Restrictions': 'कुनैपनि बाधाहरू छैनन्',
'No role to delete': 'हटाउनको लागि कुनै भूमिका छैनन्',
'No roles currently assigned to this user.': 'यस प्रयोगकर्ताको लागि हाल कुनैपनि भूमिकाहरू मिलाइएको छैन ।',
'No Roles defined': 'कुनैपनि भूमिकाहरू परिभाषित गरिएका छैनन्',
'No Rooms currently registered': 'हाल कुनैपनि कोठाहरू दर्ता गरिएको छैनन्',
'No Search saved': 'कुनैपनि खोजी संचित भएको छैन',
'No Sectors currently registered': 'हाल कुनैपनि क्षेत्रहरू दर्ता गरिएको छैन',
'No Sectors found for this Organization': 'यो संस्थाको लागि कुनैपनि क्षेत्र(हरू) प्राप्त भएन',
'No Sectors found for this Project': 'यो परियोजनाको लागि कुनैपनि क्षेत्रहहरू छैनन्',
'No Sectors found for this Theme': 'यो स्वरूपको लागि कुनैपनि क्षेत्रहरू प्राप्त भएन',
'No Services currently registered': 'हाल कुनैपनि सेवाहरू दर्ता गरिएको छैन',
'No Services found for this Organization': 'यो संस्थाको लागि कुनैपनि सेवाहरू प्राप्त भएन',
'No Staff currently registered': 'हाल कुनैपनि कर्मचारी दर्ता गरिएको छैन',
'No staff or volunteers currently registered': 'हाल कुनैपनि कर्मचारी वा स्वयम्-सेवकहरू हाल दर्ता गरिएको छैन',
'No Statuses currently registered': 'हाल कुनैपनि अवस्थाहरू दर्ता गरिएको छैन',
'No Symbologies currently defined': 'हाल कुनैपनि चिन्हताहरू परिभाषित गरिएको छैन',
'No Symbologies currently defined for this Layer': 'यो तहको लागि हाल कुनैपनि चिन्हताहरू परिभाषित गरिएको छैन',
'No Tasks Assigned': 'कुनैपनि कामहरू लगाइएको छैन',
'No tasks currently registered': 'हाल कुनैपनि कामहरू दर्ता गरिएको छैन',
'No Teams currently registered': 'हाल कुनैपनि समूहहरू दर्ता गरिएको छैन',
'No Themes currently registered': 'हाल कुनैपनि स्वरूपहरू दर्ता गरिएको छैन',
'No Themes found for this Activity': 'यो कृयाकलापको लागि कुनैपनि स्वरूप प्राप्त भएन',
'No Themes found for this Project': 'यो परियोजनाको निम्ति कुनैपनि स्वरूपहरू प्राप्त भएन',
'No Themes found for this Project Location': 'यो परियोजना स्थानको लागि कुनैपनि स्वरूपहरू प्राप्त भएन',
'No Time Logged': 'कुनैपनि समय सूची छैन',
'No time stamps found in this resource': 'यस स्रोतको लागि कुनैपनि समय छाप प्राप्त भएन',
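# Translator note for the 'No UTC offset found ...' entry just below
# (assumption: 'UTC+0530' is the literal format the software parses from the
# user's profile): keep such format examples exactly as in the English source,
# in ASCII digits.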
'No users with this role at the moment.': 'यो भूमिकामा हाल कुनैपनि प्रयोगकर्ता छैनन्',
"No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": "कुनैपनि यू.टि.सि. अफसेट प्राप्त भएन । कृपया तपाईंको 'प्रयोगकर्ता प्रोफाइल' विवरणमा यू.टि.सि. अफसेट राख्नुहोस् । उदाहरण: UTC+0530",
'No Volunteer Cluster Positions': 'कुनैपनि स्वयम्-सेवक समूह पदहरू छैनन्',
'No Volunteer Cluster Types': 'कुनैपनि स्वयम्-सेवक समूह प्रकारहरू छैनन्',
'No Volunteer Clusters': 'कुनैपनि स्वयम्-सेवक समूहहरू छैनन्',
'No Volunteers currently registered': 'हाल कुनैपनि स्वयम्-सेवकहरू दर्ता गरिएको छैन',
'Non-Communicable Diseases': 'नसर्ने रोगहरु',
'none': 'कुनैपनि होइन',
'None': 'कुनै पनि होइन',
'NONE': 'खाली',
'None (no such record)': 'कुनैपनि (कुनैपनि मिल्दो विवरण छैन)',
'None of the above': 'माथिको कुनैपनि होइन',
'Nonexistent or invalid resource': 'अस्थित्वमा नभएको वा अमान्य स्रोत',
'Normal': 'साधारण',
'Normal Job': 'साधारण काम',
'NOT %s AND NOT %s': ' %s होइन र %s होइन',
'NOT %s OR NOT %s': ' %s होइन वा %s होइन',
'Not Authorized': 'स्वीकृती गरिएको छैन',
'Not implemented': 'लागु गरिएको छैन',
'Not installed or incorrectly configured.': 'इन्स्टल गरिएको छैन वा गलत बनावट दिइको ।',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'याद गर्नुहोस्, यो तालिकाले सकृय स्वयम्-सेवकहरू मात्र देखाउँदछ । दर्ता भएको सबैलाई हेर्नको निम्ति, यहाँबाट खोजी गर्नुहोस् ।',
'Note that when using geowebcache, this can be set in the GWC config.': 'याद गर्नुहोस्, जिओवेवकेच प्रयोग गर्दा, यसलाई जिडब्लुसि बनावटमा राख्न सकिन्छ ।',
'Notification frequency': 'सूचना आवृत्ति',
'Notification method': 'सूचना विधि',
'Notify': 'जानकारी',
'Number': 'संख्या',
'Number of Activities': 'कृयाकलापहरूको संख्या',
'Number of Beneficiaries': 'भागिदारहरूको संख्या',
'Number of Countries': 'देशहरूको संख्या',
'Number of Deployments': 'परिचालनहरूको संख्या',
'Number of Disaster Types': 'प्रकोप प्रकारहरूको संख्या',
'Number of Facilities': 'सूबिधाहरूको संख्या',
'Number of Missions': 'मिसनहरूको संख्या',
'Number of People Affected': 'प्र्रभावितको संख्या',
'Number of People Dead': 'मृतकको संख्या',
'Number of People Injured': 'घाइतेको संख्या',
'Number of Responses': 'प्रतिकृयाहरूको संख्या',
'Number or Label on the identification tag this person is wearing (if any).': 'यो ब्यक्तिले लगाइरहेको परिचय ट्यागको संख्या वा लेबल (भएमा) ।',
'Nutrition': 'पोषण',
'Nutritional Assessments': 'खाद्द मुल्यंकन',
'Object': 'वस्तु',
'Objectives': 'उद्देश्यहरु',
'Observer': 'निरीक्षणकर्ता',
'obsolete': 'अप्रचलित',
'Obsolete': 'अप्रचलित',
'OCR Form Review': 'ओ.सि.आर. फारम पुर्नअवलोकन',
'OCR module is disabled. Ask the Server Administrator to enable it.': 'ओ.सि.आर. भाग निस्कृय गरियो । यसलाई सकृय गर्नको निम्ति सेवा संचालकलाई सम्पर्क गर्नुहोस् ।',
'OCR review data has been stored into the database successfully.': 'ओ.सि.आर. पुर्नअवलोकन आँकडा डाटाबेसमा पूर्णरूपले संचित भयो ।',
'OD Coordinator': 'ओ.डि. प्रतिनिधी',
'Office': 'कार्यालय',
'Office added': 'कार्यालय संचित गरियो',
'Office Address': 'कार्यालय ठेगाना',
'Office deleted': 'कार्यालय हटाइयो',
'Office Details': 'कार्यालय विवरण',
'Office Phone': 'कार्यालय फोन',
'Office Type': 'कार्यालय प्रकार',
'Office Type added': 'कार्यालय प्रकार संचित गरियो',
'Office Type deleted': 'कार्यालय प्रकार हटाइयो',
'Office Type Details': 'कार्यालय प्रकार विवरण',
'Office Type updated': 'कार्यालय प्रकार परिमार्जन गरियो',
'Office Types': 'कार्यालय प्रकारहरु',
'Office updated': 'कार्यालय परिमार्जन गरियो',
'Offices': 'कार्यालयहरु',
'Office/Warehouse/Facility': 'कार्यालय/गोदामघर/सुविधा',
'OK': 'हुन्छ',
'on %(date)s': ' %(date)s मा',
'On by default?': 'स्वचलानमा रहेकोअनुसार खुला?',
'On Hold': 'होल्डमा राखिएको छ',
'Only showing accessible records!': 'पहुँचमा रहेको विवरणहरू मात्र देखाइएको !',
'Opacity': 'अपारदर्शिता',
'Open': 'खुला',
'Open Chart': 'खुला तालिका',
'Open Incidents': 'खुला घटनाहरू',
'Open Map': 'खुला नक्सा',
'Open recent': 'खुला भर्खरैको',
'Open Report': 'खुला प्रतिवेदन',
'Open Table': 'खुला तालिका',
'Open Tasks for %(project)s': '%(project)sको लागि खुला कामहरू',
'Open Tasks for Project': 'परियोजनाको लागि खुला कामहरू',
'Opening Times': 'खुलाहुने समय(हरू)',
'OpenStreetMap Layer': 'ओपनस्ट्रिटम्याप तह',
'OpenStreetMap OAuth Consumer Key': 'ओपनस्ट्रिटम्याप OAuth ग्राहक चाबि',
'OpenStreetMap OAuth Consumer Secret': 'ओपनस्ट्रिटम्याप OAuth ग्राहक गोप्यता',
'OpenStreetMap (Humanitarian)': 'ओपनस्ट्रिटम्याप (मानवीय)',
'OpenStreetMap (MapQuest)': 'ओपनस्ट्रिटम्याप (म्यापक्वेस्ट)',
'OpenWeatherMap Layer': 'ओपनवेदरम्याप तह',
'Operation not permitted': 'कार्यको अनुमति छैन',
'Optional password for HTTP Basic Authentication.': 'एच.टि.टि.पि. आधार्भूत पुष्टिको लागि वैकल्पिक पासवर्ड ।',
'Optional selection of a background color.': 'पृष्ठभूमिको लागि रंग वैकल्पिक छनौट',
'Optional selection of a MapServer map.': 'नक्सासर्वर नक्साको लागि वैकल्पिक छनौट',
'Optional selection of an alternate style.': 'उल्टो तरिकाको लागि वैकल्पिक छनौट',
'Optional username for HTTP Basic Authentication.': 'एच.टि.टि.पि. आधार्भूत पुष्टिको लागि वैकल्पिक प्रयोगकर्ताको नाम',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:).': 'वैकल्पिक । जिओ सर्वरमा, यो Workspace Namespace URI हो (नाम होइन!) । WFS getCapabilities भित्र, workspace भनेको FeatureType नामको कोलन(:) अघिको भाग हो ।',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'वैकल्पिक । कुनै वस्तुको नाम, जस्मा समावेश कुरा पप्-अपमा राखिएको एउटा तस्विर फाइलको यू.आर.एल. हो ।',
'Optional. The name of an element whose contents should be put into Popups.': 'वैकल्पिक । कुनै वस्तुको नाम जसमा समावेश कुरा पप्-अपमा राखिएको हुन्छ ।',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "वैकल्पिक । ज्यामिती भागको नाम । पोष्ट जि.आइ.एस. मा यो स्वचलानमा 'the_geom' रहेको हुन्छ ।",
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'वैकल्पिक । स्किमाको नाम । जिओ सर्वरमा यसको रूप http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name हुन्छ ।',
'or': 'वा',
'Organisational Preparedness - Nhq and Branches': 'संस्थागत पुर्वतयारी-एन.एच.क्यू. र शाखाहरू',
'Organization': 'संस्था',
'Organization added': 'संस्था संचित गरियो',
'Organization added to Policy/Strategy': 'नियम/उद्देश्यमा संस्था संचित गरियो ',
'Organization added to Project': 'परियोजनामा संस्था संचित गरियो',
'Organization deleted': 'संस्था हटाइयो',
'Organization Details': 'संस्था विवरण',
'Organization group': 'संस्था समूह',
'Organization removed from Policy/Strategy': 'नियम/उद्देश्यबाट संस्था हटाइयो',
'Organization removed from Project': 'परियोजनाबाट संस्था हटाइयो',
'Organization Type': 'संस्था प्रकार',
'Organization Type added': 'संस्था प्रकार संचित गरियो',
'Organization Type deleted': 'संस्था प्रकार हटाइयो',
'Organization Type Details': 'संस्था प्रकार विवरण',
'Organization Type updated': 'संस्था प्रकार परिमार्जन गरियो',
'Organization Types': 'संस्था प्रकारहरू',
'Organization Units': 'संस्था इकाईहरू',
'Organization updated': 'संस्था परिमार्जन गरियो',
'Organization(s)': 'संस्था(हरू)',
'Organization/Branch': 'संस्था/शाखा',
'Organizational Development': 'संस्थागत विकास',
'Organizations': 'संस्थाहरू',
'Organizations / Teams / Facilities': 'संस्थाहरू/ समूहहरू / सूबिधाहरू',
'Origin': 'मुख्य',
'Original': 'सक्कल प्रति',
'OSM file generation failed!': 'ओ.एस.एम. फाइल प्रकृया असफल !',
'OSM file generation failed: %s': 'ओ.एस.एम. फाइल प्रकृया असफल: %s',
'Other': 'अन्य',
'other': 'अन्य',
'Other Address': 'अन्य ठेगाना',
'Other Details': 'अन्य विवरण',
'Other Users': 'अन्य प्रयोगकर्ताहरू',
'Others': 'अन्यहरू',
'Outcomes, Impact, Challenges': 'नतिजा, प्रभाव, चुनौतीहरू',
'Output': 'नतिजा',
'Output added': 'नतिजा संचित गरियो',
'Output deleted': 'नतिजा हटाइयो',
'Output updated': 'नतिजा परिमार्जन गरियो',
'Outputs': 'नतिजाहरू',
'Outreach Staff': 'बाहिर खटाइएको कर्मचारी',
'overdue': 'ढिलो',
'Overlays': 'ओभरलेहरू',
'Owned Records': 'प्राप्त विवरणहरू',
'Pacific Islands Framework for Action on Climate Change. Applicable to projects in Pacific countries only': 'मौसम परिवर्तनको निम्ति कार्यको निम्ति प्यासिफिक आइर्ल्याण्डहरूको प्रयास् । प्यासिफिक देशहरूको परियोजनाहरूमा मात्र लागु हुने ।',
'Page': 'पृष्ठ',
'paid': 'भुक्तानी भयो',
'Paid': 'भुक्तानी भयो',
'Pan Map: keep the left mouse button pressed and drag the map': 'प्यान नक्सा: वायाँ माउस बटन थिचिराख्नुहोस् र नक्सालाई घिसार्नुहोस् ।',
'Parent': 'अभिभावक',
"Parent level should be higher than this record's level. Parent level is": 'अभिभावकको स्तर यस विवरणको स्तरभन्दा माथि हुनुपर्छ । अभिभावक स्तर हो',
'Parent needs to be of the correct level': 'अभिभावक सहि स्तरको हुन आवश्यक छ',
'Parent needs to be set': 'अभिभावक राखिनुपर्छ',
'Parent needs to be set for locations of level': 'यस स्तरका स्थानहरूको लागि अभिभावक राखिनुपर्छ',
'Part of the URL to call to access the Features': 'विशेषताहरूमा पहुँचको निम्ति यू.आर.एल.को भाग',
'Participant': 'सहभागी',
'Participant added': 'सहभागी संचित गरियो',
'Participant deleted': 'सहभागी हटाइयो',
'Participant Details': 'सहभागी विवरण',
'Participant updated': 'सहभागी परिमार्जन गरियो',
'Participants': 'सहभागीहरू',
'Participatory Hygiene Promotion': 'सहभागिमुलक स्वास्थ्य बढुवा',
'Partner': 'साझेदार',
'Partner National Society': 'साझेदार राष्ट्रिय समाज',
'Partner Organization added': 'साझेदार संस्था संचित गरियो',
'Partner Organization deleted': 'साझेदार संस्था हटाइयो',
'Partner Organization Details': 'साझेदार संस्था विवरण',
'Partner Organization updated': 'साझेदार संस्था परिमार्जन गरियो',
'Partner Organizations': 'साझेदार संस्थाहरू',
'Partners': 'साझेदारहरू',
'Partnerships': 'साझेदारी',
'Pass': 'पास',
'Passport': 'पास्पोर्ट',
'Password': 'पासवर्ड',
'PDF File': 'पि.डि.एफ. फाइल',
'Peer Support': 'मित्र सहयोग',
'Pending': 'प्रकृयाको क्रममा रहेको',
'per': 'प्रति',
'Percentage': 'प्रतिशत',
'Performance Rating': 'प्रस्तुति स्तर',
'Permanent Home Address': 'स्थायी गृह ठेगाना',
'Person': 'ब्यक्ति',
'Person added': 'ब्यक्ति संचित गरियो',
'Person deleted': 'ब्यक्ति हटाइयो',
'Person Details': 'ब्यक्ति विवरण',
'Person details updated': 'ब्यक्ति विवरण परिमार्जन गरियो',
'Person Entity': 'ब्यक्ति अंग',
'Person must be specified!': 'ब्यक्ति उल्लेख हुनैपर्छ!',
'Person or OU': 'ब्यक्ति वा ओ.यू.',
'Person Registry': 'ब्यक्ति दर्ता',
'Person who has actually seen the person/group.': 'ब्यक्ति जसले वास्तबमानै ब्यक्ति/समूहलाई देखेको छ ।',
"Person's Details": 'ब्यक्तिको विवरण',
"Person's Details added": 'ब्यक्तिको विवरण संचित गरियो',
"Person's Details deleted": 'ब्यक्तिको विवरण हटाइयो',
"Person's Details updated": 'ब्यक्तिको विवरण परिमार्जन गरियो',
'Personal': 'ब्यक्तिगत',
'Personal Details': 'ब्यक्तिगत विवरण',
'Personal Profile': 'ब्यक्तिगत प्रोफाइल',
'Persons': 'ब्यक्तिहरू',
"Persons' Details": 'ब्यक्तिको विवरण',
'Philippine Pesos': 'फिलिपिनि पिसोस्',
'Phone': 'फोन',
'Phone #': 'फोन #',
'Phone 1': 'फोन १',
'Phone 2': 'फोन २',
'Phone number is required': 'फोन नम्बर आवश्यक छ',
'Photograph': 'फोटो',
'PIFACC Priorities': 'पि.आइ.एफ.ए.सि.सि. प्राथमिकताहरू',
'PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures': 'पि.आइ.एफ.ए.सि.सि.-१: भौतिक लागु, तल्लो तहमा लागु गर्ने कार्य मापदण्डहरू',
'PIFACC-2: Governance and Decision Making': 'पि.आइ.एफ.ए.सि.सि.-२: जाँच र निर्णय',
'PIFACC-3: Improving our understanding of climate change': 'पि.आइ.एफ.ए.सि.सि.-३: मौसम परिवर्तनको बारेमा हाम्रो बुझाइ सुधार गर्नु',
'PIFACC-4: Education, Training and Awareness': 'पि.आइ.एफ.ए.सि.सि.-४: शिक्षा, तालिम र जनचेतना',
'PIFACC-5: Mitigation of Global Greenhouse Gas Emissions': 'पि.आइ.एफ.ए.सि.सि.-५: विश्वव्यापी हरितगृह ग्यास उत्सर्जन न्यूनीकरण',
'PIFACC-6: Partnerships and Cooperation': 'पि.आइ.एफ.ए.सि.सि.-६: साझेदारी र सहकार्य',
'PIL (Python Image Library) not installed': 'पि.आइ.एल. (पाइथन इमेज लाइब्रेरी) इन्स्टल भएको छैन',
'PIL (Python Image Library) not installed, images cannot be embedded in the PDF report': 'पि.आइ.एल. (पाइथन इमेज लाइब्रेरी) इन्स्टल भएको छैन, तस्विरहरू पि.डि.एफ. प्रतिवेदनमा समावेश गर्न सकिँदैन',
'Place of Birth': 'जन्म स्थान',
'Place on Map': 'नक्सामा स्थान',
'Planning and Construction of Drainage Systems ': 'ड्रेनएज प्रकृयाहरूको योजना र निर्माण',
'Please choose a type': 'कृपया एउटा प्रकार छान्नुहोस्',
'Please enter a first name': 'कृपया पहिलो नाम टाइप गर्नुहोस्',
'Please enter a last name': 'कृपया अन्तिम नाम टाइप गर्नुहोस्',
'Please enter a number only': 'संख्यामात्र टाइप गर्नुहोस् ',
'Please enter a valid email address': 'कृपया प्रमाणित इमेल ठेगाना टाइप गर्नुहोस्',
'Please fill this!': 'कृपया यसलाई भर्नुहोस्!',
"Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.": 'कृपया सकेसम्म धेरै विवरण दिनुहोस्, जसमा समस्या देखा पर्ने वा नयाँ विशेषता चाहिएको यू.आर.एल.(हरू) समावेश गर्नुहोस् ।',
'Please record Beneficiary according to the reporting needs of your project': 'कृपया तपाईंको परियोजनाको प्रतिवेदन आवश्यकता अनुसार भागिदारको विवरण राख्नुहोस्',
'Please Select a Facility': 'कृपया एउटा सुविधा छान्नुहोस्',
'Please select a valid image!': 'कृपया मान्य तस्विर राख्नुहोस्!',
'Please select exactly two records': 'कृपया ठ्याक्कै दुई विवरणहरू छान्नुहोस्',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'कुनैपनि थप जानकारी विवरण राख्नको निम्ति यो क्षेत्र प्रयोग गर्नुहोस्, विवरण परिमार्जन गरिएको छ भने कृपया विवरण इतिहास प्रदान गर्नुहोस् ।',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'कृपया कुनैपनि थप जानकारी राख्नको निम्ति यो क्षेत्र प्रयोग गर्नुहोस्, जस्तै उसाहिदिको उदाहरण आइ.डि.हरू । विवरण परिमार्जन गरिएको छ भने कृपया विवरण इतिहास प्रदान गर्नुहोस् ।',
'PMER': 'पि.एम.इ.आर.',
'PMER Development': 'पि.एम.इ.आर. विकास',
'PoI': 'धुर्व',
'PoI Type added': 'धुर्व प्रकार संचित गरियो',
'PoI Type deleted': 'धुर्व प्रकार हटाइयो',
'PoI Type Details': 'धुर्व प्रकार विवरण',
'PoI Type updated': 'धुर्व प्रकार परिमार्जन गरियो',
'PoI Types': 'धुर्व प्रकारहरू',
'Point of Interest added': 'रूचीको बुँदा संचित गरियो',
'Point of Interest deleted': 'रूचीको बुँदा हटाइयो',
'Point of Interest Details': 'रूचीको बुँदा विवरण',
'Point of Interest updated': 'रूचीको बुँदा परिमार्जन गरियो',
'Points of Interest': 'रूचीको बुँदा',
'PoIs': 'धुर्वहरू',
'PoIs successfully imported.': 'धुर्व पूर्णरूपले प्रवेश गरियो',
'Policies & Strategies': 'नियम तथा उद्देश्य(हरू)',
'Policy Development': 'नीति निर्माण',
'Policy or Strategy': 'नियम वा उद्देश्य',
'Policy or Strategy added': 'नियम वा उद्देश्य संचित गरियो',
"Policy or Strategy added, awaiting administrator's approval": 'नियम वा उद्देश्य संचित गरियो, प्रतिक्षित संचालकको प्रमाणिकरण',
'Policy or Strategy deleted': 'नियम वा उद्देश्य हटाइयो',
'Policy or Strategy updated': 'नियम वा उद्देश्य परिमार्जन गरियो',
'Polygon': 'बहुभुजा',
'Poor': 'गरिब',
'Population': 'जनसंख्या',
'Population Density 2010 (Persons per km2)': 'जनघनत्व २०१० (प्रति वर्ग कि.मी. व्यक्ति)',
'Popup Fields': 'पप्-अप क्षेत्रहरू',
'Popup Label': 'पप्-अप लेबल',
'Position': 'पद',
'Positions': 'पदहरू',
'Post Harvest Storage and Management': 'कटनी पछिको भण्डारण र ब्यबस्थापन',
'Postcode': 'पोष्ट कोड',
'Power Supply Type': 'शक्ति निर्यात प्रकार',
'Powered by': 'द्वारा संचालित',
'Powered by Sahana Eden': 'साहाना इडेनद्वारा संचालित',
'Preferred Name': 'रूचिको नाम',
'Presence': 'उपस्थिति',
'Presence Condition': 'उपस्थिति अवस्था',
'Presence Log': 'उपस्थिति सूची',
'Previous': 'अघिल्लो',
'Previous View': 'अघिल्लो दृश्य',
'Print': 'प्रिन्ट',
'Priority': 'प्राथमिकता',
'Priority from 1 to 9. 1 is most preferred.': '१ देखि ९ सम्म प्राथमिकता । १ सबैभन्दा रूचाइएको ।',
'Privacy': 'गोप्यता',
'Private': 'ब्यक्तिगत',
'Private-Public Partnerships': 'निजी-सार्वजनिक साझेदारी',
'Procedure': 'प्रकृया',
'Processing': 'कार्य प्रकृया',
'Profession': 'ब्यवसाय',
'Professional Experience': 'ब्यबसायिक अनुभव',
'Professional Experience added': 'ब्यबसायिक अनुभव संचित गरियो',
'Professional Experience deleted': 'ब्यबसायिक अनुभव हटाइयो',
'Professional Experience Details': 'ब्यबसायिक अनुभव विवरण',
'Professional Experience updated': 'ब्यबसायिक अनुभव परिमार्जन गरियो',
'Profile': 'प्रोफाइल',
'Profile Configuration': 'प्रोफाइल बनावट',
'Profile Configuration removed': 'प्रोफाइल बनावट हटाइयो',
'Profile Configuration updated': 'प्रोफाइल बनावट परिमार्जन गरियो',
'Profile Configurations': 'प्रोफाइल बनावटहरू',
'Profile Configured': 'प्रोफाइल बनावट मिलाइयो',
'Profile Details': 'प्रोफाइल विवरण',
'Profile Page': 'प्रोफाइल पृष्ठ',
'Profile Picture': 'प्रोफाइल तस्बिर',
'Profile Picture?': 'प्रोफाइल तस्बिर?',
'Profiles': 'प्रोफाइलहरू',
'Program': 'कार्यक्रम',
'Program added': 'कार्यक्रम संचित गरियो',
'Program deleted': 'कार्यक्रम हटाइयो',
'Program Details': 'कार्यक्रम विवरण',
'Program Hours (Month)': 'कार्यक्रम समय (घण्टा) (महिना)',
'Program Hours (Year)': 'कार्यक्रम समय (घण्टा) (वर्ष)',
'Program updated': 'कार्यक्रम परिमार्जन गरियो',
'Programme Manager': 'कार्यक्रम व्यवस्थापक',
'Programme Planning and Management': 'कार्यक्रम योजना तर्जुमा र व्यवस्थापन',
'Programme Preparation and Action Plan, Budget & Schedule': 'योजना तर्जुमा, कार्य तालिका, बजेट',
'Programs': 'कार्यक्रमहरू',
'Project': 'परियोजना',
'Project added': 'परियोजना संचित गरियो',
'Project Assessments and Planning': 'परियोजना लेखाजोखा र तर्जुमा',
'Project Calendar': 'परियोजना पात्रो',
'Project Communities': 'परियोजना संचालित समुदायहरु',
'Project deleted': 'परियोजना हटाइयो',
'Project Details': 'परियोजना विवरण',
'Project Name': 'परियोजना नाम',
'Project not Found': 'परियोजना प्राप्त हुन सकेन',
'Project Officer': 'परियोजना कर्मचारी',
'Project Organization Details': 'परियोजना संस्था विवरण',
'Project Organization updated': 'परियोजना संस्था परिमार्जन गरियो',
'Project Organizations': 'परियोजना संस्थाहरू',
'Project Report': 'परियोजना प्रतिवेदन',
'Project Task': 'परियोजना काम',
'Project Time Report': 'परियोजना समय प्रतिवेदन',
'Project updated': 'परियोजना परिमार्जन गरियो',
'Projection': 'योजना',
'Projection added': 'योजना संचित गरियो',
'Projection deleted': 'योजना हटाइयो',
'Projection Details': 'योजना विवरण',
'Projection Type': 'योजना प्रकार',
'Projection updated': 'योजना परिमार्जन गरियो',
'Projections': 'योजनाहरू',
'Projects': 'परियोजनाहरु',
'Projects Map': 'परियोजनाहरु नक्सा',
'Proposed': 'प्रस्तावित',
'Protecting Livelihoods': 'जीविकोपार्जन संरक्षण',
'Provide a password': 'पासवर्ड उपलब्ध गर्नुहोस्',
'Provision of Inputs': 'लागतको ब्यबस्था',
'Provision of Tools and Equipment': 'औजार र उपकरणहरुको व्यवस्था',
'Psychosocial Support': 'मनोसामाजिक सहयोग',
'Public': 'सामाजिक',
'Purchase Date': 'खरिद मिति',
'Purchase Price': 'खरिद रकम',
'Python GDAL required for Shapefile support!': 'आकारफाइल (Shapefile) सहयोगको लागि पाइथन जि.डि.ए.एल. आवश्यक !',
'Python needs the ReportLab module installed for PDF export': 'पि.डि.एफ. निर्यातको लागि पाइथनलाई रिपोर्टल्याब (ReportLab) मोड्यूल इन्स्टल भएको हुनुपर्दछ ।',
'Python needs the xlrd module installed for XLS export': 'एक्स.एल.एस. निर्यातको लागि पाइथनलाई xlrd मोड्यूल इन्स्टल भएको हुनुपर्दछ ।',
'Python needs the xlwt module installed for XLS export': 'एक्स.एल.एस. निर्यातको लागि पाइथनलाई xlwt मोड्यूल इन्स्टल भएको हुनुपर्दछ ।',
'Quantity': 'परिमाण',
'Query': 'सोधपुछ',
'Query Feature': 'सोधपुछ विशेषता',
'Queryable?': 'सोधपुछयोग्य ?',
'Race': 'जाति',
'Rainfall - last 1 day (mm)': 'बर्षा – गएको एक दिन (मिमि)',
'Rainfall - last 10 days accumulated (mm)': 'बर्षा – गएको दश दिन (मिमि) जम्मा',
'Rangeland, Fisheries and Forest Management': 'भुमी, माछा-क्षेत्र र वन ब्यबस्थापन',
'Rapid Data Entry': 'द्रुत आँकडा प्रवेश',
'Rating': 'स्तर',
'RDRT (Regional Disaster Response Teams)': 'आर.डि.आर.टि. (क्षेत्रिय प्रकोप प्रतिकृया समूहहरू)',
'RDRT Members': 'आर.डि.आर.टि. सदस्यहरू',
'RDRT Type': 'आर.डि.आर.टि. प्रकार',
'READ': 'हेर्नुहोस्',
'Ready': 'तयार',
'Receive %(opt_in)s updates:': ' %(opt_in)s परिमार्जन(हरू) प्राप्त गर्नुहोस्:',
'Receive updates': 'परिमार्जन(हरू) प्राप्त गर्नुहोस्',
'Received Shipments': 'प्राप्त जहाजिकरण(हरू)',
'Record': 'विवरण',
'Record added': 'विवरण संचित गरियो',
'Record already exists': 'विवरण पहिले नै रहेको छ',
'Record approved': 'विवरण प्रमाणित भयो',
'Record could not be approved.': 'विवरण प्रमाणित हुन सकेन',
'Record could not be deleted.': 'विवरण हटाउन सकिएन',
'Record deleted': 'विवरण हटाइयो',
'Record Details': 'विवरणको विवरण',
'Record not found': 'विवरण प्राप्त भएन',
'Record not found!': 'विवरण प्राप्त भएन!',
'Record updated': 'विवरण परिमार्जन गरियो',
'Record Updates': 'विवरण परिमार्जन(हरू)',
'Records': 'विवरणहरू',
'records deleted': 'विवरणहरू हटाइयो',
'Records merged successfully.': 'विवरणहरू पूर्णरूपमा एकै गरियो',
'Recovery': 'पूनर्लाभ',
'red': 'रातो',
'Red Cross & Red Crescent National Societies': 'रेडक्रस तथा रेडक्रिसेन्ट सोसाइटीहरु',
'Red Cross / Red Crescent': 'रेड क्रस / रेड क्रिसेन्ट',
'Referral': 'सिफारिस',
'Refresh Rate (seconds)': 'रिफ्रेस् दर (सेकेण्ड)',
'Region': 'क्षेत्र',
'Region added': 'क्षेत्र संचित गरियो',
'Region deleted': 'क्षेत्र हटाइयो',
'Region Details': 'क्षेत्र विवरण',
'Region Location': 'क्षेत्र स्थान',
'Region updated': 'क्षेत्र परिमार्जन गरियो',
'Regional': 'क्षेत्रीय',
'Regions': 'क्षेत्रहरू',
'Register': 'दर्ता',
'Register As': 'को रूपमा दर्ता',
'Register for Account': 'एकाउन्टको लागि दर्ता',
'Registered users can %(login)s to access the system': 'दर्ता गरिएको प्रयोगकर्ताहरू सिस्टम पहुँचको लागि %(login)s गर्न सक्छन्',
'Registration not permitted': 'दर्ताकार्य अनुमति छैन',
'Reject': 'अस्विकार',
'Relationship': 'सम्बन्ध',
'Relief Team': 'राहात समूह',
'Religion': 'धर्म',
'reload': 'पुन:लोड गर्नुहोस्',
'Reload': 'पुन:लोड गर्नुहोस्',
'Remove': 'हटाउनुहोस्',
'Remove Coalition': 'गठबन्धन हटाउनुहोस्',
'Remove existing data before import': 'आयात गर्नुभन्दा पहिले हालको तथ्यांक हटाउनुहोस्',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'विशेषता हटाउनुहोस्: तपाईंले हटाउन चहानुभएको विशेषता छान्नुहोस् र डिलिट कि थिच्नुहोस्',
'Remove Layer from Profile': 'प्रोफाइलबाट तह हटाउनुहोस्',
'Remove Layer from Symbology': 'चिन्हताबाट तह हटाउनुहोस्',
'Remove Network': 'नेटवर्क हटाउनुहोस्',
'Remove Organization from Project': 'परियोजनाबाट संस्था हटाउनुहोस्',
'Remove Profile Configuration for Layer': 'तहको लागि प्रोफाइल बनावट हटाउनुहोस्',
'Remove selection': 'छानिएको हटाउनुहोस्',
'Remove Skill': 'सिप हटाउनुहोस्',
'Remove Symbology from Layer': 'तहबाट चिन्हता हटाउनुहोस्',
'Remove this entry': 'यो प्रवेश हटाउनुहोस्',
'Reopened': 'पुन:खोलियो',
'Repeat': 'दोहोर्याउनुहोस्',
'Repeat your password': 'तपाईंको पासवर्ड दोहोर्याउनुहोस्',
'Replace': 'प्रतिस्थापन',
'Replacing or Provisioning Livelihoods': 'जीविकोपार्जन प्रतिस्थापन र प्रावधान',
'Reply': 'उत्तर',
'Report': 'प्रतिवेदन',
'Report of': 'को प्रतिवेदन',
'Report on Annual Budgets': 'वार्षिक बजेट(हरू)को प्रतिवेदन',
'Report Options': 'प्रतिवेदन विकल्पहरू',
'Reports': 'प्रतिवेदनहरु',
'representation of the Polygon/Line.': 'बहुभुजा/धर्काको प्रस्तुतिकरण',
'Request': 'अनुरोध',
'Requested By Facility': 'सुविधाद्वारा अनुरोध गरियो',
'Requested Items': 'अनुरोध गरिएका वस्तुहरू',
'Requests': 'अनुरोधहरू',
'Requires Login': 'लगिन गर्न आवश्यक',
'Reset': 'पहिलेको स्थितिमा',
'Reset all filters': 'फिल्टर(हरू) पहिलेको स्थितिमा राख्नुहोस्',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'विशेषता पुन:आकार दिनुहोस्: तपाईंले आकार दिन चाहानुभएको विशेषतालाई छान्नुहोस् र सम्बन्धित बिन्दुलाई तपाईंले चाहेको आकारमा मिलाउनुहोस्',
'Resource added': 'स्रोत संचित गरियो',
'Resource deleted': 'स्रोत हटाइयो',
'Resource Details': 'स्रोत विवरण',
'Resource Inventory': 'स्रोत लेखा विवरण',
'Resource Management System': 'स्रोत ब्यबस्थापन प्रकृया',
'Resource Management System account has been activated': 'स्रोत ब्यबस्थापन प्रकृया एकाउन्ट सकृय गरिएको छ',
'Resource Mobilization': 'स्रोत परिचालन',
'Resource Transfers for Acquiring Assets': 'भएको सम्पतिकोलागि स्रोत पठाउनुहोस्',
'Resource Transfers for Replacing/ Provisioning Or Consumption': 'स्थानान्त्रण/व्यवस्थापन वा प्रयोगको लागि स्रोत पठाउनुहोस्',
'Resource Type': 'स्रोत प्रकार',
'Resource Type added': 'स्रोत प्रकार संचित गरियो',
'Resource Type deleted': 'स्रोत प्रकार हटाइयो',
'Resource Type Details': 'स्रोत प्रकार विवरण',
'Resource Type updated': 'स्रोत प्रकार परिमार्जन गरियो',
'Resource Types': 'स्रोत प्रकारहरू',
'Resource updated': 'स्रोत परिमार्जन गरियो',
'Responded': 'प्रतिकृया दिइयो',
'Response': 'प्रतिकृया',
'Response Summaries': 'प्रतिकृया संक्षेप(हरू)',
'Response Summary Added': 'प्रतिकृया संक्षेप संचित गरियो',
'Response Summary Deleted': 'प्रतिकृया संक्षेप हटाइयो',
'Response Summary Details': 'प्रतिकृया संक्षेप विवरण',
'Response Summary Report': 'प्रतिकृया संक्षेप प्रतिवेदन',
'Response Summary Updated': 'प्रतिकृया संक्षेप परिमार्जन गरियो',
'REST Filter': 'REST फिल्टर',
'Restarting Livelihoods': 'जीविकोपार्जन पुनर्शुरुवात',
'Retrieve Password': 'पासवर्ड पुन:प्राप्त गर्नुहोस्',
'retry': 'पुन:प्रयास् गर्नुहोस्',
'Revert Entry': 'प्रवेश उल्टाउनुहोस्',
'Review': 'पुर्नअवलोकन',
'RFA Priorities': 'आर.एफ.ए. प्राथमिकताहरू',
'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework': 'आर.एफ.ए.१: जाँच-संस्थागत, शैक्षिक-संस्थागत, नियम र निर्णय तयारी',
'RFA2: Knowledge, Information, Public Awareness and Education': 'आर.एफ.ए.२: ज्ञान, जानकारी, सामाजिक जनचेतना र शिक्षा',
'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk': 'आर.एफ.ए.३: खतरा(हरू)को अनुसन्धान र मुल्याङ्कन, पूर्वतयारी र खतरामा रहेको सामाग्री',
'RFA4: Planning for Effective Preparedness, Response and Recovery': 'आर.एफ.ए.४: प्रभावकारी पुर्वतयारीको निम्ति योजना, प्रतिकृया र सुधार',
'RFA5: Effective, Integrated and People-Focused Early Warning Systems': 'आर.एफ.ए.५: प्रभावकारी, समायोजित र जन-केन्द्रित अग्रिम सचेतना प्रकृयाहरू',
'RFA6: Reduction of Underlying Risk Factors': 'आर.एफ.ए.६: वर्तमान खतराका पक्षहरू न्यूनिकरण',
'Risk Identification & Assessment': 'जोखिम पहिचान तथा लेखाजोखा',
'Risk Management and Quality Assurance': 'खतरा ब्यबस्थापन र गुणस्तर सुनिस्चितता',
'Risk Transfer': 'जोखिम हस्तान्तरण',
'RMS': 'आर.एम.एस.',
'RMS Team': 'आर.एम.एस. समूह',
'Road Safety': 'सडक सुरक्षा',
'Role': 'भूमिका',
'Role added': 'भूमिका संचित गरियो',
'Role assigned to User': 'प्रयोगकर्तालाई भूमिका हस्तान्तरण गरियो',
'Role deleted': 'भूमिका हटाइयो',
'Role Details': 'भूमिका विवरण',
'Role Name': 'भूमिका नाम',
'Role Required': 'भूमिका आवश्यक',
'Role updated': 'भूमिका परिमार्जन गरियो',
'Roles': 'भूमिकाहरू',
'Roles currently assigned': 'हाल हस्तान्तरण गरिएको भूमिका',
'Roles of User': 'प्रयोगकर्ताको भूमिका(हरू)',
'Roles Permitted': 'भूमिकाहरू स्विकृति गरियो',
'Roles updated': 'भूमिकाहरू परिमार्जन गरियो',
'Room': 'कोठा',
'Room added': 'कोठा संचित गरियो',
'Room deleted': 'कोठा हटाइयो',
'Room Details': 'कोठा विवरण',
'Room updated': 'कोठा परिमार्जन गरियो',
'Rooms': 'कोठा(हरू)',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': 'विशेषता अर्को तर्फ राख्नु: तपाईंले अर्को तर्फ राख्न चाहानुभएको विशेषता छान्नुहोस् र सम्बन्धित बिन्दुको माध्यम्द्वारा तापईंले चाहे अनुसार स्थान परिवर्तन गरि राख्नुहोस्',
'Run every': 'हरेक संचालन गर्नुहोस्',
'S3PivotTable unresolved dependencies': 'S3PivotTable का समाधान नभएका निर्भरताहरू',
'Sahana Community Chat': 'साहाना समुदाय कुराकानी',
'Sahana Eden Humanitarian Management Platform': 'साहाना इडेन मानवतावादि ब्यबस्थापन कार्यक्षेत्र',
'Sahana Eden Website': 'साहाना इडेन वेवसाइट',
'Sanitation': 'सफाई',
'Save': 'संचित गर्नुहोस्',
'Save and Continue Editing': 'संचित गरेर सम्पादन जारी राख्नुहोस्',
'Save as New Map?': 'नयाँ नक्साको रूपमा संचित गर्नुहुन्छ?',
'Save Map': 'नक्सा संचित गर्नुहोस्',
'Save search': 'खोजी संचित गर्नुहोस्',
'Save this search': 'यस खोजीलाई संचित गर्नुहोस्',
'Save: Default Lat, Lon & Zoom for the Viewport': 'संचित गर्नुहोस्: भिउपोर्टको लागि स्वचलानमा रहेको लाट, लोन तथा जुम',
'Saved': 'संचित',
'Saved Filters': 'संचित फिल्टर(हरू)',
'Saved filters': 'संचित फिल्टर(हरू)',
'Saved Filters...': 'संचित फिल्टर(हरू)...',
'Saved Maps': 'संचित नक्सा(हरू)',
'Saved search added': 'संचित खोजी संचित गरियो',
'Saved search deleted': 'संचित खोजी हटाइयो',
'Saved search details': 'संचित खोजी विवरण',
'Saved search updated': 'संचित खोजी परिमार्जन गरियो',
'Saved Searches': 'संचित खोजी(हरू)',
'Saved searches': 'संचित खोजी(हरू)',
'Scanned Copy': 'स्क्यान गरिएको प्रति',
'Scanned Forms Upload': 'स्क्यान गरिएको फारम(हरू) अपलोड गर्नुहोस्',
'Scheduled Jobs': 'सूचिकृत काम(हरू)',
'Schema': 'स्किमा',
'School Health': 'विद्यालय स्वास्थ्य',
'School Holidays only': 'विद्यालय विदाहरू मात्र',
'School RC Units Development': 'विद्यालय आर.सि.इकाईहरू विकास',
'School Safety and Children Education,': 'विद्यालय सुरक्षा र बाल शिक्षा,',
'Seaport': 'बन्दरगाह',
'Search': 'खोजी',
'Search %(site_label)s Status': '%(site_label)s अवस्था खोजी',
'Search Activities': 'कृयाकलाप(हरू) खोजी',
'Search Activity Types': 'कृयाकलाप प्रकार(हरू) खोजी',
'Search Addresses': 'ठेगाना(हरू) खोजी',
'Search Affiliations': 'आबद्धता(हरू) खोजी',
'Search Annual Budgets': 'वार्षिक बजेट(हरू) खोजी',
'Search Appraisals': 'मुल्यांकन(हरू) खोजी',
'Search Awards': 'परस्कार(हरू) खोजी',
'Search Beneficiaries': 'भागिदारहरू खोजी',
'Search Beneficiary Types': 'भागिदार प्रकार(हरू) खोजी',
'Search Branch Organizations': 'शाखा संस्था(हरू) खोजी',
'Search by skills': 'सिप(हरू) अनुसार खोजी',
'Search Campaigns': 'अभियान(हरू) खोजी',
'Search Certificates': 'प्रमाण-पत्र(हरू) खोजी',
'Search Certifications': 'प्रमाणिकरण(हरू) खोजी',
'Search Clusters': 'समूह(हरू) खोजी',
'Search Coalitions': 'गठबन्धन(हरू) खोजी',
'Search Communities': 'समुदाय(हरू) खोजी',
'Search Community Contacts': 'समुदाय सम्पर्क(हरू) खोजी',
'Search Competency Ratings': 'दक्षता स्तर(हरू) खोजी',
'Search Contact Information': 'सम्पर्क जानकारी खोजी',
'Search Contacts': 'सम्पर्क(हरू) खोजी',
'Search Course Certificates': 'पाठ्यक्रम प्रमाण-पत्र(हरू) खोजी',
'Search Courses': 'पाठ्यक्रम(हरू) खोजी',
'Search Credentials': 'कागजात(हरू) खोजी',
'Search Criteria': 'खोजी मापदण्ड',
'Search Departments': 'विभाग(हरू) खोजी',
'Search Deployments': 'परिचालन(हरू) खोजी',
'Search Donors': 'दाता(हरू) खोजी',
'Search Education Details': 'शिक्षा विवरण खोजी',
'Search Entries': 'प्रवेश(हरू) खोजी',
'Search Facilities': 'सूबिधाहरू खोजी',
'Search Facility Types': 'सुविधा प्रकार(हरू) खोजी',
'Search Feature Layers': 'विशेषता तह(हरू) खोजी',
'Search for a Person': 'ब्यक्ति खोजी',
'Search for a Project by name, code, location, or description.': 'नाम, कोड, स्थान, वा ब्याख्याअनुसार परियोजना खोजी',
'Search for a Project by name, code, or description.': 'नाम, कोड, वा ब्याख्याअनुसार परियोजना खोजी',
'Search for a Project Community by name.': 'नामद्वरा परियोजना समुदाय खोजी',
'Search for Activity Organization': 'कृयाकलाप संस्था खोजी',
'Search for Activity Type': 'कृयाकलाप प्रकार खोजी',
'Search for office by organization or branch.': 'संस्था वा शाखाअनुसार कार्यालय खोजी',
'Search for office by organization.': 'संस्थाअनुसार कार्यालय खोजी',
'Search Groups': 'समूह(हरू) खोजी',
'Search Hazards': 'खतरा(हरू) खोजी',
'Search Hours': 'समय (घण्टा) खोजी',
'Search Identity': 'परिचय खोजी',
'Search Images': 'तस्विर(हरू) खोजी',
'Search Job Titles': 'पद खोजी',
'Search Keywords': 'मुख्यशब्द(हरू) खोजी',
'Search Layers': 'तह(हरू) खोजी',
'Search Location': 'स्थान खोजी',
'Search Location Hierarchies': 'स्थान संरचनाहरू खोजी',
'Search location in Geonames': 'भु-नाम अनुसार स्थान खोजी',
'Search Locations': 'स्थान(हरू) खोजी',
'Search Log Entry': 'दर्ताप्रवेश खोजी',
'Search Logged Time': 'सूचिकृत समय खोजी',
'Search Mailing Lists': 'ठेगाना तालिका(हरू) खोजी',
'Search Map Profiles': 'नक्सा बनावट(हरू) खोजी',
'Search Markers': 'चिन्ह(हरू) खोजी',
'Search Member': 'सदस्य खोजी',
'Search Members': 'सदस्य(हरू) खोजी',
'Search Membership': 'सदस्यता खोजी',
'Search Membership Types': 'सदस्यता प्रकार(हरू) खोजी',
'Search Milestones': 'उद्देश्य(हरू) खोजी',
'Search Networks': 'नेटवर्क(हरू) खोजी',
'Search Office Types': 'कार्यलय प्रकार(हरू) खोजी',
'Search Offices': 'कार्यलय(हरू) खोजी',
'Search Open Tasks for %(project)s': ' %(project)s को लागि खुला काम(हरू) खोजी',
'Search Organization Types': 'संस्था प्रकार(हरू) खोजी',
'Search Organizations': 'संस्था(हरू) खोजी',
'Search Participants': 'सहभागी(हरू) खोजी',
'Search Partner Organizations': 'साझेदार संस्था(हरू) खोजी',
"Search Person's Details": 'ब्यक्तिको विवरण खोजी',
'Search Persons': 'ब्यक्ति(हरू) खोजी',
'Search PoI Types': 'रूचीको बुँदा प्रकार(हरू) खोजी',
'Search Points of Interest': 'रूचीको बुँदा खोजी',
'Search Policies & Strategies': 'नियम तथा उद्देश्य(हरू) खोजी',
'Search Professional Experience': 'ब्यबसायिक अनुभव खोजी',
'Search Programs': 'कार्यक्रम(हरू) खोजी',
'Search Project Organizations': 'परियोजना संस्था(हरू) खोजी',
'Search Projections': 'योजना(हरू) खोजी',
'Search Projects': 'परियोजना(हरू) खोजी',
'Search Records': 'विवरण(हरू) खोजी',
'Search Red Cross & Red Crescent National Societies': 'रेड क्रस तथा रेड क्रिसेन्ट राष्ट्रिय सोसाइटिज् खोजी',
'Search Regions': 'क्षेत्र(हरू) खोजी',
'Search Resource Types': 'स्रोत प्रकार(हरू) खोजी',
'Search Resource Inventory': 'स्रोत लेखा विवरण खोजी',
'Search Response Summaries': 'प्रतिकृया संक्षेप खोजी',
'Search Results': 'नतिजाहरू खोजी',
'Search Roles': 'भूमिका(हरू) खोजी',
'Search Rooms': 'कोठा(हरू) खोजी',
'Search saved searches': 'संचित खोजीहरू अनुसार खोजी',
'Search Sectors': 'क्षेत्र(हरू) खोजी',
'Search Services': 'सेवा(हरू) खोजी',
'Search Shipped Items': 'स्थानान्तर वस्तु(हरू) खोजी',
'Search Skill Equivalences': 'सिप सरह(हरू) खोजी',
'Search Skill Types': 'सिप प्रकार(हरू) खोजी',
'Search Skills': 'सिप(हरू) खोजी',
'Search Staff': 'कर्मचारी खोजी',
'Search Staff & Volunteers': 'कर्मचारी तथा स्वयम्-सेवक(हरू) खोजी',
'Search Staff Assignments': 'कर्मचारी काम(हरू) खोजी',
'Search Symbologies': 'चिन्हताहरू खोजी',
'Search Tasks': 'काम(हरू) खोजी',
'Search Teams': 'समूह(हरू) खोजी',
'Search Theme Data': 'स्वरूप आँकडा खोजी',
'Search Themes': 'स्वरूप(हरू) खोजी',
'Search Training Events': 'तालिम कार्यक्रम(हरू) खोजी',
'Search Training Participants': 'तालिम सहभागी(हरू) खोजी',
'Search Volunteer Cluster Positions': 'स्वयम्-सेवक समूह पद(हरू) खोजी',
'Search Volunteer Cluster Types': 'स्वयम्-सेवक समूह प्रकार(हरू) खोजी',
'Search Volunteer Clusters': 'स्वयम्-सेवक समूह(हरू) खोजी',
'Search Volunteer Roles': 'स्वयम्-सेवक भूमिका(हरू) खोजी',
'Search Volunteers': 'स्वयम्-सेवक(हरू) खोजी',
'Secondary Server (Optional)': 'द्दित्तिय सर्वर (वैकल्पिक)',
'seconds': 'सेकेण्ड',
'Seconds must be a number.': 'सेकेण्ड संख्यामा नै हुनुपर्छ ।',
'Seconds must be less than 60.': 'सेकेण्ड ६० भन्दा कम हुनुपर्छ ।',
'Secretary General': 'महासचिव',
'Sector': 'क्षेत्र',
'Sector added': 'क्षेत्र संचित गरियो',
'Sector added to Organization': 'संस्थामा क्षेत्र संचित गरियो',
'Sector added to Project': 'परियोजनामा क्षेत्र संचित गरियो',
'Sector added to Theme': 'स्वरूपमा क्षेत्र संचित गरियो',
'Sector deleted': 'क्षेत्र हटाइयो',
'Sector Details': 'क्षेत्र विवरण',
'Sector removed from Organization': 'संस्थाबाट क्षेत्र हटाइयो',
'Sector removed from Project': 'परियोजनाबाट क्षेत्र हटाइयो',
'Sector removed from Theme': 'स्वरूपबाट क्षेत्र हटाइयो',
'Sector updated': 'क्षेत्र परिमार्जन गरियो',
'Sectors': 'क्षेत्र(हरू)',
'Sectors to which this Activity Type can apply': 'यो कृयाकलाप प्रकार लागु गर्न सकिने क्षेत्र(हरू)',
'Sectors to which this Theme can apply': 'यो स्वरूप लागु गर्न सकिने क्षेत्र(हरू)',
'Security': 'सुरक्षा',
'Security Officer': 'सुरक्षा कर्मचारी',
'See All Entries': 'सम्पूर्ण प्रवेश(हरू) हेर्नुहोस्',
'see comment': 'टिप्पणी हेर्नुहोस्',
'see more': 'अझै हेर्नुहोस्',
'Seen': 'हेरियो',
'Select': 'छान्नुहोस्',
'Select %(location)s': '%(location)s छान्नुहोस्',
"Select 2 records from this list, then click 'Merge'.": "यो तालिकाबाट २ विवरणहरू छान्नुहोस्, त्यसपछी 'एकै गर्नुहोस्'मा थिच्नुहोस्",
"Select a Room from the list or click 'Add Room'": "तालिकाबाट एउटा कोठा छान्नुहोस् र 'कोठा राख्नुहोस्'मा क्लिक गर्नुहोस्",
'Select all': 'सबैलाई छान्नुहोस्',
'Select All': 'सबैलाई छान्नुहोस्',
'Select an existing bin': 'हालको बिनलाई छान्नुहोस्',
'Select an image to upload. You can crop this later by opening this record.': 'अपलोड गर्नुको लागि तस्बिर छान्नुहोस् । यो विवरणलाई खोलेर तपाईंले यो तहलाई काट्न सक्नुहुन्छ',
'Select Existing Location': 'हालको स्थान छान्नुहोस्',
'Select from registry': 'दर्ताबाट छान्नुहोस्',
'Select one or more option(s) that apply': 'लागु हुने एक वा थप विकल्प(s) छान्नुहोस्',
'Select resources to import': 'राख्नको निम्ति स्रोत छान्नुहोस्',
'Select the default site.': 'स्वचलानमा रहेको क्षेत्र छान्नुहोस्',
'Select the option that applies': 'लागु हुने विकल्प छान्नुहोस्',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'दुरी पत्ता लगाउनको निम्ति मुल्यंकन र कृयाकलाप(हरू)लाई छान्नुहोस्',
'Select the person assigned to this role for this project.': 'यो परियोजनाको लागि खटिएको व्यक्ति छान्नुहोस्',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "यदि निश्चितहरूलाई बनावट स्थलबाट गहिरो स्तरमा पारिवारिक क्षेत्रको आवस्यक पर्छ भने यसलाई छान्नुहोस् । उदाहरणको लागि, यदि बनावटको सबैभन्दा सानो बिभाजन 'जिल्ला' हो भने, तोकिएको सबै स्थानहरू परिवारको रूपमा जिल्ला नै हुनुपर्दछ ।",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'स्थान बनावटमा सम्पूर्ण क्षेत्रहरूलाई परिवार क्षेत्रको आवस्यकता पर्छ भने, यसलाई छान्नुहोस् । प्रभावित क्षेत्रलाई प्रतिनिधित्व गर्दै यसलाई "क्षेत्र" तोकिएर गर्न सकिन्छ ।',
'Select this if you need this resource to be mapped from site_id instead of location_id.': 'स्थान_आइ.डि.को साटो, क्षेत्र_आइ.डि.बाट यो चित्रत भएको चाहानुहुन्छ भने यसलाई छान्नुहोस्।',
'Select This Location': 'यो स्थान छान्नुहोस्',
'Selected OCR Form has no pages. Use another revision of create a new revision by downloading a new Form.': 'छानिएको ओ.सि.आर. फारमको कुनै पृष्ठ(हरू) छैनन् । नयाँ फारमलाई अपलोड गरेर अर्को दोहोरो कार्य गर्नुहोस् ।',
'Send a message to this person': 'यो व्यक्तिलाई संदेश पठाउनुहोस्',
'Send a message to this team': 'यो समूहलाई संदेश पठाउनुहोस्',
'Send batch': 'व्यच पठाउनुहोस्',
'Send Message': 'संदेश पठाउनुहोस्',
'Send Task Notification': 'काम सूचना घण्टि पठाउनुहोस्',
'Senior (50+)': 'जेष्ठ (५०+)',
'Sent Shipments': 'जहाजिकरण पठाउनुहोस्',
'separated': 'छुटाईएको',
'separated from family': 'परिवारबाट छुटाईएको',
'Serial Number': 'क्रम संख्या',
'Service': 'सेवा',
'Service added': 'सेवा संचित गरियो',
'Service added to Organization': 'संस्थामा सेवा संचित गरियो',
'Service deleted': 'सेवा हटाइयो',
'Service Details': 'सेवा विवरण',
'Service Record': 'सेवा विवरण',
'Service removed from Organization': 'संस्थाबाट सेवा हटाइयो',
'Service updated': 'सेवा परिमार्जन गरियो',
'Services': 'सेवा(हरू)',
'Set as my Default': 'मेरो स्वचिलत राख्नुहोस्',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'मुख्यब्यक्ति बाहेकका ब्यक्ति(हरू)ले यो स्तरलाई परिवर्तन गर्नको निम्ति "सत्य"मा राख्नुहोस् ।',
'Settings': 'सेटिङ(हरू)',
'Sex': 'लिंग',
'Sexual and Reproductive Health': 'यौन तथा प्रजनन स्वास्थ',
'Shapefile Layer': 'आकारफाइल तह',
'Share': 'बाँड्नुहोस्',
'shaved': 'काटिएको',
'Shelter': 'बसोबास',
'Shelter Repair Kit': 'आवास मर्मत किट',
'short': 'छोटो',
'Short Description': 'छोटो विवरण',
'Short Title / ID': 'छोटो शिर्षक / आइ.डि.',
'short<6cm': 'छोटो<६से.मि.',
'Show': 'देखाउनुहोस्',
'Show %(number)s entries': '%(number)s प्रवेश(हरू) देखाउनुहोस्',
'Show on Map': 'नक्सामा देखाउनुहोस्',
'Show Pivot Table': 'वृत्त तालिका देखाउनुहोस्',
'Show Table': 'तालिका देखाउनुहोस्',
'Show totals': 'जम्मा(हरू) देखाउनुहोस्',
'Showing 0 to 0 of 0 entries': '0 देखी 0 मा 0 प्रवेश(हरू) देखाईंदै',
'Showing _START_ to _END_ of _TOTAL_ entries': ' _START_ देखी _END_ को _TOTAL_ प्रवेश(हरू) देखाईंदै',
'sides': 'साइडहरू',
'sign-up now': 'अहिले साइनअप गर्नुहोस्',
'Signature': 'सही',
'Simple Search': 'साधारण खोजी',
'Simulation ': 'सिमुलेशन ',
'single': 'एकल',
'Single PDF File': 'एकल पि.डि.एफ. फाइल',
'Site': 'क्षेत्र',
'Site Name': 'क्षेत्र नाम',
'Site Planning': 'क्षेत्र योजना',
'Site Selection': 'क्षेत्र निर्धारण',
'Sitemap': 'क्षेत्रनक्सा',
'Situation': 'अवस्था',
'Situation Monitoring/Community Surveillance': 'अवस्था अनुगमन/समुदाय जाँच',
'Skeleton Example': 'फ्रेम उदाहरण',
'Sketch': 'खाका',
'Skill': 'सिप',
'Skill added': 'सिप संचित गरियो',
'Skill Catalog': 'सिप तालिका',
'Skill deleted': 'सिप हटाइयो',
'Skill Details': 'सिप विवरण',
'Skill Equivalence': 'सिप सरह',
'Skill Equivalence added': 'सिप सरह संचित गरियो',
'Skill Equivalence deleted': 'सिप सरह हटाइयो',
'Skill Equivalence Details': 'सिप सरह विवरण',
'Skill Equivalence updated': 'सिप सरह परिमार्जन गरियो',
'Skill Equivalences': 'सिप सरह(हरू)',
'Skill removed': 'सिप हटाइयो',
'Skill Type': 'सिप प्रकार',
'Skill Type added': 'सिप प्रकार संचित गरियो',
'Skill Type Catalog': 'सिप प्रकार तालिका',
'Skill Type deleted': 'सिप प्रकार हटाइयो',
'Skill Type updated': 'सिप प्रकार परिमार्जन गरियो',
'Skill updated': 'सिप परिमार्जन गरियो',
'Skills': 'सिप(हरू)',
'Skin Marks': 'अनुहारको छाला दाग(हरू)',
'slim': 'पातलो',
'Small Scale Mitigation': 'सानो मात्रा सुधार',
'Social Impacts & Resilience': 'सामाजिक प्रभाव र उत्थानशिलता',
'Social Inclusion / Diversity': 'सामाजिक समावेशीकरण/ विविधता',
'Social Mobilisation': 'सामाजिक परिचालन',
'Solid Waste Management': 'ठोस फोहर ब्यवस्थापन',
'Sops and Guidelines Development': 'स्तरीय संचालन प्रकृया र निर्देशिका निर्माण',
'Sorry location %(location)s appears to be outside the area of parent %(parent)s.': 'माफगर्नुहोस्, स्थान %(location)s परिवार %(parent)s क्षेत्र भन्दा बाहिर परेको जस्तो देखिन्छ ।',
'Sorry location %(location)s appears to be outside the area supported by this deployment.': 'माफगर्नुहोस्, स्थान %(location)s यो परियोजनाले उल्लेख गर्ने क्षेत्र भन्दा बाहिर परेको जस्तो देखिन्छ ।',
'Sorry location appears to be outside the area of parent %(parent)s.': 'माफगर्नुहोस्, स्थान परिवार क्षेत्र भन्दा बाहिर परेको जस्तो देखिन्छ ।',
'Sorry location appears to be outside the area supported by this deployment.': 'माफगर्नुहोस्, स्थान यो परियोजनाले उल्लेख गर्ने क्षेत्र भन्दा बाहिर परेको जस्तो देखिन्छ ।',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'माफगर्नुहोस्, नक्सासंचालक भूमिका प्रयोगकर्ता(हरू)का लागि मात्र यी क्षेत्रमा अनुमति छ ।',
'Sorry, there are no addresses to display': 'माफगर्नुहोस्, देखाउनको लागि कुनैपनि ठेगानाहरू छैनन्',
'Source': 'स्रोत',
'Source Name': 'स्रोत नाम',
'Source URL': 'स्रोत यू.आर.एल.',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'निश्चित क्षेत्र (जस्तै, भवन/कोठा) स्थान भित्र यो ब्यक्ति/समूह देखियो ।',
'Specific locations need to have a parent of level': 'निश्चित स्थान(हरू)को स्तरको परिवार हुन आवश्यक छ ।',
'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'खुलासडकनक्सा/गुगल/विङ्गको आधारभुत तह(हरू)को प्रयोग गर्नको लागि स्फेरिकल मेर्कटर (९00९१३)को आवश्यक पर्छ ।',
'Spraying of Vectors': 'विक्टोरहरू छरिँदै',
'Staff': 'कर्मचारी',
'Staff & Volunteers': 'कर्मचारी तथा स्वयम्-सेवक(हरू)',
'Staff & Volunteers (Combined)': 'कर्मचारी तथा स्वयम्-सेवक(हरू) (मिलाइएको)',
'Staff Assigned': 'कर्मचारी खटाइएको',
'Staff Assignment Details': 'कर्मचारी काम विवरण',
'Staff Assignment removed': 'कर्मचारी काम हटाइयो',
'Staff Assignment updated': 'कर्मचारी काम परिमार्जन गरियो',
'Staff Assignments': 'कर्मचारी काम(हरू)',
'Staff ID': 'कर्मचारी आइ.डि.',
'Staff Management': 'कर्मचारी ब्यबस्थापन',
'Staff Member added': 'कर्मचारी सदस्य संचित गरियो',
'Staff member added': 'कर्मचारी सदस्य संचित गरियो',
'Staff Member deleted': 'कर्मचारी सदस्य हटाइयो',
'Staff Member Details': 'कर्मचारी सदस्य विवरण',
'Staff Member Details updated': 'कर्मचारी सदस्य विवरण परिमार्जन गरियो',
'Staff Record': 'कर्मचारी विवरण',
'Staff Report': 'कर्मचारी प्रतिवेदन',
'Staff with Contracts Expiring in the next Month': 'अर्को महिनामा सम्झौता(हरू)को म्याद सकिने कर्मचारीहरू',
'Staff/Volunteer Record': 'कर्मचारी/स्वयम्-सेवक विवरण',
'Start Date': 'शुरु मिति',
'Status': 'अवस्था',
"Status 'assigned' requires the %(fieldname)s to not be blank": "खटाइएको' अवस्थालाई खालि नछोड्नको निम्ति %(fieldname)s को आवश्यकता पर्दछ ।",
'Status added': 'अवस्था संचित गरियो',
'Status deleted': 'अवस्था हटाइयो',
'Status Details': 'अवस्था विवरण',
'Status updated': 'अवस्था परिमार्जन गरियो',
'Statuses': 'अवस्था(हरू)',
'Stockpiling, Prepositioning of Supplies': 'भण्डारण, पुर्ती(हरू)को तयारी',
'Stocks and relief items.': 'मौज्दात र राहत सामग्रीहरु',
'Storm Surge': 'हावाहुरी',
'straight': 'सिधा',
'Strategy Development': 'उद्देश्य विकास',
'Street Address': 'सडक ठेगाना',
'Street View': 'सडक दृश्य',
'Strengthening Livelihoods': 'जीवीकोपार्जन सुदृढीकरण',
'String used to configure Proj4js. Can be found from %(url)s': 'बनावट प्रोजे४जे.एस मिलाउनको निम्ति स्ट्रिङ्ग प्रयोग गरिएको छ । %(url)s प्राप्त गर्न सकिन्छ ।',
'Strong': 'बलियो',
'Structural Safety': 'संरचनात्मक सुरक्षा',
'Style': 'तरिका',
'Style invalid': 'तरिका अमान्य',
'Sub Chapter': 'सह अध्याय',
'Sub Regional': 'उपक्षेत्रीय',
'Submission successful - please wait': 'निवेदन सफल - कृपया धैर्य गर्नुहोस्',
'Submit': 'पेश गर्नुहोस्',
'suffered financial losses': 'भोगिएको बित्तिय घाटा(हरू)',
'Supervisor': 'सुपरभाइजर',
'Supplier': 'निर्यातकर्ता',
'Suppliers': 'निर्यातकर्ता(हरू)',
'Supplier/Donor': 'आपूर्तिकर्ता/दाता',
'Swiss Francs': 'स्विजरल्याण्ड फ्र्याङ्क',
'Switch to 3D': '३डि मा जानुहोस्',
'Symbologies': 'चिन्हताहरू',
'Symbology': 'चिन्हता',
'Symbology added': 'चिन्हता संचित गरियो',
'Symbology deleted': 'चिन्हता हटाइयो',
'Symbology Details': 'चिन्हता विवरण',
'Symbology removed from Layer': 'तहबाट चिन्हता हटाइयो',
'Symbology updated': 'चिन्हता परिमार्जन गरियो',
'Table': 'तालिका',
'Table Permissions': 'तालिका स्वीकृती',
'Tablename': 'तालिकानाम',
'Tags': 'ट्याग',
'tall': 'अग्लो',
'Task': 'काम',
'Task added': 'काम संचित गरियो',
'Task deleted': 'काम हटाइयो',
'Task Details': 'काम विवरण',
'Task updated': 'काम परिमार्जन गरियो',
'Tasks': 'काम',
'Team': 'समूह',
'Team added': 'समूह संचित गरियो',
'Team deleted': 'समूह हटाइयो',
'Team Description': 'समूह ब्याख्या',
'Team Details': 'समूह विवरण',
'Team Leader': 'समूह अगुवा',
'Team Member added': 'समूह सदस्य संचित गरियो',
'Team Members': 'समूह सदस्य(हरू)',
'Team Name': 'समूह नाम',
'Team Type': 'समूह प्रकार',
'Team updated': 'समूह परिमार्जन गरियो',
'Teams': 'समूह(हरू)',
'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'भुसर्वर लाई मेटाटिलिङ्ग गर्न निर्देशन दिन्छ जसले नक्कल प्रति स्तर(हरू)लाई कम गर्दछ ।',
'Template': 'ढाँचा',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'पाचौँ-स्तरको लागि देश संचालक शाखा भित्र (जस्तै, भोट प्रकृया वा लेखकोड सह-शाखा)। यो स्तर प्राय प्रयोग हुँदैन',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'चौथो-स्तरको लागि देश संचालक शाखा भित्र (जस्तै, गाउँ, छिमेक वा टोल)',
'Term for the primary within-country administrative division (e.g. State or Province).': 'प्रथमिकको लागि देश संचालक भित्र (जस्तै, राज्य वा क्षेत्र)।',
'Term for the secondary within-country administrative division (e.g. District or County).': 'द्दित्तियको लागि देश संचालक शाखा (जस्तै, जिल्ला वा क्षेत्र) ।',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'तेस्रो-स्तरको लागि देश संचालक शाखा (जस्तै, शहर वा नगर) ।',
'Terms of Service': 'सेवाको निति',
'Tertiary Server (Optional)': 'तृतीय सर्वर (वैकल्पिक)',
'Text': 'टेक्स्ट',
'The area is': 'क्षेत्र हो',
'The Area which this Site is located within.': 'क्षेत्र जसमा यो स्थान रहेको छ ।',
'The attribute used to determine which features to cluster together (optional).': 'कुन विशेषताहरू हरूलाइ सँगै राख्ने सो देखाउनलाई प्रयोग भएको(वैकल्पिक) ',
'The attribute which is used for the title of popups.': 'पप्-अप(हरू)को शिर्षकको लागि प्रयोग भएको',
'The attribute within the KML which is used for the title of popups.': 'के.एम.एल. भित्रको आदेश जुन पप्-अप(हरू)को शिर्षकको लागि प्रयोग हुँदछ',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'के.एम.एल. भित्रको आदेश(हरू) जुन पप्-अप(हरू)को बनावटको लागि प्रयोग भएको छ । (आदेशहरूको बिचमा स्पेस प्रयोग गर्नुहोस्)',
'The body height (crown to heel) in cm.': 'से.मि.मा शरिरको उचाइ (शिर देखि पाइताला सम्म)',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'ब्यक्ति/समूहको हालको स्थान, जुन साधारण(प्रतिवेदनको लागि) वा आकार दिइएको(नक्सामा देखाईएको) । उपलब्ध स्थान(हरू)बाट खोजी गर्नको लागि केहि शब्दहरू प्रवेश गर्नुहोस् ।',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'इमेल ठेगाना जसमा प्रमाणिकरण अनुरोधहरू पठाइएको छ (साधारणतया यसमा इमेल ठेगानाको समूहहरू हुन्छन्, ब्यक्तिगत ठेगाना होइन) । यदि क्षेत्र खालि भएमा, र डोमेन मिलेमा अनुरोधहरू स्वचालितरूपमा नै प्रमाणित हुनेछ ।',
'The facility where this position is based.': 'यो पदरहेको क्षेत्रमा सुविधा',
'The first or only name of the person (mandatory).': 'ब्यक्तिको पहिलो नाम वा मात्र नाम (आवश्यक).',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'यू.आर.एल.को बनावट हुन्छ: http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.',
'The language you wish the site to be displayed in.': 'तपाईंले क्षेत्रमा देखियोस् भनेर चाहानु भएको भाषा ',
'The length is': 'लाम्बाइ हो',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'स्थान, जहाँबाट ब्यक्ति आएको हो, जुन साधारण छ(प्रतिवेदनको लागि) वा आकार दिइएको(नक्सामा देखाईएको) । उपलब्ध स्थान(हरू)बाट खोजी गर्नको लागि केहि शब्दहरू प्रवेश गर्नुहोस् ।',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'स्थान, जहाँ ब्यक्ति जाँदैछ, जुन साधारण छ(प्रतिवेदनको लागि) वा आकार दिइएको(नक्सामा देखाईएको) । उपलब्ध स्थान(हरू)बाट खोजी गर्नको लागि केहि शब्दहरू प्रवेश गर्नुहोस् ।',
'The map will be displayed initially with this latitude at the center.': 'शुरुमा नक्सा यो अक्षांशमा बिचमा देखिनेछ ',
'The map will be displayed initially with this longitude at the center.': 'शुरुमा नक्सा यो देशान्तरमा बिचमा देखिनेछ ',
'The Maximum valid bounds, in projected coordinates': 'नियन्त्रित अवस्थामा बढि मान्य क्षेत्र',
'The minimum number of features to form a cluster. 0 to disable.': 'समूह बनाउनको निम्ति विशेषताहरूको कम्ति संख्या । निस्कृय गर्नको निम्ति 0 ',
'The name to be used when calling for or directly addressing the person (optional).': 'ब्यक्तिलाई सिधै बोलाउँदा प्रयोग गर्दा बोलाईने नाम(वैकल्पिक) ।',
'The number of pixels apart that features need to be before they are clustered.': 'विशेषताहरूलाई समूहमा राख्न भन्दा पहिले पिक्सेलको संख्या ।',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'अपलोड गर्नको निम्ति देखिने नक्साको वरिपरिको टाइलको संख्या । सुन्यको अर्थ, पहिलो पृष्ठ छिटो लोड हुन्छ, ठूलो संख्यको अर्थ ढिलो हुँदै जाने भन्ने हुन्छ ।',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'संस्था दर्ताले क्षेत्रमा काम गरिरहेको सम्पूर्ण राहत संस्थाहरूलाई ट्र्याकमा राख्दछ ।',
"The Project module can be used to record Project Information and generate Who's Doing What Where reports.": 'परियोजना भाग परियोजना जानकारी र को कहाँ के गरिरहेको छ भन्ने प्रतिवेदन(हरू) विवरण राख्नको निम्त प्रयोग गर्न सकिन्छ ।',
"The provided 'formuuid' is invalid. You have selected a Form revision which does not exist on this server.": "प्रदान गरिएको 'फर्मड' अमान्य छ । यो सर्वरमा उपलब्ध नभएको फारम पुन:अवलोकन तपाईंले छान्नु भएको छ ।",
"The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.": "उपलब्ध गरिएको 'फर्मड' अमान्य छ । फारम अपलोड गर्नुहोस् भन्ने भाग अमान्य छ । अपलोड गर्न प्रयास् गर्नुहोस् ।",
"The staff member's official job title": 'कर्मचारी सदस्यको संस्थागत काम शिर्षक',
'The system supports 2 projections by default:': 'प्रणालीले स्वचालितरूपमा २ योजनाहरूलाई सहयोग गर्दछ:',
'The uploaded Form is unreadable, please do manual data entry.': 'संचित गरिएको फारम पढ्न नसकिने, विस्तृ आँकडा प्रवेश गर्नुहोस् ।',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'वेव नक्साको क्षमता प्राप्त गर्नुहोस् पृष्ठको यू.आर.एल. जस्को तह(हरू) तपाईंले नक्सामा ब्राउजरको माध्यमबाट उपलब्ध गराउन चाहानुहुन्छ ।',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'तस्बिर फाइलको यू.आर.एल. । यदि तपाईं तस्विर फाइल अपलोड गर्नुहुन्न भने होस्, यस्को स्थान यहाँ देखाउनुपर्छ ।',
'The URL to access the service.': 'सेवा पहुँचको निम्ति यू.आर.एल. ',
"The volunteer's role": 'स्वयम्-सेवकको भूमिका',
'The weight in kg.': 'तौल केजीमा',
'Theme': 'स्वरूप',
'Theme added': 'स्वरूप संचित गरियो',
'Theme added to Activity': 'कृयाकलापमा स्वरूप संचित गरियो',
'Theme added to Project': 'परियोजनामा स्वरूप संचित गरियो',
'Theme added to Project Location': 'परियोजना स्थानमा स्वरूप संचित गरियो',
'Theme Data': 'स्वरूप आँकडा',
'Theme Data deleted': 'स्वरूप आँकडा हटाइयो',
'Theme Data updated': 'स्वरूप आँकडा परिमार्जन गरियो',
'Theme deleted': 'स्वरूप हटाइयो',
'Theme Details': 'स्वरूप विवरण',
'Theme Layer': 'स्वरूप तह',
'Theme removed from Activity': 'कृयाकलापमा स्वरूप हटाइयो',
'Theme removed from Project': 'परियोजनामा स्वरूप हटाइयो',
'Theme removed from Project Location': 'परियोजना स्थानमा स्वरूप हटाइयो',
'Theme updated': 'स्वरूप परिमार्जन गरियो',
'Themes': 'स्वरूपहरू',
'There are multiple records at this location': 'यस स्थानमा बहु विवरणहरू छन्',
"There are no details for this person yet. Add Person's Details.": 'यस ब्यक्तिको निम्ति अहिले सम्म कुनै विवरण छैन ।',
'There are too many features, please Zoom In': 'धेरै विशेषताहरू छन्, कृपया नजिक ल्याउनुहोस्',
'There is no address for this person yet. Add new address.': 'यो ब्यक्तिको निम्ति अहिलेसम्म कुनै ठेगाना छैन । नयाँ ठेगाना राख्नुहोस्.',
'There is no status for this %(site_label)s yet. Add %(site_label)s Status.': 'अहिलेसम्म यस %(site_label)s को लागि कुनै अवस्था छैन । %(site_label)s अवस्था राख्नुहोस् ।',
'There was a problem, sorry, please try again later.': 'समस्या थियो, माफगर्नुहोला, कृपया पछि प्रयास गर्नुहोला ।',
'These are the filters being used by the search.': 'खोजीद्वारा प्रयोग गरिएको फिल्टरहरू छन् ।',
'These need to be added in Decimal Degrees.': 'यी दशमलव डिग्रीमा संचित गरिनु पर्छ ।',
'This email-address is already registered.': 'यो इमेल-ठेगाना पहिले नै दर्ता गरिएको छ ।',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'यदि यो स्तर निर्माण प्रकृयामा रहेको छ भने यो सहि हो । यो स्तर पुरा भएपछि, आपतकालिन परिवर्तन हुन नदिनको निम्ति यसलाई गलत भनेर राख्न सक्नुहुन्छ ।',
'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'यो साधारणतया नक्साको तह प्रपटिको स्टाइल ट्यबमा विजेट प्रयोग गरि परिवर्तन गरिएको हो ।',
'This job has already been finished successfully.': 'यो काम पहिलेनै सफलतापुर्वक समाप्त भएको छ ।',
'This level is not open for editing.': 'यो स्तर परिवर्तनको लागि खुला छैन ।',
'This role can not be assigned to users.': 'यो भूमिका प्रयोगकर्ता(हरू)लाई हस्तान्तरण गर्न सकिँदैन ।',
'This should be an export service URL, see': 'यो निर्यात सेवा यू.आर.एल. हुनुपर्छ, हेर्नुहोस्',
'Thunderbolt': 'चट्याङ्ग',
'Tiled': 'टाइल हालिएको',
'Time': 'समय',
'Time Actual': 'वास्तविक समय',
'Time Estimate': 'अडकल समय',
'Time Estimated': 'अडकल गरिएको समय',
'Time Frame': 'समय अवधी',
'Time Log': 'समय सूची',
'Time Log Deleted': 'समय दर्ताहटाइयो',
'Time Log Updated': 'समय दर्तापरिमार्जन गरियो',
'Time Logged': 'सूचिकृत समय',
'Time Taken': 'लागेको समय',
'Timeline': 'समयसीमा',
'times': 'समय(हरू)',
'times (0 = unlimited)': 'समय(हरू) (0 = असिमित)',
'times and it is still not working. We give in. Sorry.': 'समय(हरू) र यो अझै काम गरिरहेको छैन । माफ गर्नुहोला',
'Title': 'शिर्षक',
'Title to show for the Web Map Service panel in the Tools panel.': 'टुल्स् प्यानलमा वेव नक्सा सेवा प्यानलको लागि देखाउने शिर्षक ',
'TMS Layer': 'टि.एम.एस. तह',
'to download a OCR Form.': 'ओ.सि.आर. फारम डाउनलोड गर्न',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'खुलासडकनक्सा परिवर्तन गर्नको निम्ति, तपाईंको नक्सा बनावटमा खुलासडकनक्सा सेटिङको आवस्यकता पर्दछ ।',
'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': 'समयसिमा सार्न: माउस्को स्क्रोल ह्विल प्रयोग गर्नुहोस्, एरो कि वा तान्नुहोस् र समयसिमालाई ड्र्याग गर्नुहोस् ।',
'To Print or Share the Map you will have to take a screenshot. If you need help taking a screen shot, have a look at these instructions for %(windows)s or %(mac)s': 'प्रिन्ट वा बाँडनको लागि तपाईंले नक्साको स्क्रिनसट लिनु पर्नेहुन्छ । यदि स्क्रिनसट लिनको निम्ति तपाईंलाई सहयोग चाहिन्छ भने, %(windows)s र %(mac)s को लागि यि निर्देशनहरूलाई हेर्नुहोस् ।',
'to reset your password': 'पासवर्ड परिवर्तन गर्न',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "स्थानको खोजीको लागि, नाम प्रवेश गर्नुहोस् । तपाईं % विल्डकार्डकोरूपमा प्रयोग गर्न सक्नुहुन्छ । सम्पूर्ण स्थान(हरू) लाई तालिकामा नराखिकन, 'खोजी' थिच्नुहोस् ।",
"To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.": "सदस्य खोजीको निम्ति, ब्यक्ति वा समूहको नामको कुनै भाग टाइप गर्नुहोस् । तपाईं % विल्डकार्डकोरूपमा प्रयोग गर्न सक्नुहुन्छ । सम्पूर्ण स्थान(हरू) लाई तालिकामा नराखिकन, 'खोजी' थिच्नुहोस् ।",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "ब्यक्तिको खोजीको निम्ति, स्पेस्ले छुटाएर पहिलो, बिचको वा अन्तिमको नाम र/वा ब्यक्तिको आइ.डि. संख्या मध्ये कुनै टाइप गर्नुहोस् । तपाईं % विल्डकार्डकोरूपमा प्रयोग गर्न सक्नुहुन्छ । सम्पूर्ण स्थान(हरू) लाई तालिकामा नराखिकन, 'खोजी' थिच्नुहोस् ।",
'tonsure': 'टनस्यूर',
'Tools and Guidelines Development': 'औजार(हरू) र निर्देशन विकास',
'total': 'जम्मा',
'Total': 'जम्मा',
'Total Annual Budget': 'जम्मा वार्षिक बजेट',
'Total Funding (Local Currency)': 'जम्मा अनुदान (स्थानिय मुद्रा)',
'Total Funding Amount': 'जम्मा अनुदान मात्रा',
'Total Persons': 'जम्मा ब्यक्ति(हरू)',
'Total Population': 'जम्मा जनसंख्या',
'Total Records: %(numrows)s': 'जम्मा विवरणहरू: %(numrows)s',
'Tourist Group': 'पर्यटक समूह',
'Trackable': 'ट्र्याक गर्न सकिने',
'Tracking and analysis of Projects and Activities.': 'परियोजना र कार्यक्रमहरुको ट्र्याकिङ्ग',
'Training': 'तालिम',
'Training added': 'तालिम संचित गरियो',
'Training Course Catalog': 'तालिम पाठ्यक्रम तालिका',
'Training Courses': 'तालिम कोर्सहरु',
'Training deleted': 'तालिम हटाइयो',
'Training Details': 'तालिम विवरण',
'Training Event': 'तालिम कार्यक्रम',
'Training Event added': 'तालिम कार्यक्रम संचित गरियो',
'Training Event deleted': 'तालिम कार्यक्रम हटाइयो',
'Training Event Details': 'तालिम कार्यक्रम विवरण',
'Training Event updated': 'तालिम कार्यक्रम परिमार्जन गरियो',
'Training Events': 'तालिम कार्यक्रम(हरू)',
'Training Facility': 'तालिम सुविधा',
'Training Hours (Month)': 'तालिम समय (घण्टा) (महिना)',
'Training Hours (Year)': 'तालिम समय (घण्टा) (वर्ष)',
'Training of Community/First Responders': 'समुदाय/पहिलो प्रतिकृया दिने ब्यक्तिको तालिम ',
'Training of Master Trainers/Trainers': 'प्रशिक्षक प्रशिक्षण तालिम',
'Training Report': 'तालिम प्रतिवेदन',
'Training updated': 'तालिम परिमार्जन गरियो',
'Trainings': 'तालिम(हरू)',
'Transfer': 'पठाउनुहोस्',
'Transit': 'द्वार',
'Transitional Shelter': 'संक्रमणकालिन आवास',
'Transparent?': 'पारदर्शक?',
'Tree and Mangrove Planting': 'रुख तथा म्यानग्रोभ रोपण',
'Type': 'प्रकार',
"Type the first few characters of one of the Participant's names.": 'सहभागीको नामको पहिलो शब्दहरू टाइप गर्नुहोस्',
"Type the first few characters of one of the Person's names.": 'ब्यक्तिको नामको पहिलो शब्दहरू टाइप गर्नुहोस्',
'UN agency': 'यू.एन. एजेन्सि',
'Unable to parse CSV file or file contains invalid data': 'सि.एस.भि. फाइल सुचारु हुन सकेन वा फाइलमा अमान्य आँकडा रहेको',
'Uncheck all': 'सबैको चिन्ह हटाउनुहोस्',
'United States Dollars': 'संयूक्त राज्य डलर',
'Units': 'इकाई(हरू)',
'Unknown': 'थाहा नभएको',
'unknown': 'थाहा नभएको',
'unlimited': 'असिमित',
'Unmark as duplicate': 'नक्कल प्रतिको रूपमा चिन्ह हटाउनुहोस्',
'Unspecified': 'नतोकिएको',
'Unsupported data format': 'नमिल्ने आँकडा नमुना',
'Unsupported method': 'नमिल्ने शैली',
'UPDATE': 'परिमार्जन गर्नुहोस्',
'Update Coalition': 'संस्था परिमार्जन गर्नुहोस्',
'Update Report': 'प्रतिवेदन परिमार्जन गर्नुहोस्',
'Update this entry': 'यो प्रवेश परिमार्जन गर्नुहोस्',
'updated': 'परिमार्जन गरियो',
'Upload an image file (png or jpeg), max. 400x400 pixels!': 'तस्विर फाइल (png वा jpeg), बढिमा ४००x४०० pixels! अपलोड गर्नुहोस् ',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'तस्विर फाइल यहाँ अपलोड गर्नुहोस् । तस्विर फाइल अपलोड गर्नु भएन भने, तपाईंले यसको यू.आर.एल. क्षेत्र देखाउनै पर्छ ।',
'Upload Format': 'नमुना अपलोड गर्नुहोस्',
'Upload Scanned OCR Form': 'स्क्यान गरिएको ओ.सि.आर. फारम अपलोड गर्नुहोस् ',
'Upload Shapefile': 'आकारफाइल अपलोड गर्नुहोस् ',
'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'अपलोड गरिएको फाईल पि.डि.एफ. फाइल होइन । सहि पि.डि.एफ. फाइल उपलब्ध गराउनुहोस्',
"Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": "अपलोड गरिएका फाइल(हरू) तस्विर(हरू) होइनन् । लिने तस्विर नमुनाहरू '.png', '.jpg', '.bmp', '.gif' आदि हुन् ।",
'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'संचित गरिएको पि.डि.एफ. फाइलमा आवश्यक भन्दा बढि/कम पृष्ठ संख्या रहेको छ । तपाईंले सहि फारम उपलब्ध गराउनु भयो भएन, वा फारममा भएको पृष्ठ संख्याहरू सहि छन् कि छैनन् जाँच गर्नुहोस् ।',
'Urban Risk & Planning': 'शहरी जोखिम र योजना',
'Urgent': 'तत्काल',
'URL': 'यू.आर.एल.',
'URL to a Google Calendar to display on the project timeline.': 'परियोजना समयमा देखाउनको निम्ति गुगल पात्रोको लागि यू.आर.एल.',
'Use decimal': 'बिन्दु प्रयोग',
'Use default': 'स्वचलान प्रयोग',
'Use deg, min, sec': 'डिग्री, मिनेट, सेकेण्ड प्रयोग',
'Use Geocoder for address lookups?': 'ठेगानाको निम्ति जिओकोड प्रयोग गर्नुहोस् ।',
'Use Site?': 'क्षेत्र प्रयोग?',
'Use this to set the starting location for the Location Selector.': 'स्थान छान्नेको लागि शुरू स्थान राख्नको निम्ति यो प्रयोग गर्नुहोस् ।',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'प्रकारहरूको फरक छुट्याउनको लागि अनहोवर टुलकिट तथा समूह पप्-अपमा प्रयोग गरिएको ।',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'विवरणहरू छुट्याउनको निम्ति अनहोवर टुलकिट बनाउनको लागि प्रयोग भएको र समूह पप्-अपमा पनि प्रयोग भएको',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'प्रवेश गरिएको स्थानको आक्षांश ठिक छ कि छैन भनेर जाँच गर्नको निम्ति प्रयोग गरिएको । स्थानहरू भएको स्रोतहारूको तालिका फिल्टर गर्नको निम्ति प्रयोग गर्न सकिने ।',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'प्रवेश गरिएको स्थानको देशान्तरहरू ठिक छ कि छैन भनेर जाँच गर्नको निम्ति प्रयोग गरिएको । स्थानहरू भएको स्रोतहारूको तालिका फिल्टर गर्नको निम्ति प्रयोग गर्न सकिने ।',
'Used to populate feature attributes which can be used for Styling.': 'स्टाइलिङको लागि प्रयोग हुने विशेष कार्यहरूलाई सहयोग गर्नको निम्ति प्रयोग भएको ।',
'User': 'प्रयोगकर्ता',
'User Account': 'प्रयोगकर्ता एकाउन्ट',
'User added to Role': 'भूमिकामा प्रयोगकर्तालाई संचित गरियो ',
'User Profile': 'प्रयोगकर्ता प्रोफाइल',
'User Roles': 'प्रयोगकर्ता भूमिकाहरू',
'User with Role': 'भूमिकासहितको प्रयोगकर्ता',
'Username': 'प्रयोगकर्तानाम',
'Users': 'प्रयोगकर्ताहरू',
'Users in my Organizations': 'मेरो संस्था(हरू) मा प्रयोगकर्ता(हरू)',
'Users with this Role': 'यस भूमिकाको प्रयोगकर्ता(हरू)',
'Uses the REST Query Format defined in': 'परिभाषित गरिएको सोधपुछ नमुना यथास्थितिमा ल्याउन प्रयोग',
'using default': 'स्वचलित प्रयोग गरिँदै',
'Valid From': 'देखि मान्य',
'Valid Until': 'सम्म मान्य',
'Validation error': 'मान्यता गल्ति',
'Value': 'महत्व',
'Value per Pack': 'प्रति एकाई मूल्य',
'VCA (Vulnerability and Capacity Assessment)': 'संकटासन्नता र क्षमता लेखाजोखा',
'Vector Control': 'भेक्टोर नियन्त्रण',
'Verified': 'प्रमाणित गर्ने',
'Version': 'भर्सन',
'Very Good': 'धेरै राम्रो',
'Very Strong': 'धेरै बलियो',
'Video Tutorials': 'भिडियो ट्युटरियल्सहरू',
'View': 'हेर्नुहोस्',
'View full screen': 'पूर्ण स्क्रिन हेर्नुहोस्',
'View Fullscreen Map': 'पूर्णस्क्रिन नक्सा हेर्नुहोस्',
'View Location Details': 'स्थान विवरण हेर्नुहोस्',
'View on Map': 'नक्सा हेर्नुहोस्',
'Vocational Training and Employment Skills': 'व्यावसायिक तालिम र रोजगार सिप(हरू)',
'Volunteer': 'स्वयम्-सेवक',
'Volunteer added': 'स्वयम्-सेवक संचित गरियो',
'Volunteer and Staff Management': 'स्वयम्सेवक र कर्मचारी ब्यवस्थापन',
'Volunteer Cluster': 'स्वयम्-सेवक समूह',
'Volunteer Cluster added': 'स्वयम्-सेवक समूह संचित गरियो',
'Volunteer Cluster deleted': 'स्वयम्-सेवक समूह हटाइयो',
'Volunteer Cluster Position': 'स्वयम्-सेवक समूह पद',
'Volunteer Cluster Position added': 'स्वयम्-सेवक समूह पद संचित गरियो',
'Volunteer Cluster Position deleted': 'स्वयम्-सेवक समूह पद हटाइयो',
'Volunteer Cluster Position updated': 'स्वयम्-सेवक समूह पद परिमार्जन गरियो',
'Volunteer Cluster Type': 'स्वयम्-सेवक समूह प्रकार',
'Volunteer Cluster Type added': 'स्वयम्-सेवक समूह प्रकार संचित गरियो',
'Volunteer Cluster Type deleted': 'स्वयम्-सेवक समूह प्रकार हटाइयो',
'Volunteer Cluster Type updated': 'स्वयम्-सेवक समूह प्रकार परिमार्जन गरियो',
'Volunteer Cluster updated': 'स्वयम्-सेवक समूह परिमार्जन गरियो',
'Volunteer deleted': 'स्वयम्-सेवक हटाइयो',
'Volunteer Details': 'स्वयम्-सेवक विवरण',
'Volunteer Details updated': 'स्वयम्-सेवक विवरण परिमार्जन गरियो',
'Volunteer Hours': 'स्वयम्-सेवक समय (घण्टा)',
'Volunteer Insurance': 'स्वयम्-सेवक विमा',
'Volunteer Management': 'स्वयम्-सेवक ब्यबस्थापन',
'Volunteer Recognition': 'स्वयम्-सेवक सम्मान',
'Volunteer Record': 'स्वयम्-सेवक विवरण',
'Volunteer Recruitment': 'स्वयम्-सेवक नियुक्ति',
'Volunteer Report': 'स्वयम्-सेवक प्रतिवेदन',
'Volunteer Role': 'स्वयमसेवकको भूमिका',
'Volunteer Role added': 'स्वयमसेवकको भूमिका संचित गरियो',
'Volunteer Role Catalog': 'स्वयमसेवकको भूमिका तालिका',
'Volunteer Role deleted': 'स्वयमसेवकको भूमिका हटाइयो',
'Volunteer Role Details': 'स्वयमसेवकको भूमिका विवरण',
'Volunteer Role updated': 'स्वयमसेवकको भूमिका परिमार्जन गरियो',
'Volunteer Roles': 'स्वयमसेवकको भूमिका',
'Volunteer Service Record': 'स्वयम्-सेवक सेवा विवरण',
'Volunteer Training': 'स्वयम्-सेवक तालिम',
'Volunteering in Emergencies Guidelines/Toolkit': 'आपतकालिन निर्देशन/टुलकिटमा स्वयम्-सेवक कार्य',
'Volunteering in Pandemic Emergency Situations': 'माहामरी आपतकालिन अवस्था(हरू)मा स्वयम्-सेवक कार्य',
'Volunteers': 'स्वयम्-सेवक(हरू)',
'Vulnerability': 'संकटासन्नता',
'Vulnerable Populations': 'संकटासन्नता जनताहरु',
'Warehouse': 'गोदामघर',
'Warehouse Manager': 'भण्डार व्यवस्थापक',
'Warehouse Stock': 'भण्डार सामान',
'Warehouse Type': 'गोदामघर प्रकार',
'Warehouses': 'गोदामघरहरु',
'WARNING': 'चेतावनि',
'Water': 'पानी',
'Water and Sanitation': 'खानेपानी र सरसफाई',
"Water, Sanitation & Hygiene": 'खानेपानी, सरसफाई तथा स्वच्छता',
'Water Supply': 'पानी आपूर्ती',
'Water Testing': 'पानी परिक्षण',
'Watsan': 'वाटसन्',
'Watsan Officer': 'वाटसन् कर्मचारी',
'Watsan Technician': 'वाटसन् प्राविधिक',
'wavy': 'गिलो',
'We have tried': 'हामिले प्रयास गर्यौं',
'Weak': 'कमजोर',
'Weather': 'मौसम',
'Web Map Service': 'वेभ नक्सा सेवा',
'Web Map Service Browser Name': 'वेव नक्सा सेवा ब्राउजर नाम',
'Web Map Service Browser URL': 'वेव नक्सा सेवा ब्राउजर यू.आर.एल.',
'Website': 'वेवसाइट',
'Week': 'हप्ता',
'Weekends only': 'हप्ताको अन्त्यमा मात्र',
'Weekly': 'साप्ताहिक',
'Weight': 'तौल',
'Weight (kg)': 'तौल (केजि)',
'Well-Known Text': 'राम्ररी थाहा भएको-शब्द',
'WFS Layer': 'डब्ल्यू.एफ.एस. तह',
'WGS84 (EPSG 4236) is required for many WMS servers.': 'धेरै डब्लु.एम.एस. सर्वरहरूका लागि डब्ल्यु.जि.एस.८४ (इ.पि.एस.जि. ४२३६) आवश्यक छ',
'What order to be contacted in.': 'कुन तरिकामा सम्पर्क गर्ने',
'When this search was last checked for changes.': 'जब यो खोजी परिवर्तनको लागि अन्तिममा जाँच गरियो ',
'Whether calls to this resource should use this configuration as the default one': 'यो बनावटलाइ स्रोतले स्वचालितरूपमा प्रयोग गर्नकोलागि हो होइन',
'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': ' फरकरूपमा बनावट प्रवेशको साटो अक्षांश तथा देशान्तर स्थान बनावटमाउच्चमा छ कि छैन ।',
'Whether the resource should be tracked using S3Track rather than just using the Base Location': 'आधारभुत स्थान प्रयोग गर्नुको साटो स्रोतलाई एस ३ट्र्याक प्रयोग गरि ट्र्याक गर्ने कि नगर्ने',
'Whiskers': 'दारी',
'white': 'सेतो',
'Who is doing What Where': 'को कहाँ के गर्दैछ',
'wider area, longer term, usually contain multiple Activities': 'ठूलो क्षेत्र, लामो शब्द, साधारणतया बहुमुखी कृयाकलाप(हरू)समाबेश गर्दछ',
'widowed': 'बिधुवा',
'Will create and link your user account to the following records': 'तलको विवरणमा तपाईंको एकाउन्ट जोड्ने र बनाउनेछ ।',
'With best regards': 'स-धन्यवाद',
'WKT is Invalid!': 'डब्लु.के.टि. अमान्य!',
'WMS Layer': 'डब्लु.एम.एस. तह',
'Work': 'काम',
'Work on Program': 'कार्यक्रममा काम',
'X-Ray': 'एक्स-रे',
'XML parse error': 'एक्स्.एम.एल. गल्ती',
'XSLT stylesheet not found': 'एक्स.एस.एल.टि. स्टाइलसिट प्राप्त भएन',
'XSLT transformation error': 'एक्स.एस.एल.टि. परिवर्तन गल्ती',
'XYZ Layer': 'एक्स.वाइ.जेट. तह',
'Year': 'वर्ष',
'Year that the organization was founded': 'संस्था स्थापना गरिएको वर्ष',
'yes': 'हुन्छ',
'Yes': 'हुन्छ',
'You can click on the map below to select the Lat/Lon fields': 'ल्याट/लोन क्षेत्र(हरू)लाई छान्नको लागि तलको नक्सामा क्लिक गर्नुहोस् ।',
"You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.": "तपाईले समूह नाम, ब्याख्या वा टिप्पणीहरू र संस्था नाम वा सम्बन्धित नामको आधारमा खोज्न सक्नुहुन्छ । तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । तालिकामा सबै नराखिकन 'खोजी'मा क्लिक गर्नुहोस् ।",
"You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": "पाठ्यक्रम नाम, स्थान नाम वा कार्यक्रम टिप्पणी(हरू)को आधारमा खोजी । तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । सबै कार्यक्रमहरू तालिकामा नहाली 'खोजी' मा थिच्नुहोस् ।",
"You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "काम शिर्षक वा ब्यक्ति नाम अनुसार खोजी गर्न सक्नुहुन्छ - कुनैपनि पहिलो, बीचको वा अन्तिम नामहरू, स्पेसद्वारा छुटाएर टाइप गर्न सक्नुहुन्छ । तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । सबै व्यक्तिहरू तालिकामा नहाली 'खोजी' मा थिच्नुहोस् ।",
'You can search by name, acronym or comments': 'नाम, मिल्दोनाम वा टिप्पणी(हरू)को आधारमा खोजी गर्न सक्नुहुन्छ ।',
'You can search by name, acronym, comments or parent name or acronym.': 'नाम, मिल्दो नाम, टिप्पणी(हरू) वा परिवार नाम वा मिल्दोनामको आधारमा खोजी गर्न सक्नुहुन्छ ।',
"You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "ब्यक्तिको नामको आधारमा खोजी गर्न सक्नुहुन्छ- कुनैपनि पहिलो, बीचको वा अन्तिमको नाम(हरू), स्पेसले छुट्यएर टाइप गर्न सक्नुहुन्छ । तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । सबै व्यक्तिहरू तालिकामा नहाली 'खोजी' मा थिच्नुहोस् ।",
"You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": "तपाईंले तालिम दिने व्यक्तिको नाम, पाठ्यक्रम नाम वा टिप्पणी(हरू). तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । सबै तालिम दिने व्यक्तिहरूली तालिकामा नहाली 'खोजी' मा थिच्नुहोस् ।",
'You can select an area on the image and save to crop it.': 'तपाईंले तस्बिरको क्षेत्रमा छानेर र काट्नको निम्ति संचित गर्न सक्नुहुन्छ ।',
'You can select the Draw tool': 'तपाईंले चित्र बनाउने टुल छान्न सक्नुहुन्छ',
'You can set the modem settings for SMS here.': 'एस.एम.एस.को लागि तपाईंले यहाँ मोडेम मिलाउन सक्नुहुन्छ ।',
'You do not have permission for any facility to perform this action.': 'यो कार्य गर्नको निम्ति तपाईंसँग कुनैपनि सुविधा छैन ।',
'You do not have permission for any organization to perform this action.': 'यो कार्य गर्नको निम्ति तपाईंसँग कुनैपनि संस्था छैन ।',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "तपाईंसँग संचित नगरिएको परिवर्तन(हरू) छन् । रद्द गर्नुहोस् र संचित गर्नको निम्ति 'संचित' मा क्लिक गर्नुहोस् । अस्विकार गर्नको निम्ति 'हुन्छ' मा क्लिक गर्नुहोस् ।",
'You have unsaved changes. You need to press the Save button to save them': "तपाईंसँग संचित नगरिएको परिवर्तन(हरू) छन् । तिनिहरूलाई संचित गर्नको निम्ति 'संचित' बटन थिच्नुहोस् ।",
'You must agree to the Terms of Service': 'तपाईंले सेवाको नियमलाई मान्नै पर्छ ।',
'You must enter a minimum of %d characters': 'कम्ति तपाईंले %d शव्दहरू प्रवेश गर्नैपर्छ ।',
'You need to have at least 2 records in this list in order to merge them.': 'मिलाउनको निम्ति तपाईंसँग तालिकामा कम्तिमा २ विवरणहरू हुनै पर्छ ।',
'Your name for this search. Notifications will use this name.': 'यो खोजीको लागि तपाईंको नाम । सूचना घण्टि(हरू) ले यो नाम प्रयोग गर्नेछ ।',
'Your request for Red Cross and Red Crescent Resource Management System (RMS) has been approved and you can now access the system at': 'रेड क्रस र रेड क्रिसेन्ट स्रोत ब्यबस्थापन प्रकृया (आर.एम.एस.) को लागि तपाईंको अनुरोध प्रमाणित भएको छ र अब तपाईं प्रकृयामा पहुँच प्राप्त गर्न सक्नुहुन्छ ।',
'Youth and Volunteer Development': 'युवा तथा स्वयंसेवक विकास',
'Youth Development': 'युवा विकास',
'Youth Leadership Development': 'जवान अगुवाइ विकास',
'Zonal': 'अञ्चल',
'Zone': 'क्षेत्र',
'Zoom': 'नजिक ल्याउनुहोस्',
'Zoom In': 'नजिक ल्याउनुहोस्',
'Zoom in closer to Edit OpenStreetMap layer': 'खुलासडकनक्सा तहलाई नजिक तान्नुहोस्',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'नजिक ल्याउनुहोस्: नक्सामा क्लिक गर्नुहोस् वा माउसको वायाँ बटन थिच्नुहोस् र चतुर्भुज बनाउनुहोस्',
'Zoom Levels': 'जुम स्तरहरू',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'टाढा लानुहोस्: नक्सामा क्लिक गर्नुहोस् वा माउँसको वायाँ बटम थिच्नुहोस् र चत्तुर्भुजा बनाउनको निम्ति तान्नुहोस्',
'Zoom to Current Location': 'हालको स्थानलाई नजिक तान्नुहोस्',
'Zoom to maximum map extent': 'बढि नक्सा क्षेत्र देखिने गरि नजिक तान्नुहोस्',
}
| mit |
foxban/qemu-1.1.1-centos5-rpm | scripts/tracetool/format/h.py | 98 | 1062 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate .h file.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
def begin(events):
out('/* This file is autogenerated by tracetool, do not edit. */',
'',
'#ifndef TRACE_H',
'#define TRACE_H',
'',
'#include "qemu-common.h"')
def end(events):
for e in events:
if "disable" in e.properties:
enabled = 0
else:
enabled = 1
out('#define TRACE_%s_ENABLED %d' % (e.name.upper(), enabled))
out('',
'#endif /* TRACE_H */')
def nop(events):
for e in events:
out('',
'static inline void trace_%(name)s(%(args)s)',
'{',
'}',
name = e.name,
args = e.args,
)
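# Added commentary (not part of the original script): for a hypothetical
# enabled event "foo" with args "int x", the begin/nop/end hooks above would
# emit roughly the following header:
#
#   /* This file is autogenerated by tracetool, do not edit. */
#
#   #ifndef TRACE_H
#   #define TRACE_H
#
#   #include "qemu-common.h"
#
#   static inline void trace_foo(int x)
#   {
#   }
#   #define TRACE_FOO_ENABLED 1
#
#   #endif /* TRACE_H */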
| gpl-2.0 |
hdinsight/hue | desktop/core/ext-py/boto-2.38.0/boto/ec2/image.py | 92 | 16222 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.ec2object import EC2Object, TaggedEC2Object
from boto.ec2.blockdevicemapping import BlockDeviceMapping
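# Added commentary: ProductCodes and BillingProducts below are SAX-style
# handlers -- boto's XML response parser calls startElement/endElement on them
# while walking the DescribeImages response, so each matching tag's text is
# appended to the list.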
class ProductCodes(list):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'productCode':
self.append(value)
class BillingProducts(list):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'billingProduct':
self.append(value)
class Image(TaggedEC2Object):
"""
Represents an EC2 Image
"""
def __init__(self, connection=None):
super(Image, self).__init__(connection)
self.id = None
self.location = None
self.state = None
self.ownerId = None # for backwards compatibility
self.owner_id = None
self.owner_alias = None
self.is_public = False
self.architecture = None
self.platform = None
self.type = None
self.kernel_id = None
self.ramdisk_id = None
self.name = None
self.description = None
self.product_codes = ProductCodes()
self.billing_products = BillingProducts()
self.block_device_mapping = None
self.root_device_type = None
self.root_device_name = None
self.virtualization_type = None
self.hypervisor = None
self.instance_lifecycle = None
self.sriov_net_support = None
def __repr__(self):
return 'Image:%s' % self.id
def startElement(self, name, attrs, connection):
retval = super(Image, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'blockDeviceMapping':
self.block_device_mapping = BlockDeviceMapping()
return self.block_device_mapping
elif name == 'productCodes':
return self.product_codes
elif name == 'billingProducts':
return self.billing_products
else:
return None
def endElement(self, name, value, connection):
if name == 'imageId':
self.id = value
elif name == 'imageLocation':
self.location = value
elif name == 'imageState':
self.state = value
elif name == 'imageOwnerId':
self.ownerId = value # for backwards compatibility
self.owner_id = value
elif name == 'isPublic':
if value == 'false':
self.is_public = False
elif value == 'true':
self.is_public = True
else:
raise Exception(
'Unexpected value of isPublic %s for image %s' % (
value,
self.id
)
)
elif name == 'architecture':
self.architecture = value
elif name == 'imageType':
self.type = value
elif name == 'kernelId':
self.kernel_id = value
elif name == 'ramdiskId':
self.ramdisk_id = value
elif name == 'imageOwnerAlias':
self.owner_alias = value
elif name == 'platform':
self.platform = value
elif name == 'name':
self.name = value
elif name == 'description':
self.description = value
elif name == 'rootDeviceType':
self.root_device_type = value
elif name == 'rootDeviceName':
self.root_device_name = value
elif name == 'virtualizationType':
self.virtualization_type = value
elif name == 'hypervisor':
self.hypervisor = value
elif name == 'instanceLifecycle':
self.instance_lifecycle = value
elif name == 'sriovNetSupport':
self.sriov_net_support = value
else:
setattr(self, name, value)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def update(self, validate=False, dry_run=False):
"""
Update the image's state information by making a call to fetch
the current image attributes from the service.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
image the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
"""
rs = self.connection.get_all_images([self.id], dry_run=dry_run)
if len(rs) > 0:
img = rs[0]
if img.id == self.id:
self._update(img)
elif validate:
raise ValueError('%s is not a valid Image ID' % self.id)
return self.state
def run(self, min_count=1, max_count=1, key_name=None,
security_groups=None, user_data=None,
addressing_type=None, instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None,
monitoring_enabled=False, subnet_id=None,
block_device_map=None,
disable_api_termination=False,
instance_initiated_shutdown_behavior=None,
private_ip_address=None,
placement_group=None, security_group_ids=None,
additional_info=None, instance_profile_name=None,
instance_profile_arn=None, tenancy=None, dry_run=False):
"""
Runs this instance.
:type min_count: int
:param min_count: The minimum number of instances to start
:type max_count: int
:param max_count: The maximum number of instances to start
:type key_name: string
:param key_name: The name of the key pair with which to
launch instances.
:type security_groups: list of strings
:param security_groups: The names of the security groups with which to
associate instances.
:type user_data: string
:param user_data: The Base64-encoded MIME user data to be made
available to the instance(s) in this reservation.
:type instance_type: string
:param instance_type: The type of instance to run:
* t1.micro
* m1.small
* m1.medium
* m1.large
* m1.xlarge
* m3.medium
* m3.large
* m3.xlarge
* m3.2xlarge
* c1.medium
* c1.xlarge
* m2.xlarge
* m2.2xlarge
* m2.4xlarge
* cr1.8xlarge
* hi1.4xlarge
* hs1.8xlarge
* cc1.4xlarge
* cg1.4xlarge
* cc2.8xlarge
* g2.2xlarge
* c3.large
* c3.xlarge
* c3.2xlarge
* c3.4xlarge
* c3.8xlarge
* i2.xlarge
* i2.2xlarge
* i2.4xlarge
* i2.8xlarge
* t2.micro
* t2.small
* t2.medium
:type placement: string
:param placement: The Availability Zone to launch the instance into.
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to launch the
instances.
:type ramdisk_id: string
:param ramdisk_id: The ID of the RAM disk with which to launch the
instances.
:type monitoring_enabled: bool
:param monitoring_enabled: Enable CloudWatch monitoring on
the instance.
:type subnet_id: string
:param subnet_id: The subnet ID within which to launch the instances
for VPC.
:type private_ip_address: string
:param private_ip_address: If you're using VPC, you can
optionally use this parameter to assign the instance a
specific available IP address from the subnet (e.g.,
10.0.0.25).
:type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_map: A BlockDeviceMapping data structure
describing the EBS volumes associated with the Image.
:type disable_api_termination: bool
:param disable_api_termination: If True, the instances will be locked
and will not be able to be terminated via the API.
:type instance_initiated_shutdown_behavior: string
:param instance_initiated_shutdown_behavior: Specifies whether the
instance stops or terminates on instance-initiated shutdown.
Valid values are:
* stop
* terminate
:type placement_group: string
:param placement_group: If specified, this is the name of the placement
group in which the instance(s) will be launched.
:type additional_info: string
:param additional_info: Specifies additional information to make
available to the instance(s).
:type security_group_ids: list of strings
:param security_group_ids: The ID of the VPC security groups with
which to associate instances.
:type instance_profile_name: string
:param instance_profile_name: The name of
the IAM Instance Profile (IIP) to associate with the instances.
:type instance_profile_arn: string
:param instance_profile_arn: The Amazon resource name (ARN) of
the IAM Instance Profile (IIP) to associate with the instances.
:type tenancy: string
:param tenancy: The tenancy of the instance you want to
launch. An instance with a tenancy of 'dedicated' runs on
single-tenant hardware and can only be launched into a
VPC. Valid values are:"default" or "dedicated".
NOTE: To use dedicated tenancy you MUST specify a VPC
subnet-ID as well.
:rtype: Reservation
:return: The :class:`boto.ec2.instance.Reservation` associated with
the request for machines
"""
return self.connection.run_instances(self.id, min_count, max_count,
key_name, security_groups,
user_data, addressing_type,
instance_type, placement,
kernel_id, ramdisk_id,
monitoring_enabled, subnet_id,
block_device_map, disable_api_termination,
instance_initiated_shutdown_behavior,
private_ip_address, placement_group,
security_group_ids=security_group_ids,
additional_info=additional_info,
instance_profile_name=instance_profile_name,
instance_profile_arn=instance_profile_arn,
tenancy=tenancy, dry_run=dry_run)
def deregister(self, delete_snapshot=False, dry_run=False):
return self.connection.deregister_image(
self.id,
delete_snapshot,
dry_run=dry_run
)
def get_launch_permissions(self, dry_run=False):
img_attrs = self.connection.get_image_attribute(
self.id,
'launchPermission',
dry_run=dry_run
)
return img_attrs.attrs
def set_launch_permissions(self, user_ids=None, group_names=None,
dry_run=False):
return self.connection.modify_image_attribute(self.id,
'launchPermission',
'add',
user_ids,
group_names,
dry_run=dry_run)
def remove_launch_permissions(self, user_ids=None, group_names=None,
dry_run=False):
return self.connection.modify_image_attribute(self.id,
'launchPermission',
'remove',
user_ids,
group_names,
dry_run=dry_run)
def reset_launch_attributes(self, dry_run=False):
return self.connection.reset_image_attribute(
self.id,
'launchPermission',
dry_run=dry_run
)
def get_kernel(self, dry_run=False):
img_attrs = self.connection.get_image_attribute(
self.id,
'kernel',
dry_run=dry_run
)
return img_attrs.kernel
def get_ramdisk(self, dry_run=False):
img_attrs = self.connection.get_image_attribute(
self.id,
'ramdisk',
dry_run=dry_run
)
return img_attrs.ramdisk
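# Illustrative usage sketch (added commentary, not part of boto; the region,
# AMI ID and key name are hypothetical placeholders):
#
#   import boto.ec2
#   conn = boto.ec2.connect_to_region('us-east-1')
#   image = conn.get_all_images(image_ids=['ami-12345678'])[0]
#   reservation = image.run(key_name='my-key', instance_type='m1.small')
#   instance = reservation.instances[0]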
class ImageAttribute(object):
def __init__(self, parent=None):
self.name = None
self.kernel = None
self.ramdisk = None
self.attrs = {}
def startElement(self, name, attrs, connection):
if name == 'blockDeviceMapping':
self.attrs['block_device_mapping'] = BlockDeviceMapping()
return self.attrs['block_device_mapping']
else:
return None
def endElement(self, name, value, connection):
if name == 'launchPermission':
self.name = 'launch_permission'
elif name == 'group':
if 'groups' in self.attrs:
self.attrs['groups'].append(value)
else:
self.attrs['groups'] = [value]
elif name == 'userId':
if 'user_ids' in self.attrs:
self.attrs['user_ids'].append(value)
else:
self.attrs['user_ids'] = [value]
elif name == 'productCode':
if 'product_codes' in self.attrs:
self.attrs['product_codes'].append(value)
else:
self.attrs['product_codes'] = [value]
elif name == 'imageId':
self.image_id = value
elif name == 'kernel':
self.kernel = value
elif name == 'ramdisk':
self.ramdisk = value
else:
setattr(self, name, value)
class CopyImage(object):
def __init__(self, parent=None):
self._parent = parent
self.image_id = None
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'imageId':
self.image_id = value
| apache-2.0 |
kazemakase/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py2/solnlib/acl.py | 6 | 6169 | # Copyright 2016 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License'): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This module contains interfaces that support CRUD operations on ACL.
'''
import json
from . import splunk_rest_client as rest_client
from .packages.splunklib import binding
from .utils import retry
__all__ = ['ACLException',
'ACLManager']
class ACLException(Exception):
pass
class ACLManager(object):
'''ACL manager.
:param session_key: Splunk access token.
:type session_key: ``string``
:param app: App name of namespace.
:type app: ``string``
:param owner: (optional) Owner of namespace, default is `nobody`.
:type owner: ``string``
:param scheme: (optional) The access scheme, default is None.
:type scheme: ``string``
:param host: (optional) The host name, default is None.
:type host: ``string``
:param port: (optional) The port number, default is None.
:type port: ``integer``
:param context: Other configurations for Splunk rest client.
:type context: ``dict``
Usage::
>>> import solnlib.acl as sacl
>>> saclm = sacl.ACLManager(session_key, 'Splunk_TA_test')
>>> saclm.get('data/transforms/extractions')
>>> saclm.update('data/transforms/extractions/_acl',
perms_read=['*'], perms_write=['*'])
'''
def __init__(self, session_key, app, owner='nobody',
scheme=None, host=None, port=None, **context):
self._rest_client = rest_client.SplunkRestClient(session_key,
app,
owner=owner,
scheme=scheme,
host=host,
port=port,
**context)
@retry(exceptions=[binding.HTTPError])
def get(self, path):
'''Get ACL of /servicesNS/{`owner`}/{`app`}/{`path`}.
:param path: Path of ACL relative to /servicesNS/{`owner`}/{`app`}
:type path: ``string``
:returns: A dict contains ACL.
:rtype: ``dict``
:raises ACLException: If `path` is invalid.
Usage::
>>> aclm = acl.ACLManager(session_key, 'Splunk_TA_test')
>>> perms = aclm.get('data/transforms/extractions/_acl')
'''
try:
content = self._rest_client.get(
path, output_mode='json').body.read()
except binding.HTTPError as e:
if e.status != 404:
raise
raise ACLException('Invalid endpoint: %s.' % path)
return json.loads(content)['entry'][0]['acl']
@retry(exceptions=[binding.HTTPError])
def update(self, path, owner=None, perms_read=None, perms_write=None):
'''Update ACL of /servicesNS/{`owner`}/{`app`}/{`path`}.
If the ACL is per-entity (ends in /acl), owner can be reassigned. If
the ACL is endpoint-level (ends in _acl), owner will be ignored. The
'sharing' setting is always retrieved from the current ACL.
:param path: Path of ACL relative to /servicesNS/{owner}/{app}. MUST
end with /acl or /_acl indicating whether the permission is applied
at the per-entity level or endpoint level respectively.
:type path: ``string``
:param owner: (optional) New owner of ACL, default is `nobody`.
:type owner: ``string``
:param perms_read: (optional) List of roles (['*'] for all roles). If
unspecified we will POST with current (if available) perms.read,
default is None.
:type perms_read: ``list``
:param perms_write: (optional) List of roles (['*'] for all roles). If
unspecified we will POST with current (if available) perms.write,
default is None.
:type perms_write: ``list``
:returns: A dict contains ACL after update.
:rtype: ``dict``
:raises ACLException: If `path` is invalid.
Usage::
>>> aclm = acl.ACLManager(session_key, 'Splunk_TA_test')
>>> perms = aclm.update('data/transforms/extractions/_acl',
perms_read=['admin'], perms_write=['admin'])
'''
if not path.endswith('/acl') and not path.endswith('/_acl'):
raise ACLException(
'Invalid endpoint: %s, must end with /acl or /_acl.' % path)
curr_acl = self.get(path)
postargs = {}
if perms_read:
postargs['perms.read'] = ','.join(perms_read)
else:
curr_read = curr_acl['perms'].get('read', [])
if curr_read:
postargs['perms.read'] = ','.join(curr_read)
if perms_write:
postargs['perms.write'] = ','.join(perms_write)
else:
curr_write = curr_acl['perms'].get('write', [])
if curr_write:
postargs['perms.write'] = ','.join(curr_write)
if path.endswith('/acl'):
# Allow ownership to be reset only at entity level.
postargs['owner'] = owner or curr_acl['owner']
postargs['sharing'] = curr_acl['sharing']
try:
content = self._rest_client.post(
path, body=binding._encode(**postargs),
output_mode='json').body.read()
except binding.HTTPError as e:
if e.status != 404:
raise
raise ACLException('Invalid endpoint: %s.' % path)
return json.loads(content)['entry'][0]['acl']
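# Illustrative end-to-end use (a hedged sketch; the session key and app name
# are assumed, not taken from the original module):
#
#   aclm = ACLManager(session_key, 'Splunk_TA_test')
#   try:
#       acl = aclm.update('data/transforms/extractions/_acl',
#                         perms_read=['*'], perms_write=['admin'])
#   except ACLException:
#       pass  # the path did not end in /acl or /_acl, or was not found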
| isc |
tdtrask/ansible | lib/ansible/modules/windows/win_firewall_rule.py | 27 | 2799 | #!/usr/bin/env python
# Copyright (c) 2017 Artem Zinenko <[email protected]>
# Copyright (c) 2014 Timothy Vandenbrande <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_firewall_rule
version_added: "2.0"
author:
- Artem Zinenko (@ar7z1)
- Timothy Vandenbrande (@TimothyVandenbrande)
short_description: Windows firewall automation
description:
- Allows you to create/remove/update firewall rules.
options:
enabled:
description:
- Is this firewall rule enabled or disabled.
type: bool
default: 'yes'
aliases: [ 'enable' ]
state:
description:
- Should this rule be added or removed.
default: "present"
choices: ['present', 'absent']
name:
description:
- The rule's name.
required: true
direction:
description:
- Is this rule for inbound or outbound traffic.
required: true
choices: ['in', 'out']
action:
description:
- What to do with the items this rule is for.
required: true
choices: ['allow', 'block', 'bypass']
description:
description:
- Description for the firewall rule.
localip:
description:
- The local IP address this rule applies to.
default: 'any'
remoteip:
description:
- The remote IP address/range this rule applies to.
default: 'any'
localport:
description:
- The local port this rule applies to.
remoteport:
description:
- The remote port this rule applies to.
program:
description:
- The program this rule applies to.
service:
description:
- The service this rule applies to.
protocol:
description:
- The protocol this rule applies to.
default: 'any'
profiles:
description:
- The profile this rule applies to.
default: 'domain,private,public'
aliases: [ 'profile' ]
force:
description:
- Replace any existing rule by removing it first.
- This is no longer required in 2.4 as rules no longer need replacing when being modified.
- DEPRECATED in 2.4 and will be removed in 2.9.
default: 'no'
choices: [ 'no', 'yes' ]
'''
EXAMPLES = r'''
- name: Firewall rule to allow SMTP on TCP port 25
win_firewall_rule:
name: SMTP
localport: 25
action: allow
direction: in
protocol: tcp
state: present
enabled: yes
- name: Firewall rule to allow RDP on TCP port 3389
win_firewall_rule:
name: Remote Desktop
localport: 3389
action: allow
direction: in
protocol: tcp
profiles: private
state: present
enabled: yes
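# An additional illustrative example (not from the original module docs):
# removing a previously created rule by name.
- name: Remove the SMTP firewall rule
  win_firewall_rule:
    name: SMTP
    state: absent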
'''
| gpl-3.0 |
flotre/sickbeard-vfvo | lib/requests/packages/chardet2/langgreekmodel.py | 63 | 12651 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences: 1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = ( \
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = { \
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = { \
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
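# These model dicts are consumed by chardet's single-byte charset prober; a
# minimal, illustrative hookup (the import path is assumed from this package's
# layout and may differ):
#
#   from .sbcharsetprober import SingleByteCharSetProber
#   greek_prober = SingleByteCharSetProber(Win1253GreekModel)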
| gpl-3.0 |
newcastlecy/shadowsocks | shadowsocks/common.py | 57 | 4530 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import struct
import logging
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return socket.inet_ntoa(ipstr)
elif family == socket.AF_INET6:
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j)))
for i, j in zip(ipstr[::2], ipstr[1::2]))
return v6addr
def inet_pton(family, addr):
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = map(lambda x: ('%02X' % ord(x)), v4addr)
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return ''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address)
if family == socket.AF_INET6:
return '\x04' + r
else:
return '\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return '\x03' + chr(len(address)) + address
def parse_header(data):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
if addrtype == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 2 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password' %
addrtype)
if dest_addr is None:
return None
return addrtype, dest_addr, dest_port, header_length
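# Illustrative round trip through pack_addr()/parse_header() (hand-computed,
# for the hostname case):
#
#   header = pack_addr('example.com') + struct.pack('>H', 8388)
#   # pack_addr -> '\x03' + chr(11) + 'example.com'   (ADDRTYPE_HOST)
#   parse_header(header)
#   # -> (3, 'example.com', 8388, 15)   # addrtype, addr, port, header length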
| mit |
dynaryu/rmtk | rmtk/vulnerability/derivation_fragility/R_mu_T_no_dispersion/dolsek_fajfar/__init__.py | 67 | 1875 | # -*- coding: utf-8 -*-
#
# LICENSE
#
# Copyright © 2014-2015, GEM Foundation, Chiara Casotto, Anirudh Rao,
# Vitor Silva.
#
# The Risk Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Risk Modeller's Toolkit (rmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM’s OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM’s OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the risk scientific staff of the GEM Model Facility
# ([email protected]).
#
# The Risk Modeller's Toolkit (rmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
| agpl-3.0 |
hryamzik/ansible | lib/ansible/modules/database/vertica/vertica_role.py | 55 | 7933 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: vertica_role
version_added: '2.0'
short_description: Adds or removes Vertica database roles and assigns roles to them.
description:
- Adds or removes Vertica database role and, optionally, assign other roles.
options:
name:
description:
- Name of the role to add or remove.
required: true
assigned_roles:
description:
- Comma separated list of roles to assign to the role.
aliases: ['assigned_role']
state:
description:
- Whether to create C(present) or drop C(absent) the role.
choices: ['present', 'absent']
default: present
db:
description:
- Name of the Vertica database.
cluster:
description:
- Name of the Vertica cluster.
default: localhost
port:
description:
- Vertica cluster port to connect to.
default: 5433
login_user:
description:
- The username used to authenticate with.
default: dbadmin
login_password:
description:
- The password used to authenticate with.
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: creating a new vertica role
vertica_role: name=role_name db=db_name state=present
- name: creating a new vertica role with other role assigned
vertica_role: name=role_name assigned_role=other_role_name state=present
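# An additional illustrative example (not from the original module docs):
- name: removing an existing vertica role
  vertica_role: name=role_name db=db_name state=absent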
"""
import traceback
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_role_facts(cursor, role=''):
facts = {}
cursor.execute("""
select r.name, r.assigned_roles
from roles r
where (? = '' or r.name ilike ?)
""", role, role)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
role_key = row.name.lower()
facts[role_key] = {
'name': row.name,
'assigned_roles': []}
if row.assigned_roles:
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
return facts
def update_roles(role_facts, cursor, role,
existing, required):
for assigned_role in set(existing) - set(required):
cursor.execute("revoke {0} from {1}".format(assigned_role, role))
for assigned_role in set(required) - set(existing):
cursor.execute("grant {0} to {1}".format(assigned_role, role))
def check(role_facts, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
return False
if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
return False
return True
def present(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
cursor.execute("create role {0}".format(role))
update_roles(role_facts, cursor, role, [], assigned_roles)
role_facts.update(get_role_facts(cursor, role))
return True
else:
changed = False
if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])):
update_roles(role_facts, cursor, role,
role_facts[role_key]['assigned_roles'], assigned_roles)
changed = True
if changed:
role_facts.update(get_role_facts(cursor, role))
return changed
def absent(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key in role_facts:
update_roles(role_facts, cursor, role,
role_facts[role_key]['assigned_roles'], [])
cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
del role_facts[role_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
role=dict(required=True, aliases=['name']),
assigned_roles=dict(default=None, aliases=['assigned_role']),
state=dict(default='present', choices=['absent', 'present']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
role = module.params['role']
assigned_roles = []
if module.params['assigned_roles']:
assigned_roles = module.params['assigned_roles'].split(',')
assigned_roles = filter(None, assigned_roles)
state = module.params['state']
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
try:
role_facts = get_role_facts(cursor)
if module.check_mode:
changed = not check(role_facts, role, assigned_roles)
elif state == 'absent':
try:
changed = absent(role_facts, cursor, role, assigned_roles)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == 'present':
try:
changed = present(role_facts, cursor, role, assigned_roles)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
except CannotDropError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
if __name__ == '__main__':
main()
| gpl-3.0 |
mcfletch/AutobahnPython | examples/asyncio/wamp/basic/pubsub/basic/backend.py | 5 | 1250 | ###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn.asyncio.wamp import ApplicationSession
class Component(ApplicationSession):
"""
An application component that publishes an event every second.
"""
@asyncio.coroutine
def onJoin(self, details):
counter = 0
while True:
self.publish('com.myapp.topic1', counter)
counter += 1
yield from asyncio.sleep(1)
| apache-2.0 |
manazhao/tf_recsys | tensorflow/contrib/learn/python/learn/dataframe/transforms/example_parser.py | 26 | 2407 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Transform that parses serialized tensorflow.Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.ops import parsing_ops
class ExampleParser(transform.TensorFlowTransform):
"""A Transform that parses serialized `tensorflow.Example` protos."""
def __init__(self, features):
"""Initialize `ExampleParser`.
The `features` argument must be an object that can be converted to an
`OrderedDict`. The keys should be strings and will be used to name the
output. Values should be either `VarLenFeature` or `FixedLenFeature`. If
`features` is a dict, it will be sorted by key.
Args:
features: An object that can be converted to an `OrderedDict` mapping
column names to feature definitions.
"""
super(ExampleParser, self).__init__()
if isinstance(features, dict):
self._ordered_features = collections.OrderedDict(sorted(features.items(
), key=lambda f: f[0]))
else:
self._ordered_features = collections.OrderedDict(features)
@property
def name(self):
return "ExampleParser"
@property
def input_valency(self):
return 1
@property
def _output_names(self):
return list(self._ordered_features.keys())
@transform._parameter # pylint: disable=protected-access
def feature_definitions(self):
return self._ordered_features
def _apply_transform(self, input_tensors, **kwargs):
parsed_values = parsing_ops.parse_example(input_tensors[0],
features=self._ordered_features)
# pylint: disable=not-callable
return self.return_type(**parsed_values)
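# Illustrative construction (a hedged sketch; the feature names, shapes and
# dtypes below are assumed, not taken from the original code):
#
#   from tensorflow.python.framework import dtypes
#   parser = ExampleParser({
#       'age': parsing_ops.FixedLenFeature([1], dtype=dtypes.int64),
#       'tags': parsing_ops.VarLenFeature(dtype=dtypes.string),
#   })
#   # parser.input_valency == 1: the transform consumes a single column of
#   # serialized tf.Example protos and emits one output per feature key.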
| apache-2.0 |
ThinkingBridge/platform_external_chromium_org | chrome/test/functional/media/media_basic_playback.py | 65 | 2975 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Basic playback test. Checks playback, seek, and replay based on events.
This test uses the bear videos from the test matrix in h264, vp8, and theora
formats.
"""
import logging
import os
import pyauto_media
import pyauto
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_basic_playback.html')
# Test videos to play. TODO(dalecurtis): Convert to text matrix parser when we
# have more test videos in the matrix. Code already written, see patch here:
# https://chromiumcodereview.appspot.com/9290008/#ps12
_TEST_VIDEOS = [
pyauto.PyUITest.GetFileURLForContentDataPath('media', name)
for name in ['bear.mp4', 'bear.ogv', 'bear.webm', 'bear_silent.mp4',
'bear_silent.ogv', 'bear_silent.webm']]
# Expected events for the first iteration and every iteration thereafter.
_EXPECTED_EVENTS_0 = [('ended', 2), ('playing', 2), ('seeked', 1),
('suspend', 1)]
_EXPECTED_EVENTS_n = [('abort', 1), ('emptied', 1)] + _EXPECTED_EVENTS_0
class MediaBasicPlaybackTest(pyauto.PyUITest):
"""PyAuto test container. See file doc string for more information."""
def testBasicPlaybackMatrix(self):
"""Launches HTML test which plays each video until end, seeks, and replays.
Specifically ensures that after the above sequence of events, the following
are true:
1. The first video has only 2x playing, 2x ended, and 1x seeked events.
2. Each subsequent video additionally has 1x abort and 1x emptied due to
switching of the src attribute.
3. video.currentTime == video.duration for each video.
See the HTML file at _TEST_HTML_PATH for more information.
"""
self.NavigateToURL(self.GetFileURLForDataPath(_TEST_HTML_PATH))
for i, media in enumerate(_TEST_VIDEOS):
logging.debug('Running basic playback test for %s', media)
# Block until the test finishes and notifies us. Upon return the value of
# video.currentTime == video.duration is provided.
try:
self.assertTrue(self.ExecuteJavascript("startTest('%s');" % media))
# PyAuto has trouble with arrays, so convert to string prior to request.
events = self.GetDOMValue("events.join(',')").split(',')
counts = [(item, events.count(item)) for item in sorted(set(events))]
# The first loop will not have the abort and emptied events triggered by
# changing the video src.
if (i == 0):
self.assertEqual(counts, _EXPECTED_EVENTS_0)
else:
self.assertEqual(counts, _EXPECTED_EVENTS_n)
except:
logging.debug(
'Test failed with events: %s', self.GetDOMValue("events.join(',')"))
raise
if __name__ == '__main__':
pyauto_media.Main()
| bsd-3-clause |
yuyuyu101/VirtualBox-NetBSD | src/libs/libxml2-2.6.31/python/tests/reader2.py | 87 | 5179 | #!/usr/bin/python -u
#
# this tests the DTD validation with the XmlTextReader interface
#
import sys
import glob
import string
import StringIO
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
err=""
expect="""../../test/valid/rss.xml:177: element rss: validity error : Element rss does not carry attribute version
</rss>
^
../../test/valid/xlink.xml:450: element termdef: validity error : ID dt-arc already defined
<p><termdef id="dt-arc" term="Arc">An <ter
^
../../test/valid/xlink.xml:530: validity error : attribute def line 199 references an unknown ID "dt-xlg"
^
"""
def callback(ctx, str):
global err
err = err + "%s" % (str)
libxml2.registerErrorHandler(callback, "")
valid_files = glob.glob("../../test/valid/*.x*")
valid_files.sort()
for file in valid_files:
if string.find(file, "t8") != -1:
continue
reader = libxml2.newTextReaderFilename(file)
#print "%s:" % (file)
reader.SetParserProp(libxml2.PARSER_VALIDATE, 1)
ret = reader.Read()
while ret == 1:
ret = reader.Read()
if ret != 0:
print "Error parsing and validating %s" % (file)
#sys.exit(1)
if err != expect:
print err
#
# another separate test based on Stephane Bidoul one
#
s = """
<!DOCTYPE test [
<!ELEMENT test (x,b)>
<!ELEMENT x (c)>
<!ELEMENT b (#PCDATA)>
<!ELEMENT c (#PCDATA)>
<!ENTITY x "<x><c>xxx</c></x>">
]>
<test>
&x;
<b>bbb</b>
</test>
"""
expect="""10,test
1,test
14,#text
1,x
1,c
3,#text
15,c
15,x
14,#text
1,b
3,#text
15,b
14,#text
15,test
"""
res=""
err=""
input = libxml2.inputBuffer(StringIO.StringIO(s))
reader = input.newTextReader("test2")
reader.SetParserProp(libxml2.PARSER_LOADDTD,1)
reader.SetParserProp(libxml2.PARSER_DEFAULTATTRS,1)
reader.SetParserProp(libxml2.PARSER_SUBST_ENTITIES,1)
reader.SetParserProp(libxml2.PARSER_VALIDATE,1)
while reader.Read() == 1:
res = res + "%s,%s\n" % (reader.NodeType(),reader.Name())
if res != expect:
print "test2 failed: unexpected output"
print res
sys.exit(1)
if err != "":
print "test2 failed: validation error found"
print err
sys.exit(1)
#
# Another test for external entity parsing and validation
#
s = """<!DOCTYPE test [
<!ELEMENT test (x)>
<!ELEMENT x (#PCDATA)>
<!ENTITY e SYSTEM "tst.ent">
]>
<test>
&e;
</test>
"""
tst_ent = """<x>hello</x>"""
expect="""10 test
1 test
14 #text
1 x
3 #text
15 x
14 #text
15 test
"""
res=""
def myResolver(URL, ID, ctxt):
if URL == "tst.ent":
return(StringIO.StringIO(tst_ent))
return None
libxml2.setEntityLoader(myResolver)
input = libxml2.inputBuffer(StringIO.StringIO(s))
reader = input.newTextReader("test3")
reader.SetParserProp(libxml2.PARSER_LOADDTD,1)
reader.SetParserProp(libxml2.PARSER_DEFAULTATTRS,1)
reader.SetParserProp(libxml2.PARSER_SUBST_ENTITIES,1)
reader.SetParserProp(libxml2.PARSER_VALIDATE,1)
while reader.Read() == 1:
res = res + "%s %s\n" % (reader.NodeType(),reader.Name())
if res != expect:
print "test3 failed: unexpected output"
print res
sys.exit(1)
if err != "":
print "test3 failed: validation error found"
print err
sys.exit(1)
#
# Another test for recursive entity parsing, validation, and replacement of
# entities, making sure the entity ref node doesn't show up in that case
#
s = """<!DOCTYPE test [
<!ELEMENT test (x, x)>
<!ELEMENT x (y)>
<!ELEMENT y (#PCDATA)>
<!ENTITY x "<x>&y;</x>">
<!ENTITY y "<y>yyy</y>">
]>
<test>
&x;
&x;
</test>"""
expect="""10 test 0
1 test 0
14 #text 1
1 x 1
1 y 2
3 #text 3
15 y 2
15 x 1
14 #text 1
1 x 1
1 y 2
3 #text 3
15 y 2
15 x 1
14 #text 1
15 test 0
"""
res=""
err=""
input = libxml2.inputBuffer(StringIO.StringIO(s))
reader = input.newTextReader("test4")
reader.SetParserProp(libxml2.PARSER_LOADDTD,1)
reader.SetParserProp(libxml2.PARSER_DEFAULTATTRS,1)
reader.SetParserProp(libxml2.PARSER_SUBST_ENTITIES,1)
reader.SetParserProp(libxml2.PARSER_VALIDATE,1)
while reader.Read() == 1:
res = res + "%s %s %d\n" % (reader.NodeType(),reader.Name(),reader.Depth())
if res != expect:
print "test4 failed: unexpected output"
print res
sys.exit(1)
if err != "":
print "test4 failed: validation error found"
print err
sys.exit(1)
#
# The same test but without entity substitution this time
#
s = """<!DOCTYPE test [
<!ELEMENT test (x, x)>
<!ELEMENT x (y)>
<!ELEMENT y (#PCDATA)>
<!ENTITY x "<x>&y;</x>">
<!ENTITY y "<y>yyy</y>">
]>
<test>
&x;
&x;
</test>"""
expect="""10 test 0
1 test 0
14 #text 1
5 x 1
14 #text 1
5 x 1
14 #text 1
15 test 0
"""
res=""
err=""
input = libxml2.inputBuffer(StringIO.StringIO(s))
reader = input.newTextReader("test5")
reader.SetParserProp(libxml2.PARSER_VALIDATE,1)
while reader.Read() == 1:
res = res + "%s %s %d\n" % (reader.NodeType(),reader.Name(),reader.Depth())
if res != expect:
print "test5 failed: unexpected output"
print res
if err != "":
print "test5 failed: validation error found"
print err
#
# cleanup
#
del input
del reader
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| gpl-2.0 |
aronsky/home-assistant | homeassistant/components/camera/canary.py | 2 | 3622 | """
Support for Canary camera.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.canary/
"""
import asyncio
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.camera import Camera, PLATFORM_SCHEMA
from homeassistant.components.canary import DATA_CANARY, DEFAULT_TIMEOUT
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from homeassistant.util import Throttle
CONF_FFMPEG_ARGUMENTS = 'ffmpeg_arguments'
DEPENDENCIES = ['canary', 'ffmpeg']
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_SESSION_RENEW = timedelta(seconds=90)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_FFMPEG_ARGUMENTS): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Canary sensors."""
data = hass.data[DATA_CANARY]
devices = []
for location in data.locations:
for device in location.devices:
if device.is_online:
devices.append(
CanaryCamera(hass, data, location, device, DEFAULT_TIMEOUT,
config.get(CONF_FFMPEG_ARGUMENTS)))
add_entities(devices, True)
class CanaryCamera(Camera):
"""An implementation of a Canary security camera."""
def __init__(self, hass, data, location, device, timeout, ffmpeg_args):
"""Initialize a Canary security camera."""
super().__init__()
self._ffmpeg = hass.data[DATA_FFMPEG]
self._ffmpeg_arguments = ffmpeg_args
self._data = data
self._location = location
self._device = device
self._timeout = timeout
self._live_stream_session = None
@property
def name(self):
"""Return the name of this device."""
return self._device.name
@property
def is_recording(self):
"""Return true if the device is recording."""
return self._location.is_recording
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return not self._location.is_recording
async def async_camera_image(self):
"""Return a still image response from the camera."""
self.renew_live_stream_session()
from haffmpeg import ImageFrame, IMAGE_JPEG
ffmpeg = ImageFrame(self._ffmpeg.binary, loop=self.hass.loop)
image = await asyncio.shield(ffmpeg.get_image(
self._live_stream_session.live_stream_url,
output_format=IMAGE_JPEG,
extra_cmd=self._ffmpeg_arguments), loop=self.hass.loop)
return image
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
if self._live_stream_session is None:
return
from haffmpeg import CameraMjpeg
stream = CameraMjpeg(self._ffmpeg.binary, loop=self.hass.loop)
await stream.open_camera(
self._live_stream_session.live_stream_url,
extra_cmd=self._ffmpeg_arguments)
await async_aiohttp_proxy_stream(
self.hass, request, stream,
'multipart/x-mixed-replace;boundary=ffserver')
await stream.close()
@Throttle(MIN_TIME_BETWEEN_SESSION_RENEW)
def renew_live_stream_session(self):
"""Renew live stream session."""
self._live_stream_session = self._data.get_live_stream_session(
self._device)
| apache-2.0 |
maohongyuan/kbengine | kbe/src/lib/python/Lib/turtledemo/peace.py | 99 | 1066 | #!/usr/bin/env python3
""" turtle-example-suite:
tdemo_peace.py
A simple drawing suitable as a beginner's
programming example. Aside from the
peacecolors assignment and the for loop,
it only uses turtle commands.
"""
from turtle import *
def main():
peacecolors = ("red3", "orange", "yellow",
"seagreen4", "orchid4",
"royalblue1", "dodgerblue4")
reset()
Screen()
up()
goto(-320,-195)
width(70)
for pcolor in peacecolors:
color(pcolor)
down()
forward(640)
up()
backward(640)
left(90)
forward(66)
right(90)
width(25)
color("white")
goto(0,-170)
down()
circle(170)
left(90)
forward(340)
up()
left(180)
forward(170)
right(45)
down()
forward(170)
up()
backward(170)
left(90)
down()
forward(170)
up()
goto(0,300) # vanish if hideturtle() is not available ;-)
return "Done!"
if __name__ == "__main__":
main()
mainloop()
| lgpl-3.0 |
ahamilton55/ansible | lib/ansible/modules/messaging/rabbitmq_vhost.py | 69 | 4384 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_vhost
short_description: Manage the state of a virtual host in RabbitMQ
description:
- Manage the state of a virtual host in RabbitMQ
version_added: "1.1"
author: '"Chris Hoffman (@choffman)"'
options:
name:
description:
- The name of the vhost to manage
required: true
default: null
aliases: [vhost]
node:
description:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
version_added: "1.2"
tracing:
description:
- Enable/disable tracing for a vhost
default: "no"
choices: [ "yes", "no" ]
aliases: [trace]
state:
description:
- The state of vhost
default: present
choices: [present, absent]
'''
EXAMPLES = '''
# Ensure that the vhost /test exists.
- rabbitmq_vhost:
name: /test
state: present
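# An additional illustrative example (not from the original module docs):
# Ensure that the vhost /test is absent.
- rabbitmq_vhost:
    name: /test
    state: absent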
'''
class RabbitMqVhost(object):
def __init__(self, module, name, tracing, node):
self.module = module
self.name = name
self.tracing = tracing
self.node = node
self._tracing = False
self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
def _exec(self, args, run_in_check_mode=False):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = [self._rabbitmqctl, '-q', '-n', self.node]
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def get(self):
vhosts = self._exec(['list_vhosts', 'name', 'tracing'], True)
for vhost in vhosts:
name, tracing = vhost.split('\t')
if name == self.name:
self._tracing = self.module.boolean(tracing)
return True
return False
def add(self):
return self._exec(['add_vhost', self.name])
def delete(self):
return self._exec(['delete_vhost', self.name])
def set_tracing(self):
if self.tracing != self._tracing:
if self.tracing:
self._enable_tracing()
else:
self._disable_tracing()
return True
return False
def _enable_tracing(self):
return self._exec(['trace_on', '-p', self.name])
def _disable_tracing(self):
return self._exec(['trace_off', '-p', self.name])
def main():
arg_spec = dict(
name=dict(required=True, aliases=['vhost']),
tracing=dict(default='off', aliases=['trace'], type='bool'),
state=dict(default='present', choices=['present', 'absent']),
node=dict(default='rabbit'),
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
tracing = module.params['tracing']
state = module.params['state']
node = module.params['node']
rabbitmq_vhost = RabbitMqVhost(module, name, tracing, node)
changed = False
if rabbitmq_vhost.get():
if state == 'absent':
rabbitmq_vhost.delete()
changed = True
else:
if rabbitmq_vhost.set_tracing():
changed = True
elif state == 'present':
rabbitmq_vhost.add()
rabbitmq_vhost.set_tracing()
changed = True
module.exit_json(changed=changed, name=name, state=state)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
nikkisquared/servo | tests/wpt/harness/wptrunner/update/tree.py | 142 | 11075 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
import subprocess
from .. import vcs
from ..vcs import bind_to_repo, git, hg
def get_unique_name(existing, initial):
"""Get a name either equal to initial or of the form initial_N, for some
integer N, that is not in the set existing.
:param existing: Set of names that must not be chosen.
:param initial: Name, or name prefix, to use"""
if initial not in existing:
return initial
for i in xrange(len(existing) + 1):
test = "%s_%s" % (initial, i + 1)
if test not in existing:
return test
assert False
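# Illustrative behaviour of get_unique_name (hand-worked examples):
#   get_unique_name({"item", "item_1"}, "item") -> "item_2"
#   get_unique_name(set(), "item") -> "item"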
class NoVCSTree(object):
name = "non-vcs"
def __init__(self, root=None):
if root is None:
root = os.path.abspath(os.curdir)
self.root = root
@classmethod
def is_type(cls, path=None):
return True
@property
def is_clean(self):
return True
def add_new(self, prefix=None):
pass
def create_patch(self, patch_name, message):
pass
def update_patch(self, include=None):
pass
def commit_patch(self):
pass
class HgTree(object):
name = "mercurial"
def __init__(self, root=None):
if root is None:
root = hg("root").strip()
self.root = root
self.hg = vcs.bind_to_repo(hg, self.root)
def __getstate__(self):
rv = self.__dict__.copy()
del rv['hg']
return rv
def __setstate__(self, dict):
self.__dict__.update(dict)
self.hg = vcs.bind_to_repo(vcs.hg, self.root)
@classmethod
def is_type(cls, path=None):
kwargs = {"log_error": False}
if path is not None:
kwargs["repo"] = path
try:
hg("root", **kwargs)
except:
return False
return True
@property
def is_clean(self):
return self.hg("status").strip() == ""
def add_new(self, prefix=None):
if prefix is not None:
args = ("-I", prefix)
else:
args = ()
self.hg("add", *args)
def create_patch(self, patch_name, message):
try:
self.hg("qinit", log_error=False)
except subprocess.CalledProcessError:
pass
patch_names = [item.strip() for item in self.hg("qseries").split("\n") if item.strip()]
suffix = 0
test_name = patch_name
while test_name in patch_names:
suffix += 1
test_name = "%s-%i" % (patch_name, suffix)
self.hg("qnew", test_name, "-X", self.root, "-m", message)
def update_patch(self, include=None):
if include is not None:
args = []
for item in include:
args.extend(["-I", item])
else:
args = ()
self.hg("qrefresh", *args)
return True
def commit_patch(self):
self.hg("qfinish")
def contains_commit(self, commit):
try:
self.hg("identify", "-r", commit.sha1)
return True
except subprocess.CalledProcessError:
return False
class GitTree(object):
name = "git"
def __init__(self, root=None):
if root is None:
root = git("rev-parse", "--show-toplevel").strip()
self.root = root
self.git = vcs.bind_to_repo(git, self.root)
self.message = None
self.commit_cls = Commit
def __getstate__(self):
rv = self.__dict__.copy()
del rv['git']
return rv
def __setstate__(self, dict):
self.__dict__.update(dict)
self.git = vcs.bind_to_repo(vcs.git, self.root)
@classmethod
def is_type(cls, path=None):
kwargs = {"log_error": False}
if path is not None:
kwargs["repo"] = path
try:
git("rev-parse", "--show-toplevel", **kwargs)
except:
return False
return True
@property
def rev(self):
"""Current HEAD revision"""
if vcs.is_git_root(self.root):
return self.git("rev-parse", "HEAD").strip()
else:
return None
@property
def is_clean(self):
return self.git("status").strip() == ""
def add_new(self, prefix=None):
"""Add files to the staging area.
:param prefix: None to include all files or a path prefix to
add all files under that path.
"""
if prefix is None:
args = ("-a",)
else:
args = ("--no-ignore-removal", prefix)
self.git("add", *args)
def list_refs(self, ref_filter=None):
"""Get a list of sha1, name tuples for references in a repository.
:param ref_filter: Pattern that the reference name must match (from the end,
matching whole /-delimited segments only)
"""
args = []
if ref_filter is not None:
args.append(ref_filter)
data = self.git("show-ref", *args)
rv = []
for line in data.split("\n"):
if not line.strip():
continue
sha1, ref = line.split()
rv.append((sha1, ref))
return rv
def list_remote(self, remote, ref_filter=None):
"""Return a list of (sha1, name) tupes for references in a remote.
:param remote: URL of the remote to list.
:param ref_filter: Pattern that the reference name must match.
"""
args = []
if ref_filter is not None:
args.append(ref_filter)
data = self.git("ls-remote", remote, *args)
rv = []
for line in data.split("\n"):
if not line.strip():
continue
sha1, ref = line.split()
rv.append((sha1, ref))
return rv
def get_remote_sha1(self, remote, branch):
"""Return the SHA1 of a particular branch in a remote.
:param remote: the remote URL
:param branch: the branch name"""
for sha1, ref in self.list_remote(remote, branch):
if ref == "refs/heads/%s" % branch:
return self.commit_cls(self, sha1)
assert False
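# Illustrative call (hypothetical remote URL and branch name):
#   tree.get_remote_sha1("https://github.com/w3c/web-platform-tests.git", "master")
# would return a Commit wrapping the sha1 that refs/heads/master points to
# on that remote, or hit the assert if no such branch exists.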
def create_patch(self, patch_name, message):
# In git a patch is actually a commit
self.message = message
def update_patch(self, include=None):
"""Commit the staged changes, or changes to listed files.
:param include: Either None, to commit staged changes, or a list
of filenames (which must already be in the repo)
to commit
"""
if include is not None:
args = tuple(include)
else:
args = ()
if self.git("status", "-uno", "-z", *args).strip():
self.git("add", *args)
return True
return False
def commit_patch(self):
assert self.message is not None
if self.git("diff", "--name-only", "--staged", "-z").strip():
self.git("commit", "-m", self.message)
return True
return False
def init(self):
self.git("init")
assert vcs.is_git_root(self.root)
def checkout(self, rev, branch=None, force=False):
"""Checkout a particular revision, optionally into a named branch.
:param rev: Revision identifier (e.g. SHA1) to checkout
:param branch: Branch name to use
:param force: Force-checkout
"""
assert rev is not None
args = []
if branch:
branches = [ref[len("refs/heads/"):] for sha1, ref in self.list_refs()
if ref.startswith("refs/heads/")]
branch = get_unique_name(branches, branch)
args += ["-b", branch]
if force:
args.append("-f")
args.append(rev)
self.git("checkout", *args)
def update(self, remote, remote_branch, local_branch):
"""Fetch from the remote and checkout into a local branch.
:param remote: URL to the remote repository
:param remote_branch: Branch on the remote repository to check out
:param local_branch: Local branch name to check out into
"""
if not vcs.is_git_root(self.root):
self.init()
self.git("clean", "-xdf")
self.git("fetch", remote, "%s:%s" % (remote_branch, local_branch))
self.checkout(local_branch)
self.git("submodule", "update", "--init", "--recursive")
def clean(self):
self.git("checkout", self.rev)
self.git("branch", "-D", self.local_branch)
def paths(self):
"""List paths in the tree"""
repo_paths = [self.root] + [os.path.join(self.root, path)
for path in self.submodules()]
rv = []
for repo_path in repo_paths:
paths = vcs.git("ls-tree", "-r", "--name-only", "HEAD", repo=repo_path).split("\n")
rel_path = os.path.relpath(repo_path, self.root)
rv.extend(os.path.join(rel_path, item.strip()) for item in paths if item.strip())
return rv
def submodules(self):
"""List submodule directories"""
output = self.git("submodule", "status", "--recursive")
rv = []
for line in output.split("\n"):
line = line.strip()
if not line:
continue
parts = line.split(" ")
rv.append(parts[1])
return rv
def contains_commit(self, commit):
try:
self.git("rev-parse", "--verify", commit.sha1)
return True
except subprocess.CalledProcessError:
return False
class CommitMessage(object):
def __init__(self, text):
self.text = text
self._parse_message()
def __str__(self):
return self.text
def _parse_message(self):
lines = self.text.splitlines()
self.full_summary = lines[0]
self.body = "\n".join(lines[1:])
class Commit(object):
msg_cls = CommitMessage
_sha1_re = re.compile("^[0-9a-f]{40}$")
def __init__(self, tree, sha1):
"""Object representing a commit in a specific GitTree.
:param tree: GitTree to which this commit belongs.
:param sha1: Full sha1 string for the commit
"""
assert self._sha1_re.match(sha1)
self.tree = tree
self.git = tree.git
self.sha1 = sha1
self.author, self.email, self.message = self._get_meta()
def __getstate__(self):
rv = self.__dict__.copy()
del rv['git']
return rv
def __setstate__(self, dict):
self.__dict__.update(dict)
self.git = self.tree.git
def _get_meta(self):
author, email, message = self.git("show", "-s", "--format=format:%an\n%ae\n%B", self.sha1).split("\n", 2)
return author, email, self.msg_cls(message)
| mpl-2.0 |
ml-lab/neon | neon/diagnostics/visualize_rnn.py | 4 | 6174 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Visualization for recurrent neural networks
"""
import numpy as np
from neon.util.compat import range
class VisualizeRNN(object):
"""
Visualizing weight matrices during training
"""
def __init__(self):
import matplotlib.pyplot
self.plt = matplotlib.pyplot
self.plt.interactive(1)
def plot_weights(self, weights_in, weights_rec, weights_out):
"""
Visualize the three weight matrices after every epoch. Serves to
check that weights are structured, not exploding, and get updated
"""
self.plt.figure(2)
self.plt.clf()
self.plt.subplot(1, 3, 1)
self.plt.imshow(weights_in.T, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('input.T')
self.plt.subplot(1, 3, 2)
self.plt.imshow(weights_rec, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('recurrent')
self.plt.subplot(1, 3, 3)
self.plt.imshow(weights_out, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('output')
self.plt.colorbar()
self.plt.draw()
self.plt.show()
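# Example call (illustrative; the weight arrays are hypothetical numpy
# arrays, not produced by this module):
#   import numpy as np
#   viz = VisualizeRNN()
#   viz.plot_weights(np.random.randn(64, 128),   # input weights
#                    np.random.randn(128, 128),  # recurrent weights
#                    np.random.randn(128, 64))   # output weights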
def plot_lstm_wts(self, lstm_layer, scale=1, fig=4):
"""
Visualize the three weight matrices after every epoch. Serves to
check that weights are structured, not exploding, and get updated
"""
self.plt.figure(fig)
self.plt.clf()
pltidx = 1
for lbl, wts in zip(lstm_layer.param_names, lstm_layer.params[:4]):
self.plt.subplot(2, 4, pltidx)
self.plt.imshow(wts.asnumpyarray().T, vmin=-scale, vmax=scale,
interpolation='nearest')
self.plt.title(lbl + ' Wx.T')
pltidx += 1
for lbl, wts, bs in zip(lstm_layer.param_names,
lstm_layer.params[4:8],
lstm_layer.params[8:12]):
self.plt.subplot(2, 4, pltidx)
self.plt.imshow(np.hstack((wts.asnumpyarray(),
bs.asnumpyarray(),
bs.asnumpyarray())).T,
vmin=-scale, vmax=scale, interpolation='nearest')
self.plt.title(lbl + ' Wh.T')
pltidx += 1
self.plt.draw()
self.plt.show()
def plot_lstm_acts(self, lstm_layer, scale=1, fig=4):
acts_lbl = ['i_t', 'f_t', 'o_t', 'g_t', 'net_i', 'c_t', 'c_t', 'c_phi']
acts_stp = [0, 0, 0, 1, 0, 0, 1, 1]
self.plt.figure(fig)
self.plt.clf()
for idx, lbl in enumerate(acts_lbl):
act_tsr = getattr(lstm_layer, lbl)[acts_stp[idx]]
self.plt.subplot(2, 4, idx+1)
self.plt.imshow(act_tsr.asnumpyarray().T,
vmin=-scale, vmax=scale, interpolation='nearest')
self.plt.title(lbl + '[' + str(acts_stp[idx]) + '].T')
self.plt.draw()
self.plt.show()
def plot_error(self, suberror_list, error_list):
self.plt.figure(1)
self.plt.clf()
self.plt.plot(np.arange(len(suberror_list)) /
np.float(len(suberror_list)) *
len(error_list), suberror_list)
self.plt.plot(error_list, linewidth=2)
self.plt.ylim((min(suberror_list), max(error_list)))
self.plt.draw()
self.plt.show()
def plot_activations(self, pre1, out1, pre2, out2, targets):
"""
Loop over tau unrolling steps, at each time step show the pre-acts
and outputs of the recurrent layer and output layer. Note that the
pre-acts are actually the g', so if the activation is linear it will
be one.
"""
self.plt.figure(3)
self.plt.clf()
for i in range(len(pre1)): # loop over unrolling
self.plt.subplot(len(pre1), 5, 5 * i + 1)
self.plt.imshow(pre1[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('pre1 or g\'1')
self.plt.subplot(len(pre1), 5, 5 * i + 2)
self.plt.imshow(out1[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('out1')
self.plt.subplot(len(pre1), 5, 5 * i + 3)
self.plt.imshow(pre2[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('pre2 or g\'2')
self.plt.subplot(len(pre1), 5, 5 * i + 4)
self.plt.imshow(out2[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('out2')
self.plt.subplot(len(pre1), 5, 5 * i + 5)
self.plt.imshow(targets[i].asnumpyarray(),
vmin=-1, vmax=1, interpolation='nearest')
if i == 0:
self.plt.title('target')
self.plt.draw()
self.plt.show()
def print_text(self, inputs, outputs):
"""
Moved this here so it's legal to use numpy.
"""
print("Prediction inputs")
print(np.argmax(inputs, 0).asnumpyarray().astype(np.int8).view('c'))
print("Prediction outputs")
print(np.argmax(outputs, 0).asnumpyarray().astype(np.int8).view('c'))
| apache-2.0 |
takis/django | tests/resolve_url/tests.py | 199 | 3167 | from __future__ import unicode_literals
from django.contrib.auth.views import logout
from django.core.urlresolvers import NoReverseMatch, reverse_lazy
from django.shortcuts import resolve_url
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from .models import UnimportantThing
@override_settings(ROOT_URLCONF='resolve_url.urls')
class ResolveUrlTests(SimpleTestCase):
"""
Tests for the ``resolve_url`` function.
"""
def test_url_path(self):
"""
Tests that passing a URL path to ``resolve_url`` will result in the
same url.
"""
self.assertEqual('/something/', resolve_url('/something/'))
def test_relative_path(self):
"""
Tests that passing a relative URL path to ``resolve_url`` will result
in the same url.
"""
self.assertEqual('../', resolve_url('../'))
self.assertEqual('../relative/', resolve_url('../relative/'))
self.assertEqual('./', resolve_url('./'))
self.assertEqual('./relative/', resolve_url('./relative/'))
def test_full_url(self):
"""
Tests that passing a full URL to ``resolve_url`` will result in the
same url.
"""
url = 'http://example.com/'
self.assertEqual(url, resolve_url(url))
def test_model(self):
"""
Tests that passing a model to ``resolve_url`` will result in
``get_absolute_url`` being called on that model instance.
"""
m = UnimportantThing(importance=1)
self.assertEqual(m.get_absolute_url(), resolve_url(m))
def test_view_function(self):
"""
Tests that passing a view name to ``resolve_url`` will result in the
URL path mapping to that view name.
"""
resolved_url = resolve_url(logout)
self.assertEqual('/accounts/logout/', resolved_url)
def test_lazy_reverse(self):
"""
Tests that passing the result of reverse_lazy is resolved to a real URL
string.
"""
resolved_url = resolve_url(reverse_lazy('logout'))
self.assertIsInstance(resolved_url, six.text_type)
self.assertEqual('/accounts/logout/', resolved_url)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_valid_view_name(self):
"""
Tests that passing a view function to ``resolve_url`` will result in
the URL path mapping to that view.
"""
resolved_url = resolve_url('django.contrib.auth.views.logout')
self.assertEqual('/accounts/logout/', resolved_url)
def test_domain(self):
"""
Tests that passing a domain to ``resolve_url`` returns the same domain.
"""
self.assertEqual(resolve_url('example.com'), 'example.com')
def test_non_view_callable_raises_no_reverse_match(self):
"""
Tests that passing a non-view callable into ``resolve_url`` raises a
``NoReverseMatch`` exception.
"""
with self.assertRaises(NoReverseMatch):
resolve_url(lambda: 'asdf')
| bsd-3-clause |
stimpsonsg/moose | gui/gui/CommentEditor.py | 8 | 1534 | #!/usr/bin/python
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
from GenSyntax import *
from ParamTable import *
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class CommentEditor(QtGui.QDialog):
def __init__(self, item, win_parent=None):
QtGui.QDialog.__init__(self, win_parent)
self.item = item
self.layout = QtGui.QVBoxLayout()
self.setLayout(self.layout)
self.edit_box = QtGui.QTextEdit()
try:
self.edit_box.insertPlainText(item.comment)
except:
pass
self.layout.addWidget(self.edit_box)
self.button_layout = QtGui.QHBoxLayout()
self.apply_button = QtGui.QPushButton('Apply')
self.cancel_button = QtGui.QPushButton('Cancel')
self.button_layout.addWidget(self.apply_button)
self.button_layout.addWidget(self.cancel_button)
QtCore.QObject.connect(self.apply_button, QtCore.SIGNAL("clicked()"), self.accept_text)
QtCore.QObject.connect(self.cancel_button, QtCore.SIGNAL("clicked()"), self.reject)
self.layout.addLayout(self.button_layout)
self.resize(700,500)
def accept_text(self):
self.item.comment = str(self.edit_box.toPlainText())
self.accept()
| lgpl-2.1 |
ratschlab/RGAN | eICU_tstr_evaluation.py | 1 | 8268 | import data_utils
import pandas as pd
import numpy as np
import tensorflow as tf
import math, random, itertools
import pickle
import time
import json
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc, precision_recall_curve
import copy
from scipy.stats import sem
print ("Starting TSTR experiment.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
# iterate over all dataset versions generated after running the GAN 5 times
aurocs_all_runs = []
auprcs_all_runs = []
for oo in range(5):
print (oo)
# find the best "dataset epoch", meaning the GAN epoch that generated the dataset
# validation is only done in some of the tasks, and the others are considered unknown
# (use validation set to pick best GAN epoch, then get result on test set)
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
all_aurocs_exp = []
all_auprcs_exp = []
for nn in np.arange(50,1050,50):
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
all_aurocs = []
all_auprcs = []
# in case we want to train each random forest multiple times with each dataset
for exp_num in range(1):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(vali_seqs_r, vali_targets[:,col_num]))
preds = estimator.predict(vali_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=vali_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=vali_targets[:,col_num]))
preds = estimator.predict_proba(vali_seqs_r)
fpr, tpr, thresholds = roc_curve(vali_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(vali_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
all_aurocs.append(aurocs)
all_auprcs.append(auprcs)
all_aurocs_exp.append(all_aurocs)
all_auprcs_exp.append(all_auprcs)
#with open('all_aurocs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_aurocs_exp)
#with open('all_auprcs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_auprcs_exp)
best_idx = np.argmax(np.array(all_aurocs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1) + np.array(all_auprcs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1))
best = np.arange(50,1050,50)[best_idx]
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
print ("----------------------------")
aurocs_all_runs.append(aurocs)
auprcs_all_runs.append(auprcs)
allr = np.vstack(aurocs_all_runs)
allp = np.vstack(auprcs_all_runs)
tstr_aurocs_mean = allr.mean(axis=0)
tstr_aurocs_sem = sem(allr, axis=0)
tstr_auprcs_mean = allp.mean(axis=0)
tstr_auprcs_sem = sem(allp, axis=0)
# get AUROC/AUPRC for real, random data
print ("Experiment with real data.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
aurocs_all = []
auprcs_all = []
for i in range(5):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
aurocs_all.append(aurocs)
auprcs_all.append(auprcs)
real_aurocs_mean = np.array(aurocs_all).mean(axis=0)
real_aurocs_sem = sem(aurocs_all, axis=0)
real_auprcs_mean = np.array(auprcs_all).mean(axis=0)
real_auprcs_sem = sem(auprcs_all, axis=0)
print ("Experiment with random predictions.")
#random score
test_targets_random = copy.deepcopy(test_targets)
random.shuffle(test_targets_random)
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
accuracies.append(accuracy_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
precisions.append(precision_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
preds = np.random.rand(len(test_targets[:,col_num]))
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds)
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds)
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
random_aurocs = aurocs
random_auprcs = auprcs
print("Results")
print("------------")
print("------------")
print("TSTR")
print(tstr_aurocs_mean)
print(tstr_aurocs_sem)
print(tstr_auprcs_mean)
print(tstr_auprcs_sem)
print("------------")
print("Real")
print(real_aurocs_mean)
print(real_aurocs_sem)
print(real_auprcs_mean)
print(real_auprcs_sem)
print("------------")
print("Random")
print(random_aurocs)
print(random_auprcs) | mit |
cyisfor/Python-Markdown | markdown/extensions/extra.py | 122 | 5547 | """
Python-Markdown Extra Extension
===============================
A compilation of various Python-Markdown extensions that imitates
[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
Note that each of the individual extensions still need to be available
on your PYTHONPATH. This extension simply wraps them all up as a
convenience so that only one extension needs to be listed when
initiating Markdown. See the documentation for each individual
extension for specifics about that extension.
There may be additional extensions that are distributed with
Python-Markdown that are not included here in Extra. Those extensions
are not part of PHP Markdown Extra, and therefore, not part of
Python-Markdown Extra. If you really would like Extra to include
additional extensions, we suggest creating your own clone of Extra
under a different name. You could also edit the `extensions` global
variable defined below, but be aware that such changes may be lost
when you upgrade to any future version of Python-Markdown.
See <https://pythonhosted.org/Markdown/extensions/extra.html>
for documentation.
Copyright The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor
from .. import util
import re
extensions = [
'markdown.extensions.smart_strong',
'markdown.extensions.fenced_code',
'markdown.extensions.footnotes',
'markdown.extensions.attr_list',
'markdown.extensions.def_list',
'markdown.extensions.tables',
'markdown.extensions.abbr'
]
class ExtraExtension(Extension):
""" Add various extensions to Markdown class."""
def __init__(self, *args, **kwargs):
""" config is a dumb holder which gets passed to actual ext later. """
self.config = kwargs.pop('configs', {})
self.config.update(kwargs)
def extendMarkdown(self, md, md_globals):
""" Register extension instances. """
md.registerExtensions(extensions, self.config)
if not md.safeMode:
# Turn on processing of markdown text within raw html
md.preprocessors['html_block'].markdown_in_raw = True
md.parser.blockprocessors.add('markdown_block',
MarkdownInHtmlProcessor(md.parser),
'_begin')
md.parser.blockprocessors.tag_counter = -1
md.parser.blockprocessors.contain_span_tags = re.compile(
r'^(p|h[1-6]|li|dd|dt|td|th|legend|address)$', re.IGNORECASE)
def makeExtension(*args, **kwargs):
return ExtraExtension(*args, **kwargs)
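# Typical usage (illustrative):
#   import markdown
#   html = markdown.markdown(text, extensions=['markdown.extensions.extra'])
# or, passing a configured instance directly:
#   html = markdown.markdown(text, extensions=[ExtraExtension()])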
class MarkdownInHtmlProcessor(BlockProcessor):
"""Process Markdown Inside HTML Blocks."""
def test(self, parent, block):
return block == util.TAG_PLACEHOLDER % \
str(self.parser.blockprocessors.tag_counter + 1)
def _process_nests(self, element, block):
"""Process the element's child elements in self.run."""
# Build list of indexes of each nest within the parent element.
nest_index = [] # a list of tuples: (left index, right index)
i = self.parser.blockprocessors.tag_counter + 1
while len(self._tag_data) > i and self._tag_data[i]['left_index']:
left_child_index = self._tag_data[i]['left_index']
right_child_index = self._tag_data[i]['right_index']
nest_index.append((left_child_index - 1, right_child_index))
i += 1
# Create each nest subelement.
for i, (left_index, right_index) in enumerate(nest_index[:-1]):
self.run(element, block[left_index:right_index],
block[right_index:nest_index[i + 1][0]], True)
self.run(element, block[nest_index[-1][0]:nest_index[-1][1]], # last
block[nest_index[-1][1]:], True) # nest
def run(self, parent, blocks, tail=None, nest=False):
self._tag_data = self.parser.markdown.htmlStash.tag_data
self.parser.blockprocessors.tag_counter += 1
tag = self._tag_data[self.parser.blockprocessors.tag_counter]
# Create Element
markdown_value = tag['attrs'].pop('markdown')
element = util.etree.SubElement(parent, tag['tag'], tag['attrs'])
# Slice Off Block
if nest:
self.parser.parseBlocks(parent, tail) # Process Tail
block = blocks[1:]
else: # includes nests since a third level of nesting isn't supported
block = blocks[tag['left_index'] + 1: tag['right_index']]
del blocks[:tag['right_index']]
# Process Text
if (self.parser.blockprocessors.contain_span_tags.match( # Span Mode
tag['tag']) and markdown_value != 'block') or \
markdown_value == 'span':
element.text = '\n'.join(block)
else: # Block Mode
i = self.parser.blockprocessors.tag_counter + 1
if len(self._tag_data) > i and self._tag_data[i]['left_index']:
first_subelement_index = self._tag_data[i]['left_index'] - 1
self.parser.parseBlocks(
element, block[:first_subelement_index])
if not nest:
block = self._process_nests(element, block)
else:
self.parser.parseBlocks(element, block)
| bsd-3-clause |
360youlun/cmsplugin-bootstrap-carousel | cmsplugin_bootstrap_carousel/migrations/0002_auto__add_field_carouselitem_button_title__add_field_carouselitem_butt.py | 1 | 4403 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CarouselItem.button_title'
db.add_column(u'cmsplugin_bootstrap_carousel_carouselitem', 'button_title',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
# Adding field 'CarouselItem.button_url'
db.add_column(u'cmsplugin_bootstrap_carousel_carouselitem', 'button_url',
self.gf('django.db.models.fields.URLField')(default='', max_length=200, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CarouselItem.button_title'
db.delete_column(u'cmsplugin_bootstrap_carousel_carouselitem', 'button_title')
# Deleting field 'CarouselItem.button_url'
db.delete_column(u'cmsplugin_bootstrap_carousel_carouselitem', 'button_url')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'cmsplugin_bootstrap_carousel.carousel': {
'Meta': {'object_name': 'Carousel', 'db_table': "u'cmsplugin_carousel'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'domid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'interval': ('django.db.models.fields.IntegerField', [], {'default': '5000'})
},
u'cmsplugin_bootstrap_carousel.carouselitem': {
'Meta': {'object_name': 'CarouselItem'},
'button_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'button_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'caption_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'caption_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'carousel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cmsplugin_bootstrap_carousel.Carousel']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_bootstrap_carousel'] | bsd-3-clause |
aperigault/ansible | lib/ansible/module_utils/network/f5/urls.py | 60 | 4623 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
try:
from library.module_utils.network.f5.common import F5ModuleError
except ImportError:
from ansible.module_utils.network.f5.common import F5ModuleError
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
def check_header_validity(header):
"""Verifies that header value is a string which doesn't contain
leading whitespace or return characters.
NOTE: This is a slightly modified version of the original function
taken from the requests library:
http://docs.python-requests.org/en/master/_modules/requests/utils/
:param header: string containing ':'.
"""
try:
name, value = header.split(':')
except ValueError:
raise F5ModuleError('Invalid header format: {0}'.format(header))
if name == '':
raise F5ModuleError('Invalid header format: {0}'.format(header))
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise F5ModuleError("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise F5ModuleError("Value for header {%s: %s} must be of type str or "
"bytes, not %s" % (name, value, type(value)))
def build_service_uri(base_uri, partition, name):
"""Build the proper uri for a service resource.
This follows the scheme:
<base_uri>/~<partition>~<<name>.app>~<name>
:param base_uri: str -- base uri of the REST endpoint
:param partition: str -- partition for the service
:param name: str -- name of the service
:returns: str -- uri to access the service
"""
name = name.replace('/', '~')
return '%s~%s~%s.app~%s' % (base_uri, partition, name, name)
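# e.g. (hypothetical values):
#   build_service_uri('https://localhost/mgmt/tm/sys/application/service/',
#                     'Common', 'my_app')
#   -> 'https://localhost/mgmt/tm/sys/application/service/~Common~my_app.app~my_app'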
def parseStats(entry):
if 'description' in entry:
return entry['description']
elif 'value' in entry:
return entry['value']
elif 'entries' in entry or 'nestedStats' in entry and 'entries' in entry['nestedStats']:
if 'entries' in entry:
entries = entry['entries']
else:
entries = entry['nestedStats']['entries']
result = None
for name in entries:
entry = entries[name]
if 'https://localhost' in name:
name = name.split('/')
name = name[-1]
if result and isinstance(result, list):
result.append(parseStats(entry))
elif result and isinstance(result, dict):
result[name] = parseStats(entry)
else:
try:
int(name)
result = list()
result.append(parseStats(entry))
except ValueError:
result = dict()
result[name] = parseStats(entry)
else:
if '.' in name:
names = name.split('.')
key = names[0]
value = names[1]
if result is None:
# result can be None if this branch is reached first
#
# For example, the mgmt/tm/net/trunk/NAME/stats API
# returns counters.bitsIn before anything else.
result = dict()
result[key] = dict()
elif key not in result:
result[key] = dict()
elif result[key] is None:
result[key] = dict()
result[key][value] = parseStats(entry)
else:
if result and isinstance(result, list):
result.append(parseStats(entry))
elif result and isinstance(result, dict):
result[name] = parseStats(entry)
else:
try:
int(name)
result = list()
result.append(parseStats(entry))
except ValueError:
result = dict()
result[name] = parseStats(entry)
return result
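# Minimal illustration on a hand-made stats entry (fabricated, not real
# F5 REST output):
#   parseStats({'entries': {'counters.bitsIn': {'value': 8}}})
#   -> {'counters': {'bitsIn': 8}}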
| gpl-3.0 |
geminateCoder/Character-Archive-Website | Lib/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py | 2801 | 1782 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
# NOTE: CP949 is a superset of EUC-KR, so the distribution should be
# not different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
| cc0-1.0 |
markflorisson/blaze-core | blaze/compute/air/frontend/ckernel_impls.py | 6 | 1562 | # -*- coding: utf-8 -*-
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
transform(CKernelImplementations(), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def op_kernel(self, op):
function = op.metadata['kernel']
overload = op.metadata['overload']
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = monosig.argtypes
if function.matches('ckernel', argtypes):
overload = function.best_match('ckernel', argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
return op
| bsd-3-clause |
zzicewind/nova | nova/tests/unit/pci/test_utils.py | 44 | 7119 | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
import mock
from nova import exception
from nova.pci import utils
from nova import test
class PciDeviceMatchTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceMatchTestCase, self).setUp()
self.fake_pci_1 = {'vendor_id': 'v1',
'device_id': 'd1'}
def test_single_spec_match(self):
self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1'}]))
def test_multiple_spec_match(self):
self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v1', 'device_id': 'd1'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
def test_spec_dismatch(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v4', 'device_id': 'd4'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
def test_spec_extra_key(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v1', 'device_id': 'd1', 'wrong_key': 'k1'}]))
class PciDeviceAddressParserTestCase(test.NoDBTestCase):
def test_parse_address(self):
self.parse_result = utils.parse_address("0000:04:12.6")
self.assertEqual(self.parse_result, ('0000', '04', '12', '6'))
def test_parse_address_wrong(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
utils.parse_address, "0000:04.12:6")
def test_parse_address_invalid_character(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
utils.parse_address, "0000:h4.12:6")
class GetFunctionByIfnameTestCase(test.NoDBTestCase):
@mock.patch.object(os, 'readlink')
@mock.patch.object(os, 'listdir')
def test_virtual_function(self, mock_listdir, mock_readlink):
mock_listdir.return_value = ['foo', 'bar']
mock_readlink.return_value = '../../../0000.00.00.1'
address, physical_function = utils.get_function_by_ifname('eth0')
self.assertEqual(address, '0000.00.00.1')
self.assertFalse(physical_function)
@mock.patch.object(os, 'readlink')
@mock.patch.object(os, 'listdir')
def test_physical_function(self, mock_listdir, mock_readlink):
mock_listdir.return_value = ['foo', 'virtfn1', 'bar']
mock_readlink.return_value = '../../../0000:00:00.1'
address, physical_function = utils.get_function_by_ifname('eth0')
self.assertEqual(address, '0000:00:00.1')
self.assertTrue(physical_function)
@mock.patch.object(os, 'listdir')
def test_exception(self, mock_listdir):
mock_listdir.side_effect = OSError('No such file or directory')
address, physical_function = utils.get_function_by_ifname('lo')
self.assertIsNone(address)
self.assertFalse(physical_function)
class IsPhysicalFunctionTestCase(test.NoDBTestCase):
class FakePciAddress(object):
def __init__(self):
self.domain = 0
self.bus = 0
self.slot = 0
self.func = 0
def setUp(self):
super(IsPhysicalFunctionTestCase, self).setUp()
self.pci_address = self.FakePciAddress()
@mock.patch.object(os, 'listdir')
def test_virtual_function(self, mock_listdir):
mock_listdir.return_value = ['foo', 'bar']
self.assertFalse(utils.is_physical_function(self.pci_address))
@mock.patch.object(os, 'listdir')
def test_physical_function(self, mock_listdir):
mock_listdir.return_value = ['foo', 'virtfn1', 'bar']
self.assertTrue(utils.is_physical_function(self.pci_address))
@mock.patch.object(os, 'listdir')
def test_exception(self, mock_listdir):
mock_listdir.side_effect = OSError('No such file or directory')
self.assertFalse(utils.is_physical_function(self.pci_address))
class GetIfnameByPciAddressTestCase(test.NoDBTestCase):
def setUp(self):
super(GetIfnameByPciAddressTestCase, self).setUp()
self.pci_address = '0000:00:00.1'
@mock.patch.object(os, 'listdir')
def test_physical_function_inferface_name(self, mock_listdir):
mock_listdir.return_value = ['foo', 'bar']
ifname = utils.get_ifname_by_pci_address(
self.pci_address, pf_interface=True)
self.assertEqual(ifname, 'bar')
@mock.patch.object(os, 'listdir')
def test_virtual_function_inferface_name(self, mock_listdir):
mock_listdir.return_value = ['foo', 'bar']
ifname = utils.get_ifname_by_pci_address(
self.pci_address, pf_interface=False)
self.assertEqual(ifname, 'bar')
@mock.patch.object(os, 'listdir')
def test_exception(self, mock_listdir):
mock_listdir.side_effect = OSError('No such file or directory')
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_ifname_by_pci_address,
self.pci_address
)
class GetVfNumByPciAddressTestCase(test.NoDBTestCase):
def setUp(self):
super(GetVfNumByPciAddressTestCase, self).setUp()
self.pci_address = '0000:00:00.1'
self.paths = [
'/sys/bus/pci/devices/0000:00:00.1/physfn/virtfn3',
]
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_vf_number_found(self, mock_iglob, mock_readlink):
mock_iglob.return_value = self.paths
mock_readlink.return_value = '../../0000:00:00.1'
vf_num = utils.get_vf_num_by_pci_address(self.pci_address)
self.assertEqual(vf_num, '3')
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_vf_number_not_found(self, mock_iglob, mock_readlink):
mock_iglob.return_value = self.paths
mock_readlink.return_value = '../../0000:00:00.2'
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_vf_num_by_pci_address,
self.pci_address
)
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_exception(self, mock_iglob, mock_readlink):
mock_iglob.return_value = self.paths
mock_readlink.side_effect = OSError('No such file or directory')
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_vf_num_by_pci_address,
self.pci_address
)
| apache-2.0 |
komsas/OpenUpgrade | addons/stock_landed_costs/product.py | 364 | 1611 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
SPLIT_METHOD = [
('equal', 'Equal'),
('by_quantity', 'By Quantity'),
('by_current_cost_price', 'By Current Cost Price'),
('by_weight', 'By Weight'),
('by_volume', 'By Volume'),
]
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'landed_cost_ok': fields.boolean('Can constitute a landed cost'),
'split_method': fields.selection(SPLIT_METHOD, 'Split Method'),
}
_defaults = {
'landed_cost_ok': False,
'split_method': 'equal',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jhcepas/ete | ete3/tools/ete_build_lib/task/trimal.py | 4 | 4158 | # -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
import os
import sys
import logging
from six.moves import map
log = logging.getLogger("main")
from ..master_task import AlgCleanerTask
from ..master_job import Job
from ..utils import SeqGroup, GLOBALS, hascontent, DATATYPES, pjoin
from .. import db
__all__ = ["Trimal"]
class Trimal(AlgCleanerTask):
def __init__(self, nodeid, seqtype, alg_fasta_file, alg_phylip_file,
conf, confname):
GLOBALS["citator"].add('trimal')
self.confname = confname
self.conf = conf
self.seqtype = seqtype
self.alg_fasta_file = alg_fasta_file
self.alg_phylip_file = alg_phylip_file
base_args = {
'-in': None,
'-out': None,
'-fasta': "",
'-colnumbering': "",
}
# Initialize task
AlgCleanerTask.__init__(self, nodeid, "acleaner", "Trimal",
base_args,
self.conf[confname])
self.init()
def load_jobs(self):
appname = self.conf[self.confname]["_app"]
args = self.args.copy()
args["-in"] = pjoin(GLOBALS["input_dir"], self.alg_fasta_file)
args["-out"] = "clean.alg.fasta"
job = Job(self.conf["app"][appname], args, parent_ids=[self.nodeid])
job.add_input_file(self.alg_fasta_file)
self.jobs.append(job)
def finish(self):
# Once executed, alignment is converted into relaxed
# interleaved phylip format. Both files, fasta and phylip,
# remain accessible.
# Set Task specific attributes
main_job = self.jobs[0]
fasta_path = pjoin(main_job.jobdir, "clean.alg.fasta")
alg = SeqGroup(fasta_path)
if len(alg) != self.size:
log.warning("Trimming was to aggressive and it tried"
" to remove one or more sequences."
" Alignment trimming will be disabled for this dataset."
)
self.clean_alg_fasta_file = db.register_task_data(self.taskid, DATATYPES.clean_alg_fasta, self.alg_fasta_file)
self.clean_alg_phylip_file = db.register_task_data(self.taskid, DATATYPES.clean_alg_phylip, self.alg_phylip_file)
else:
for line in open(self.jobs[0].stdout_file):
line = line.strip()
if line.startswith("#ColumnsMap"):
kept_columns = list(map(int, line.split("\t")[1].split(",")))
fasta = alg.write(format="fasta")
phylip = alg.write(format="iphylip_relaxed")
AlgCleanerTask.store_data(self, fasta, phylip, kept_columns)
| gpl-3.0 |