prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k)
---|---|
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
<|fim_middle|>
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)<|fim▁end|> | if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters'] |
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
<|fim_middle|>
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)<|fim▁end|> | raise Exception('Character tried to enter stage when already on stage at object ' + str(i)) |
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
<|fim_middle|>
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)<|fim▁end|> | characters_on_stage = [] |
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
<|fim_middle|>
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)<|fim▁end|> | characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']] |
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
<|fim_middle|>
elif obj['type'] == 'line':
if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)<|fim▁end|> | if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker'] |
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
<|fim_middle|>
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)<|fim▁end|> | raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1]) |
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
<|fim_middle|>
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)<|fim▁end|> | if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
}) |
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
if currently_speaking == None:
<|fim_middle|>
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)<|fim▁end|> | raise Exception('A line did not have an associated speaker at object ' + str(i)) |
<|file_name|>step6.py<|end_file_name|><|fim▁begin|>import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
<|fim_middle|>
json.dump(end_obj, out)<|fim▁end|> | currently_speaking = None |
<|file_name|>prescript.py<|end_file_name|><|fim▁begin|>import os
import sys
import argparse
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.selfcheck import harvesterPackageInfo
def main():
oparser = argparse.ArgumentParser(prog='prescript', add_help=True)<|fim▁hole|> sys.exit(0)
args = oparser.parse_args(sys.argv[1:])
local_info_file = os.path.normpath(args.local_info_file)
hpi = harvesterPackageInfo(local_info_file=local_info_file)
if hpi.package_changed:
print('Harvester package changed')
#TODO
pass
hpi.renew_local_info()
else:
print('Harvester package unchanged. Skipped')
if __name__ == '__main__':
main()<|fim▁end|> | oparser.add_argument('-f', '--local_info_file', action='store', dest='local_info_file', help='path of harvester local info file')
if len(sys.argv) == 1:
print('No argument or flag specified. Did nothing') |
<|file_name|>prescript.py<|end_file_name|><|fim▁begin|>import os
import sys
import argparse
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.selfcheck import harvesterPackageInfo
def main():
<|fim_middle|>
if __name__ == '__main__':
main()
<|fim▁end|> | oparser = argparse.ArgumentParser(prog='prescript', add_help=True)
oparser.add_argument('-f', '--local_info_file', action='store', dest='local_info_file', help='path of harvester local info file')
if len(sys.argv) == 1:
print('No argument or flag specified. Did nothing')
sys.exit(0)
args = oparser.parse_args(sys.argv[1:])
local_info_file = os.path.normpath(args.local_info_file)
hpi = harvesterPackageInfo(local_info_file=local_info_file)
if hpi.package_changed:
print('Harvester package changed')
#TODO
pass
hpi.renew_local_info()
else:
print('Harvester package unchanged. Skipped') |
<|file_name|>prescript.py<|end_file_name|><|fim▁begin|>import os
import sys
import argparse
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.selfcheck import harvesterPackageInfo
def main():
oparser = argparse.ArgumentParser(prog='prescript', add_help=True)
oparser.add_argument('-f', '--local_info_file', action='store', dest='local_info_file', help='path of harvester local info file')
if len(sys.argv) == 1:
<|fim_middle|>
args = oparser.parse_args(sys.argv[1:])
local_info_file = os.path.normpath(args.local_info_file)
hpi = harvesterPackageInfo(local_info_file=local_info_file)
if hpi.package_changed:
print('Harvester package changed')
#TODO
pass
hpi.renew_local_info()
else:
print('Harvester package unchanged. Skipped')
if __name__ == '__main__':
main()
<|fim▁end|> | print('No argument or flag specified. Did nothing')
sys.exit(0) |
<|file_name|>prescript.py<|end_file_name|><|fim▁begin|>import os
import sys
import argparse
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.selfcheck import harvesterPackageInfo
def main():
oparser = argparse.ArgumentParser(prog='prescript', add_help=True)
oparser.add_argument('-f', '--local_info_file', action='store', dest='local_info_file', help='path of harvester local info file')
if len(sys.argv) == 1:
print('No argument or flag specified. Did nothing')
sys.exit(0)
args = oparser.parse_args(sys.argv[1:])
local_info_file = os.path.normpath(args.local_info_file)
hpi = harvesterPackageInfo(local_info_file=local_info_file)
if hpi.package_changed:
<|fim_middle|>
else:
print('Harvester package unchanged. Skipped')
if __name__ == '__main__':
main()
<|fim▁end|> | print('Harvester package changed')
#TODO
pass
hpi.renew_local_info() |
<|file_name|>prescript.py<|end_file_name|><|fim▁begin|>import os
import sys
import argparse
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.selfcheck import harvesterPackageInfo
def main():
oparser = argparse.ArgumentParser(prog='prescript', add_help=True)
oparser.add_argument('-f', '--local_info_file', action='store', dest='local_info_file', help='path of harvester local info file')
if len(sys.argv) == 1:
print('No argument or flag specified. Did nothing')
sys.exit(0)
args = oparser.parse_args(sys.argv[1:])
local_info_file = os.path.normpath(args.local_info_file)
hpi = harvesterPackageInfo(local_info_file=local_info_file)
if hpi.package_changed:
print('Harvester package changed')
#TODO
pass
hpi.renew_local_info()
else:
<|fim_middle|>
if __name__ == '__main__':
main()
<|fim▁end|> | print('Harvester package unchanged. Skipped') |
<|file_name|>prescript.py<|end_file_name|><|fim▁begin|>import os
import sys
import argparse
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.selfcheck import harvesterPackageInfo
def main():
oparser = argparse.ArgumentParser(prog='prescript', add_help=True)
oparser.add_argument('-f', '--local_info_file', action='store', dest='local_info_file', help='path of harvester local info file')
if len(sys.argv) == 1:
print('No argument or flag specified. Did nothing')
sys.exit(0)
args = oparser.parse_args(sys.argv[1:])
local_info_file = os.path.normpath(args.local_info_file)
hpi = harvesterPackageInfo(local_info_file=local_info_file)
if hpi.package_changed:
print('Harvester package changed')
#TODO
pass
hpi.renew_local_info()
else:
print('Harvester package unchanged. Skipped')
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | main() |
<|file_name|>prescript.py<|end_file_name|><|fim▁begin|>import os
import sys
import argparse
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.selfcheck import harvesterPackageInfo
def <|fim_middle|>():
oparser = argparse.ArgumentParser(prog='prescript', add_help=True)
oparser.add_argument('-f', '--local_info_file', action='store', dest='local_info_file', help='path of harvester local info file')
if len(sys.argv) == 1:
print('No argument or flag specified. Did nothing')
sys.exit(0)
args = oparser.parse_args(sys.argv[1:])
local_info_file = os.path.normpath(args.local_info_file)
hpi = harvesterPackageInfo(local_info_file=local_info_file)
if hpi.package_changed:
print('Harvester package changed')
#TODO
pass
hpi.renew_local_info()
else:
print('Harvester package unchanged. Skipped')
if __name__ == '__main__':
main()
<|fim▁end|> | main |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"<|fim▁hole|>def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found either '*' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))<|fim▁end|> | _else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
|
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
<|fim_middle|>
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found either '*' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1 |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
<|fim_middle|>
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found either '*' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
<|fim_middle|>
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found either '*' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u'' |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found either '*' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
<|fim_middle|>
<|fim▁end|> | run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs)) |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found either '*' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
<|fim_middle|>
<|fim▁end|> | params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs)) |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
<|fim_middle|>
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found either '*' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | curr_idx += 1
continue |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
<|fim_middle|>
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | break |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
<|fim_middle|>
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | default_value = nxt.next_sibling
curr_idx += 2 |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
<|fim_middle|>
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | default_value = None |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
<|fim_middle|>
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:] |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
<|fim_middle|>
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | kw_params = raw_params[3:] |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
<|fim_middle|>
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | param.remove() |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
<|fim_middle|>
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | return False |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
<|fim_middle|>
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | return True |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
<|fim_middle|>
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | continue |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
<|fim_middle|>
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | needs_fix = True |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
<|fim_middle|>
elif t.type == token.DOUBLESTAR:
# Found the '**' from **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | return t.value if needs_fix else u'' |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
<|fim_middle|>
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | found_kwargs = True |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
<|fim_middle|>
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | return kwargs_default if needs_fix else u'' |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
<|fim_middle|>
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | params_rawlist = params_rawlist[i:]
break |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
<|fim_middle|>
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | return |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
<|fim_middle|>
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | return |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
<|fim_middle|>
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident)) |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
<|fim_middle|>
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident)) |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
<|fim_middle|>
<|fim▁end|> | arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs)) |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
<|fim_middle|>
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | arglist.append_child(Comma()) |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def <|fim_middle|>(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | gen_params |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def <|fim_middle|>(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | remove_params |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict, even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def <|fim_middle|>(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns a string with the name of the kwargs dict if the params after the first star need fixing.
Otherwise returns an empty string.
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# This is the 'foobar' of **foobar; return it if a fix is needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | needs_fixing |
<|file_name|>fix_kwargs.py<|end_file_name|><|fim▁begin|>u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict, even if one is needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns a string with the name of the kwargs dict if the params after the first star need fixing.
Otherwise returns an empty string.
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# This is the 'foobar' of **foobar; return it if a fix is needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def <|fim_middle|>(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
<|fim▁end|> | transform |
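The fix_kwargs.py examples above are easiest to follow with a small before/after sketch. The sketch below is an approximation built from the _assign_template, _if_template and _else_template strings defined in the file; the exact ordering of the inserted assignment lines may differ in real output, and the example function is invented:

# Input (Python 3 keyword-only parameters):
def spam(a, *, b, c=1):
    return a + b + c

# Approximate output after FixKwargs runs (no existing **kwargs, so _3to2kwargs is added):
def spam(a, **_3to2kwargs):
    if 'c' in _3to2kwargs: c = _3to2kwargs['c']; del _3to2kwargs['c']
    else: c = 1
    b = _3to2kwargs['b']; del _3to2kwargs['b']
    return a + b + c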
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)<|fim▁hole|> for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))<|fim▁end|> |
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths() |
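For orientation, a hypothetical invocation of the script above (the file names are invented; the flags correspond to the argparse options defined in the file):

python isobands_matplotlib.py dem.tif dem_bands.shp -b 1 -off 0 -i 10 -nln bands -a h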
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
<|fim_middle|>
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | return v.lower() in ("yes", "true", "t", "1") |
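A quick illustration of the completed str2bool helper; the calls are illustrative and not part of the original source:

# str2bool lowercases its argument and checks it against a small whitelist
str2bool("Yes")   # True
str2bool("1")     # True
str2bool("no")    # False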
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
<|fim_middle|>
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | '''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy() |
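Besides the command-line entry point, the isobands() function can be called directly. A minimal sketch, assuming the script is importable as a module and using invented file names:

from isobands_matplotlib import isobands

# band 1, 10-unit bands starting at offset 0, written to a Shapefile layer 'bands'
isobands('dem.tif', 1, 'dem_bands.shp', 'ESRI Shapefile', 'bands', 'h', 0.0, 10.0)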
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
<|fim_middle|>
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | remove(out_file) |
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
<|fim_middle|>
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | min_level = offset + interval * floor((min_value - offset)/interval) |
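As a worked example of the completed line (numbers invented): with min_value = 3.7, offset = 0 and interval = 10, floor((3.7 - 0) / 10) = 0, so min_level = 0 + 10 * 0 = 0; the lowest band boundary therefore snaps to the interval grid just below the raster minimum.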
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
<|fim_middle|>
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | out_val = contours.levels[level] + interval |
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
<|fim_middle|>
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | out_val = contours.levels[level] |
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
<|fim_middle|>
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing) |
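The completion above relies on matplotlib's Path vertex codes: code 1 is Path.MOVETO, which marks the first vertex of a sub-path, so the script flushes the current ring and starts a new one at that point. A small sketch of the same idea; the 'path' variable is assumed to come from contourf, as in the script:

from matplotlib.path import Path

rings = []
current = []
for vertex, code in zip(path.vertices, path.codes):
    if code == Path.MOVETO and current:   # MOVETO == 1: a new ring begins
        rings.append(current)
        current = []
    current.append((vertex[0], vertex[1]))
if current:
    rings.append(current)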
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
<|fim_middle|>
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | pol.AddGeometry(ring) |
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
<|fim_middle|>
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | print "Failed to create feature in shapefile.\n"
exit( 1 ) |
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines.
This version requires matplotlib, but there is another one,
isobands_gdal.py, that uses only the GDAL Python bindings.
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
<|fim_middle|>
<|fim▁end|> | PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up)) |
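The min_level/max_level lines in the row above snap the raster's value range onto the isoband grid. A small worked sketch of just that arithmetic, with made-up statistics (offset 0, interval 10, raster range 3.2 to 97.8):

from math import ceil, floor
from numpy import arange

offset, interval = 0.0, 10.0
min_value, max_value = 3.2, 97.8   # hypothetical ComputeRasterMinMax() result
min_level = offset + interval * floor((min_value - offset) / interval)        # 0.0
max_level = offset + interval * (1 + ceil((max_value - offset) / interval))   # 110.0
levels = arange(min_level, max_level, interval)   # 0, 10, ..., 100 (one extra level on top)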
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines
This version requires matplotlib, but there is another one,
isobands_gdal.py that uses only GDAL python
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def <|fim_middle|>(v):
return v.lower() in ("yes", "true", "t", "1")
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | str2bool |
<|file_name|>isobands_matplotlib.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way to gdal_contour, but creates polygons
instead of polylines
This version requires matplotlib, but there is another one,
isobands_gdal.py that uses only GDAL python
Originally created by Roger Veciana i Rovira, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def <|fim_middle|>(in_file, band, out_file, out_format, layer_name, attr_name,
offset, interval, min_level = None, upper_val_output = False):
'''
The method that calculates the isobands
'''
ds_in = gdal.Open(in_file)
band_in = ds_in.GetRasterBand(band)
xsize_in = band_in.XSize
ysize_in = band_in.YSize
geotransform_in = ds_in.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt( ds_in.GetProjectionRef() )
#Creating the output vectorial file
drv = ogr.GetDriverByName(out_format)
if exists(out_file):
remove(out_file)
dst_ds = drv.CreateDataSource( out_file )
dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
srs = srs)
fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
dst_layer.CreateField( fdef )
# Use the geotransform pixel size value to avoid weird rounding errors in
# original approach.
x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
for ii in range(xsize_in)]
y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
for ii in range(ysize_in)]
#x_pos = arange(geotransform_in[0],
# geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
#y_pos = arange(geotransform_in[3],
# geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
x_grid, y_grid = meshgrid(x_pos, y_pos)
raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)
#stats = band_in.GetStatistics(True, True)
min_value, max_value = band_in.ComputeRasterMinMax()
if min_level == None:
#min_value = stats[0]
min_level = offset + interval * floor((min_value - offset)/interval)
#max_value = stats[1]
#Due to range issues, a level is added
max_level = offset + interval * (1 + ceil((max_value - offset)/interval))
levels = arange(min_level, max_level, interval)
contours = plt.contourf(x_grid, y_grid, raster_values, levels)
for level in range(len(contours.collections)):
paths = contours.collections[level].get_paths()
for path in paths:
feat_out = ogr.Feature( dst_layer.GetLayerDefn())
if upper_val_output:
out_val = contours.levels[level] + interval
else:
out_val = contours.levels[level]
feat_out.SetField( attr_name, out_val )
pol = ogr.Geometry(ogr.wkbPolygon)
ring = None
for i in range(len(path.vertices)):
point = path.vertices[i]
if path.codes[i] == 1:
if ring != None:
pol.AddGeometry(ring)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(point[0], point[1])
pol.AddGeometry(ring)
feat_out.SetGeometry(pol)
if dst_layer.CreateFeature(feat_out) != 0:
print "Failed to create feature in shapefile.\n"
exit( 1 )
feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
<|fim▁end|> | isobands |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from pymongo.connection import Connection
from django.db import models
from eventtracker.conf import settings
def get_mongo_collection():
"Open a connection to MongoDB and return the collection to use."
if settings.RIGHT_MONGODB_HOST:
connection = Connection.paired(
left=(settings.MONGODB_HOST, settings.MONGODB_PORT),
right=(settings.RIGHT_MONGODB_HOST, settings.RIGHT_MONGODB_PORT)
)
else:
connection = Connection(host=settings.MONGODB_HOST, port=settings.MONGODB_PORT)
return connection[settings.MONGODB_DB][settings.MONGODB_COLLECTION]
def save_event(collection, event, timestamp, params):
"Save the event in MongoDB collection"<|fim▁hole|> 'timestamp': datetime.fromtimestamp(timestamp),
'params': params
})
class Event(models.Model):
"Dummy model for development."
timestamp = models.DateTimeField(auto_now_add=True)
event = models.SlugField()
params = models.TextField()<|fim▁end|> | collection.insert({
'event': event, |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from pymongo.connection import Connection
from django.db import models
from eventtracker.conf import settings
def get_mongo_collection():
<|fim_middle|>
def save_event(collection, event, timestamp, params):
"Save the event in MongoDB collection"
collection.insert({
'event': event,
'timestamp': datetime.fromtimestamp(timestamp),
'params': params
})
class Event(models.Model):
"Dummy model for development."
timestamp = models.DateTimeField(auto_now_add=True)
event = models.SlugField()
params = models.TextField()
<|fim▁end|> | "Open a connection to MongoDB and return the collection to use."
if settings.RIGHT_MONGODB_HOST:
connection = Connection.paired(
left=(settings.MONGODB_HOST, settings.MONGODB_PORT),
right=(settings.RIGHT_MONGODB_HOST, settings.RIGHT_MONGODB_PORT)
)
else:
connection = Connection(host=settings.MONGODB_HOST, port=settings.MONGODB_PORT)
return connection[settings.MONGODB_DB][settings.MONGODB_COLLECTION] |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from pymongo.connection import Connection
from django.db import models
from eventtracker.conf import settings
def get_mongo_collection():
"Open a connection to MongoDB and return the collection to use."
if settings.RIGHT_MONGODB_HOST:
connection = Connection.paired(
left=(settings.MONGODB_HOST, settings.MONGODB_PORT),
right=(settings.RIGHT_MONGODB_HOST, settings.RIGHT_MONGODB_PORT)
)
else:
connection = Connection(host=settings.MONGODB_HOST, port=settings.MONGODB_PORT)
return connection[settings.MONGODB_DB][settings.MONGODB_COLLECTION]
def save_event(collection, event, timestamp, params):
<|fim_middle|>
class Event(models.Model):
"Dummy model for development."
timestamp = models.DateTimeField(auto_now_add=True)
event = models.SlugField()
params = models.TextField()
<|fim▁end|> | "Save the event in MongoDB collection"
collection.insert({
'event': event,
'timestamp': datetime.fromtimestamp(timestamp),
'params': params
}) |
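The two helpers above are meant to be used as open-then-insert. A hedged usage sketch: the module path eventtracker.models and the event payload are assumptions, and a MongoDB instance reachable with the configured settings is required.

import time
from eventtracker.models import get_mongo_collection, save_event

collection = get_mongo_collection()
save_event(collection, 'page-view', time.time(), {'path': '/home'})  # hypothetical event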
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from pymongo.connection import Connection
from django.db import models
from eventtracker.conf import settings
def get_mongo_collection():
"Open a connection to MongoDB and return the collection to use."
if settings.RIGHT_MONGODB_HOST:
connection = Connection.paired(
left=(settings.MONGODB_HOST, settings.MONGODB_PORT),
right=(settings.RIGHT_MONGODB_HOST, settings.RIGHT_MONGODB_PORT)
)
else:
connection = Connection(host=settings.MONGODB_HOST, port=settings.MONGODB_PORT)
return connection[settings.MONGODB_DB][settings.MONGODB_COLLECTION]
def save_event(collection, event, timestamp, params):
"Save the event in MongoDB collection"
collection.insert({
'event': event,
'timestamp': datetime.fromtimestamp(timestamp),
'params': params
})
class Event(models.Model):
<|fim_middle|>
<|fim▁end|> | "Dummy model for development."
timestamp = models.DateTimeField(auto_now_add=True)
event = models.SlugField()
params = models.TextField() |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from pymongo.connection import Connection
from django.db import models
from eventtracker.conf import settings
def get_mongo_collection():
"Open a connection to MongoDB and return the collection to use."
if settings.RIGHT_MONGODB_HOST:
<|fim_middle|>
else:
connection = Connection(host=settings.MONGODB_HOST, port=settings.MONGODB_PORT)
return connection[settings.MONGODB_DB][settings.MONGODB_COLLECTION]
def save_event(collection, event, timestamp, params):
"Save the event in MongoDB collection"
collection.insert({
'event': event,
'timestamp': datetime.fromtimestamp(timestamp),
'params': params
})
class Event(models.Model):
"Dummy model for development."
timestamp = models.DateTimeField(auto_now_add=True)
event = models.SlugField()
params = models.TextField()
<|fim▁end|> | connection = Connection.paired(
left=(settings.MONGODB_HOST, settings.MONGODB_PORT),
right=(settings.RIGHT_MONGODB_HOST, settings.RIGHT_MONGODB_PORT)
) |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from pymongo.connection import Connection
from django.db import models
from eventtracker.conf import settings
def get_mongo_collection():
"Open a connection to MongoDB and return the collection to use."
if settings.RIGHT_MONGODB_HOST:
connection = Connection.paired(
left=(settings.MONGODB_HOST, settings.MONGODB_PORT),
right=(settings.RIGHT_MONGODB_HOST, settings.RIGHT_MONGODB_PORT)
)
else:
<|fim_middle|>
return connection[settings.MONGODB_DB][settings.MONGODB_COLLECTION]
def save_event(collection, event, timestamp, params):
"Save the event in MongoDB collection"
collection.insert({
'event': event,
'timestamp': datetime.fromtimestamp(timestamp),
'params': params
})
class Event(models.Model):
"Dummy model for development."
timestamp = models.DateTimeField(auto_now_add=True)
event = models.SlugField()
params = models.TextField()
<|fim▁end|> | connection = Connection(host=settings.MONGODB_HOST, port=settings.MONGODB_PORT) |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from pymongo.connection import Connection
from django.db import models
from eventtracker.conf import settings
def <|fim_middle|>():
"Open a connection to MongoDB and return the collection to use."
if settings.RIGHT_MONGODB_HOST:
connection = Connection.paired(
left=(settings.MONGODB_HOST, settings.MONGODB_PORT),
right=(settings.RIGHT_MONGODB_HOST, settings.RIGHT_MONGODB_PORT)
)
else:
connection = Connection(host=settings.MONGODB_HOST, port=settings.MONGODB_PORT)
return connection[settings.MONGODB_DB][settings.MONGODB_COLLECTION]
def save_event(collection, event, timestamp, params):
"Save the event in MongoDB collection"
collection.insert({
'event': event,
'timestamp': datetime.fromtimestamp(timestamp),
'params': params
})
class Event(models.Model):
"Dummy model for development."
timestamp = models.DateTimeField(auto_now_add=True)
event = models.SlugField()
params = models.TextField()
<|fim▁end|> | get_mongo_collection |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from pymongo.connection import Connection
from django.db import models
from eventtracker.conf import settings
def get_mongo_collection():
"Open a connection to MongoDB and return the collection to use."
if settings.RIGHT_MONGODB_HOST:
connection = Connection.paired(
left=(settings.MONGODB_HOST, settings.MONGODB_PORT),
right=(settings.RIGHT_MONGODB_HOST, settings.RIGHT_MONGODB_PORT)
)
else:
connection = Connection(host=settings.MONGODB_HOST, port=settings.MONGODB_PORT)
return connection[settings.MONGODB_DB][settings.MONGODB_COLLECTION]
def <|fim_middle|>(collection, event, timestamp, params):
"Save the event in MongoDB collection"
collection.insert({
'event': event,
'timestamp': datetime.fromtimestamp(timestamp),
'params': params
})
class Event(models.Model):
"Dummy model for development."
timestamp = models.DateTimeField(auto_now_add=True)
event = models.SlugField()
params = models.TextField()
<|fim▁end|> | save_event |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):<|fim▁hole|> """
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target<|fim▁end|> | """ |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
<|fim_middle|>
<|fim▁end|> | """
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target |
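A rough sketch of instantiating the Shell class completed above, assuming Shell is in scope. DummyController and DummyArm are stand-ins exposing only the attributes Shell touches, not the project's real Control and arm classes.

import numpy as np

class DummyController(object):
    """Stand-in: just the interface Shell uses."""
    def __init__(self):
        self.target = None
        self.apply_noise = False
    def check_distance(self, arm):
        return np.linalg.norm(np.asarray(arm.x) - np.asarray(self.target))
    def control(self, arm):
        return np.zeros(2)           # placeholder torque signal

class DummyArm(object):
    x = [0.0, 0.0]                   # hand (x, y) position

shell = Shell(DummyController(), [[.3, .3], [.3, .45]], threshold=.01)
u = shell.control(DummyArm())        # one step of the target-following loop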
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
<|fim_middle|>
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
<|fim▁end|> | """
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target() |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
<|fim_middle|>
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
<|fim▁end|> | """Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
<|fim_middle|>
<|fim▁end|> | """
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target |
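The target[0] != target[0] test in the completion above is the standard NaN trick (NaN is the only value that compares unequal to itself), so a row of NaNs in target_list acts as a skip marker. A one-line illustration:

import numpy as np
target = [np.nan, np.nan]        # 'skip this entry' marker
print(target[0] != target[0])    # True, because NaN != NaN; an ordinary float gives False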
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
<|fim_middle|>
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
<|fim▁end|> | if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
<|fim_middle|>
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
<|fim▁end|> | self.target_index += 1 |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
<|fim_middle|>
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
<|fim▁end|> | target = [1, 2] |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
<|fim_middle|>
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
<|fim▁end|> | target = self.target_list[self.target_index] |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
<|fim_middle|>
else:
self.controller.target = target
<|fim▁end|> | self.target_index += 1
self.set_target() |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
<|fim_middle|>
<|fim▁end|> | self.controller.target = target |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def <|fim_middle|>(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
<|fim▁end|> | __init__ |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def <|fim_middle|>(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
<|fim▁end|> | control |
<|file_name|>target_list.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
control Control instance: the controller to use
pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def <|fim_middle|>(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
<|fim▁end|> | set_target |
<|file_name|>packet5.py<|end_file_name|><|fim▁begin|>from . import packet
<|fim▁hole|>
class Packet5(packet.Packet):
def __init__(self, player, slot):
super(Packet5, self).__init__(0x5)
self.add_data(player.playerID)
self.add_data(slot)
self.add_structured_data("<h", 0) # Stack
self.add_data(0) # Prefix
self.add_structured_data("<h", 0) # ItemID<|fim▁end|> | |
<|file_name|>packet5.py<|end_file_name|><|fim▁begin|>from . import packet
class Packet5(packet.Packet):
<|fim_middle|>
<|fim▁end|> | def __init__(self, player, slot):
super(Packet5, self).__init__(0x5)
self.add_data(player.playerID)
self.add_data(slot)
self.add_structured_data("<h", 0) # Stack
self.add_data(0) # Prefix
self.add_structured_data("<h", 0) # ItemID |
<|file_name|>packet5.py<|end_file_name|><|fim▁begin|>from . import packet
class Packet5(packet.Packet):
def __init__(self, player, slot):
<|fim_middle|>
<|fim▁end|> | super(Packet5, self).__init__(0x5)
self.add_data(player.playerID)
self.add_data(slot)
self.add_structured_data("<h", 0) # Stack
self.add_data(0) # Prefix
self.add_structured_data("<h", 0) # ItemID |
<|file_name|>packet5.py<|end_file_name|><|fim▁begin|>from . import packet
class Packet5(packet.Packet):
def <|fim_middle|>(self, player, slot):
super(Packet5, self).__init__(0x5)
self.add_data(player.playerID)
self.add_data(slot)
self.add_structured_data("<h", 0) # Stack
self.add_data(0) # Prefix
self.add_structured_data("<h", 0) # ItemID
<|fim▁end|> | __init__ |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
<|fim▁hole|>
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for prepairing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. Therefore EMAN2 or PIL needs to be
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could not find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])<|fim▁end|> | import omero
import omero.constants
from omero.rtypes import rstring |
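The __main__ fallback in the row above already shows the intended wiring. A hedged sketch of driving the rois action through the same CLI object, assuming CecogControl from the file above is in scope; the file path and image id are the placeholders from the docstring's example.

from omero.cli import CLI

cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(["cecog", "rois",
            "-f", "Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt",
            "-i", "502"])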
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
<|fim_middle|>
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | """CeCog integration plugin.
Provides actions for prepairing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. Therefore EMAN2 or PIL needs to be
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids)) |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
<|fim_middle|>
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments() |
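For readers who have not used the omero.cli parser wrappers, the sub-command layout that _configure builds above corresponds roughly to the following plain argparse sketch (parser.sub() and parser.add() are OMERO CLI conveniences; the argparse version is an approximation, not the plugin's actual code):

import argparse

parser = argparse.ArgumentParser(prog="omero cecog")
sub = parser.add_subparsers(dest="command")

merge = sub.add_parser("merge", help="Merge MetaMorph planes into OMERO images")
merge.add_argument("path", help="Path to image files")

rois = sub.add_parser("rois", help="Create ROIs from a CeCog object_details file")
rois.add_argument("-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument("-i", "--image", required=True,
                  help="Image id which should have ids attached")

# Example invocation equivalent to: bin/omero cecog rois -f object_details.txt -i 502
args = parser.parse_args(["rois", "-f", "object_details.txt", "-i", "502"])
print(args.command, args.file, args.image)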
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
<|fim_middle|>
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | """Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv)) |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
<|fim_middle|>
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | """Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids)) |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
<|fim_middle|>
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | subDirs.append(fullpath) |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
<|fim_middle|>
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | p = path[:-1] # will remove the last folder
p = os.path.dirname(p) |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
<|fim_middle|>
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | if os.path.basename(path) == "":
p = path[:-1] # remove slash |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
<|fim_middle|>
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | p = path[:-1] # remove slash |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
<|fim_middle|>
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv)) |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
<|fim_middle|>
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv)) |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
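# (Assumes 'path' ends with a path separator, as in the usage examples
# above; os.path.join(path, f) would be the safer construction.)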
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
<|fim_middle|>
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | self.ctx.die(654, "Could not find the object_details file at %s"
% filePath) |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could not find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
<|fim_middle|>
<|fim▁end|> | cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:]) |
<|file_name|>cecog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
def <|fim_middle|>(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. It therefore needs EMAN2 or PIL
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
else:
if os.path.basename(path) == "":
p = path[:-1] # remove slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could not find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
<|fim▁end|> | _configure |