prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k) |
---|---|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | """Holds all pytee logic.""" |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
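# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"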
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units<|fim▁hole|>
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)<|fim▁end|> | jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): <|fim_middle|>
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
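# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"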
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n') |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
<|fim_middle|>
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
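# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"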
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
<|fim_middle|>
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
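# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"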
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
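# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"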
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
<|fim_middle|>
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
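# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"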
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
<|fim_middle|>
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | """Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port) |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
<|fim_middle|>
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
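# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"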
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | echo2("Incoming content is "+content) |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
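# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"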
<|fim_middle|>
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
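# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"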
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
<|fim_middle|>
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label)) |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
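# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"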
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
<|fim_middle|>
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | entity_names.append(' '.join([child[0] for child in t])) |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
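# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"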
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
<|fim_middle|>
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | for child in t:
entity_names.extend(extract_entity_names(child, label)) |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
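# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"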
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
<|fim_middle|>
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | argv = sys.argv |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
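# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"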
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): <|fim_middle|>
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | echo2(USAGE); sys.exit() |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
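# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"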
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): <|fim_middle|>
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | port = int(val) |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest --port -p <port> -v units -u [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-u, --units Enable parser to extract measurements from text
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # log the error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
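# chunk grammar: a cardinal number (CD) plus optional noun/adjective tags (NNS, NN.*, JJ) forms a "unit" phrase, e.g. "10 meters"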
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): <|fim_middle|>
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | Verbose = 1 |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest [--port|-p <port>] [--verbose|-v] [--units|-u] [--help|-h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the request body.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-v, --verbose Prints verbose output and runs Flask in debug mode.
-u, --units Enables the parser that extracts measurements from text.
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): <|fim_middle|>
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | Units = 1 |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest [--port|-p <port>] [--verbose|-v] [--units|-u] [--help|-h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the request body.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-v, --verbose Prints verbose output and runs Flask in debug mode.
-u, --units Enables the parser that extracts measurements from text.
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: <|fim_middle|>
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | die(USAGE) |
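For context on calling the service these rows define: the /nltk route accepts PUT or POST with raw text in the request body and answers with a JSON document. A client sketch, assuming the server is running locally on its default port 8881 and that the requests package is installed:

import requests

# POST raw text; the server tokenizes, POS-tags and NE-chunks it,
# then returns {"result": "success", "names": [...]}.
resp = requests.post("http://localhost:8881/nltk",
                     data="Barack Obama visited Paris on 4 July 2009.")
print(resp.json()["names"])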
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest [--port|-p <port>] [--verbose|-v] [--units|-u] [--help|-h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the request body.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-v, --verbose Prints verbose output and runs Flask in debug mode.
-u, --units Enables the parser that extracts measurements from text.
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | main(sys.argv) |
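One gap worth flagging: main() and the getopt error handler both call die(), which is never defined in the file as captured in these rows. A minimal stand-in, an assumption rather than the project's original helper:

import sys

def die(msg):
    # Hypothetical fallback: report the problem on stderr and exit non-zero.
    sys.stderr.write(str(msg) + '\n')
    sys.exit(1)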
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest [--port|-p <port>] [--verbose|-v] [--units|-u] [--help|-h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the request body.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-v, --verbose Prints verbose output and runs Flask in debug mode.
-u, --units Enables the parser that extracts measurements from text.
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def <|fim_middle|>(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | echo2 |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest [--port|-p <port>] [--verbose|-v] [--units|-u] [--help|-h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the request body.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-v, --verbose Prints verbose output and runs Flask in debug mode.
-u, --units Enables the parser that extracts measurements from text.
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
app = Flask(__name__)
@app.route('/')
def <|fim_middle|>():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | status |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest [--port|-p <port>] [--verbose|-v] [--units|-u] [--help|-h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the request body.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-v, --verbose Prints verbose output and runs Flask in debug mode.
-u, --units Enables the parser that extracts measurements from text.
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def <|fim_middle|>():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | namedEntityRecognizer |
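The measurement grammar inside namedEntityRecognizer chunks a cardinal number followed by a noun. A standalone sketch of that parser on hand-tagged tokens, assuming only that nltk is installed (RegexpParser needs no downloaded corpora); the stray trailing comma from the original rule string is dropped here:

import nltk

grammar = '''unit: {<CD><NNS>?<NN.*>?}
             unit: {<CD><JJ>?<NN.*>}'''
parser = nltk.RegexpParser(grammar)
# Hand-tagged tokens stand in for nltk.pos_tag output.
tagged = [('5', 'CD'), ('kilograms', 'NNS'), ('of', 'IN'), ('rice', 'NN')]
print(parser.parse(tagged))  # ('5', 'kilograms') comes back chunked as "unit"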
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest [--port|-p <port>] [--verbose|-v] [--units|-u] [--help|-h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the request body.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-v, --verbose Prints verbose output and runs Flask in debug mode.
-u, --units Enables the parser that extracts measurements from text.
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def <|fim_middle|>(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | extract_entity_names |
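The recursive helper masked in the row above walks the parse tree and joins the word leaves of every subtree carrying the requested label. A self-contained sketch on a hand-built tree, assuming nltk is installed; the sentence is invented for illustration:

from nltk.tree import Tree

def extract_entity_names(t, label):
    # Same traversal as above: recurse until a subtree carries the
    # requested label, then join its word leaves into one string.
    entity_names = []
    if hasattr(t, 'label') and t.label:
        if t.label() == label:
            entity_names.append(' '.join(child[0] for child in t))
        else:
            for child in t:
                entity_names.extend(extract_entity_names(child, label))
    return entity_names

sent = Tree('S', [Tree('NE', [('Barack', 'NNP'), ('Obama', 'NNP')]),
                  ('visited', 'VBD'),
                  Tree('NE', [('Paris', 'NNP')])])
print(extract_entity_names(sent, 'NE'))  # ['Barack Obama', 'Paris']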
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest [--port|-p <port>] [--verbose|-v] [--units|-u] [--help|-h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the request body.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
-v, --verbose Prints verbose output and runs Flask in debug mode.
-u, --units Enables the parser that extracts measurements from text.
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def <|fim_middle|>(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
<|fim▁end|> | main |
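One subtlety repeated in every copy of main() above: ('--port') is a plain string, not a one-element tuple, so opt in ('--port') is a substring test. It happens to accept '-p' only because '-p' occurs inside '--port'; the tuple spelling tests equality instead. A quick demonstration:

print('-p' in ('--port'))       # True: substring search inside the string
print('-po' in ('--port'))      # True: any fragment of '--port' passes
print('-p' in ('--port',))      # False: the one-element tuple tests equality
print('--port' in ('--port',))  # True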
<|file_name|>example_ros_spinnaker_interface.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author Stephan Reith
@date 31.08.2016
This is a simple example to demonstrate how the ROS Spinnaker Interface can be used.
You will also need a ROS Listener and a ROS Talker to send and receive data.
Make sure they communicate over the same ROS topics and use the std_msgs.Int64 ROS messages employed here.
"""
import spynnaker.pyNN as pynn
from ros_spinnaker_interface import ROS_Spinnaker_Interface
# import transfer_functions as tf
from ros_spinnaker_interface import SpikeSourcePoisson
from ros_spinnaker_interface import SpikeSinkSmoothing
ts = 0.1
n_neurons = 1
simulation_time = 10000 # ms
pynn.setup(timestep=ts, min_delay=ts, max_delay=2.0*ts)
pop = pynn.Population(size=n_neurons, cellclass=pynn.IF_curr_exp, cellparams={}, label='pop')
# The ROS_Spinnaker_Interface just needs to be initialised. The following parameters are possible:
ros_interface = ROS_Spinnaker_Interface(
n_neurons_source=n_neurons, # number of neurons of the injector population
Spike_Source_Class=SpikeSourcePoisson, # the transfer function ROS Input -> Spikes you want to use.
Spike_Sink_Class=SpikeSinkSmoothing, # the transfer function Spikes -> ROS Output you want to use.
# You can choose from the transfer_functions module
# or write one yourself.
output_population=pop, # the pynn population you wish to receive the
# live spikes from.
ros_topic_send='to_spinnaker', # the ROS topic used for the incoming ROS values.
ros_topic_recv='from_spinnaker', # the ROS topic used for the outgoing ROS values.
clk_rate=1000, # mainloop clock (update) rate in Hz.
    ros_output_rate=10)  # number of ROS messages sent out per second.
# Build your network, run the simulation and optionally record the spikes and voltages.
pynn.Projection(ros_interface, pop, pynn.OneToOneConnector(weights=5, delays=1))
pop.record()
pop.record_v()
<|fim▁hole|>pynn.end()
# Plot
import pylab
spike_times = [spike[1] for spike in spikes]
spike_ids = [spike[0] for spike in spikes]
pylab.plot(spike_times, spike_ids, ".")
pylab.xlabel('Time (ms)')
pylab.ylabel('Neuron ID')
pylab.title('Spike Plot')
pylab.xlim(xmin=0)
pylab.show()<|fim▁end|> | pynn.run(simulation_time)
spikes = pop.getSpikes()
|
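The docstring at the top of this file asks for a ROS talker and listener on the same topics. A minimal talker sketch, assuming a ROS environment with rospy and std_msgs available; the topic name comes from the interface arguments above, while the node name and payload are placeholders:

import rospy
from std_msgs.msg import Int64

rospy.init_node('spinnaker_talker')
pub = rospy.Publisher('to_spinnaker', Int64, queue_size=10)
rate = rospy.Rate(10)  # publish at 10 Hz (arbitrary choice)
while not rospy.is_shutdown():
    pub.publish(Int64(data=42))  # arbitrary test value
    rate.sleep()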
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from __future__ import unicode_literals, print_function, division<|fim▁end|> | # -*- coding: UTF-8 -*- |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single<|fim▁hole|>class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size<|fim▁end|> | |
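The guard helpers above resolve the BigQuery project id and key file from module constants or environment variables before any test runs. A sketch of pointing them at real credentials, with placeholder values; only the variable names come from the file:

import os

# Placeholders: substitute a real project id and service-account key path.
os.environ['GBQ_PROJECT_ID'] = 'my-gcp-project'
os.environ['GBQ_GOOGLE_APPLICATION_CREDENTIALS'] = '/path/to/key.json'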
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
<|fim_middle|>
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id") |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
<|fim_middle|>
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path") |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
<|fim_middle|>
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
<|fim_middle|>
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID') |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
<|fim_middle|>
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
<|fim_middle|>
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id) |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
<|fim_middle|>
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size)) |
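
The completion in this row is the body of make_mixed_dataframe_v2. A quick, hedged check of what it produces (assumes the definitions above are in scope); note that the 'times' column is built from times[0], a single tz-aware datetime, so pandas broadcasts the same timestamp to every row:

df = make_mixed_dataframe_v2(5)
print(df.dtypes)  # bool, float64, int64, object, and a tz-aware datetime column
assert df['times'].nunique() == 1  # every row shares the first timestamp
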
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
<|fim_middle|>
<|fim▁end|> | @classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
<|fim_middle|>
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | _skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1") |
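
This row masks the setup_class body. For reference, a generic, runnable sketch of the xunit-style class lifecycle pytest applies to such a class when collected (purely illustrative, no GBQ access):

class TestLifecycle(object):
    @classmethod
    def setup_class(cls):
        cls.resource = []    # runs once, before any test in the class

    @classmethod
    def teardown_class(cls):
        cls.resource = None  # runs once, after the last test finishes

    def test_resource_ready(self):
        assert self.resource == []
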
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
<|fim_middle|>
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | clean_gbq_environment(_get_private_key_path()) |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
<|fim_middle|>
<|fim▁end|> | destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size |
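
The masked span here is the test_roundtrip body: write a frame, then verify the row count with a COUNT(*) query. The same write-then-count assertion, sketched locally with sqlite3 so it runs without credentials (table and connection names are illustrative):

import sqlite3

import pandas as pd

df = pd.DataFrame({'x': range(20001)})
con = sqlite3.connect(':memory:')
df.to_sql('new_test1', con, index=False)                    # the "write" half
count = pd.read_sql('SELECT COUNT(*) AS num_rows FROM new_test1',
                    con)['num_rows'][0]                     # the "count" half
assert count == len(df)
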
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
<|fim_middle|>
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | DATASET_ID = 'pydata_pandas_bq_testing_py3' |
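
This row masks the Python-3 branch of the DATASET_ID switch. pandas.compat.PY3 is, to my understanding, simply a boolean derived from sys.version_info, so an equivalent standalone check would be:

import sys

PY3 = sys.version_info[0] >= 3
DATASET_ID = ('pydata_pandas_bq_testing_py3' if PY3
              else 'pydata_pandas_bq_testing_py2')
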
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
<|fim_middle|>
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | DATASET_ID = 'pydata_pandas_bq_testing_py2' |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
<|fim_middle|>
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | pytest.skip(
"Cannot run integration tests without a project id") |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
<|fim_middle|>
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | pytest.skip("Cannot run integration tests without a "
"private key json file path") |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
<|fim_middle|>
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | return os.environ.get('GBQ_PROJECT_ID') |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
<|fim_middle|>
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json']) |
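
The completion here uses os.path.join(*[...]), unpacking a list into positional arguments; it is equivalent to passing the components directly. A small check (the TRAVIS_BUILD_DIR value is made up):

import os

build_dir = '/home/travis/build/pandas-dev/pandas'  # illustrative value
a = os.path.join(*[build_dir, 'ci', 'travis_gbq.json'])
b = os.path.join(build_dir, 'ci', 'travis_gbq.json')
assert a == b
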
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
<|fim_middle|>
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS') |
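
Rows like this one mask the environment-variable fallback in _get_private_key_path: the module-level constant wins, otherwise GBQ_GOOGLE_APPLICATION_CREDENTIALS is consulted. The same resolution order in a compact standalone form (a sketch, not the pandas code):

import os

PRIVATE_KEY_JSON_PATH = None  # module-level override, normally unset

def get_private_key_path():
    return (PRIVATE_KEY_JSON_PATH or
            os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS'))
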
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
<|fim_middle|>
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id) |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
<|fim_middle|>
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | table.delete(TABLE_ID + str(j)) |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def <|fim_middle|>():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | _skip_if_no_project_id |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def <|fim_middle|>():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | _skip_if_no_private_key_path |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def <|fim_middle|>():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | _in_travis_environment |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def <|fim_middle|>():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | _get_project_id |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def <|fim_middle|>():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | _get_private_key_path |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def <|fim_middle|>(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | clean_gbq_environment |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def <|fim_middle|>(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | make_mixed_dataframe_v2 |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def <|fim_middle|>(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | setup_class |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def <|fim_middle|>(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | teardown_class |
<|file_name|>test_gbq.py<|end_file_name|><|fim▁begin|>import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def <|fim_middle|>(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
<|fim▁end|> | test_roundtrip |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
<|fim▁hole|>
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]<|fim▁end|> | |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
<|fim_middle|>
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""") |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
<|fim_middle|>
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""") |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
<|fim_middle|>
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""") |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
<|fim_middle|>
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""") |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
<|fim_middle|>
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""") |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
<|fim_middle|>
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""") |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
<|fim_middle|>
<|fim▁end|> | functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
] |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def <|fim_middle|>(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | test_fileobj |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def <|fim_middle|>(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | test_sep |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def <|fim_middle|>(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | test_end |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def <|fim_middle|>(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | test_flush |
<|file_name|>test_print.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def <|fim_middle|>(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
<|fim▁end|> | test_combined |
<|file_name|>collectionsexample.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import itertools
import collections
def read_table(filename):
with open(filename) as fp:
header = next(fp).split()
rows = [line.split()[1:] for line in fp if line.strip()]
columns = zip(*rows)
data = dict(zip(header, columns))
return data
table = read_table("../../data/colldata.txt")
pots = sorted(table)
alphabet = "+-?"
for num in range(2, len(table) + 1):
for group in itertools.combinations(pots, num):<|fim▁hole|> for poss in itertools.product(alphabet, repeat=num):
print ', '.join(group) + ':',
print ''.join(poss), counts[poss]<|fim▁end|> | patterns = zip(*[table[p] for p in group])
counts = collections.Counter(patterns) |
<|file_name|>collectionsexample.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import itertools
import collections
def read_table(filename):
<|fim_middle|>
table = read_table("../../data/colldata.txt")
pots = sorted(table)
alphabet = "+-?"
for num in range(2, len(table) + 1):
for group in itertools.combinations(pots, num):
patterns = zip(*[table[p] for p in group])
counts = collections.Counter(patterns)
for poss in itertools.product(alphabet, repeat=num):
print ', '.join(group) + ':',
print ''.join(poss), counts[poss]
<|fim▁end|> | with open(filename) as fp:
header = next(fp).split()
rows = [line.split()[1:] for line in fp if line.strip()]
columns = zip(*rows)
data = dict(zip(header, columns))
return data |
<|file_name|>collectionsexample.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import itertools
import collections
def <|fim_middle|>(filename):
with open(filename) as fp:
header = next(fp).split()
rows = [line.split()[1:] for line in fp if line.strip()]
columns = zip(*rows)
data = dict(zip(header, columns))
return data
table = read_table("../../data/colldata.txt")
pots = sorted(table)
alphabet = "+-?"
for num in range(2, len(table) + 1):
for group in itertools.combinations(pots, num):
patterns = zip(*[table[p] for p in group])
counts = collections.Counter(patterns)
for poss in itertools.product(alphabet, repeat=num):
print ', '.join(group) + ':',
print ''.join(poss), counts[poss]
<|fim▁end|> | read_table |
<|file_name|>replace-backends-pass.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#!/usr/bin/env python3
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
print('the backends file does not exist')
sys.exit(os.EX_NOINPUT)
if not shutil.which('pass'):
print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE)
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
print('password = `pass show weboob/%s`' % backend, file=outp)
continue
else:
errors += 1
print('warning: could not store password for backend %r' % backend)
mtc = re.match(r'\[(.+)\]', line)
if mtc:
backend = mtc.group(1)
if backend in seen:
print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR)
seen.add(backend)
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2)<|fim▁end|> | |
<|file_name|>replace-backends-pass.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
<|fim_middle|>
if not shutil.which('pass'):
print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE)
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
print('password = `pass show weboob/%s`' % backend, file=outp)
continue
else:
errors += 1
print('warning: could not store password for backend %r' % backend)
mtc = re.match(r'\[(.+)\]', line)
if mtc:
backend = mtc.group(1)
if backend in seen:
print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR)
seen.add(backend)
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2)
<|fim▁end|> | print('the backends file does not exist')
sys.exit(os.EX_NOINPUT) |
<|file_name|>replace-backends-pass.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
print('the backends file does not exist')
sys.exit(os.EX_NOINPUT)
if not shutil.which('pass'):
<|fim_middle|>
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
print('password = `pass show weboob/%s`' % backend, file=outp)
continue
else:
errors += 1
print('warning: could not store password for backend %r' % backend)
mtc = re.match(r'\[(.+)\]', line)
if mtc:
backend = mtc.group(1)
if backend in seen:
print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR)
seen.add(backend)
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2)
<|fim▁end|> | print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE) |
<|file_name|>replace-backends-pass.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
print('the backends file does not exist')
sys.exit(os.EX_NOINPUT)
if not shutil.which('pass'):
print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE)
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
<|fim_middle|>
mtc = re.match(r'\[(.+)\]', line)
if mtc:
backend = mtc.group(1)
if backend in seen:
print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR)
seen.add(backend)
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2)
<|fim▁end|> | cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
print('password = `pass show weboob/%s`' % backend, file=outp)
continue
else:
errors += 1
print('warning: could not store password for backend %r' % backend) |
<|file_name|>replace-backends-pass.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
print('the backends file does not exist')
sys.exit(os.EX_NOINPUT)
if not shutil.which('pass'):
print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE)
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
<|fim_middle|>
else:
errors += 1
print('warning: could not store password for backend %r' % backend)
mtc = re.match(r'\[(.+)\]', line)
if mtc:
backend = mtc.group(1)
if backend in seen:
print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR)
seen.add(backend)
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2)
<|fim▁end|> | print('password = `pass show weboob/%s`' % backend, file=outp)
continue |
<|file_name|>replace-backends-pass.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
print('the backends file does not exist')
sys.exit(os.EX_NOINPUT)
if not shutil.which('pass'):
print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE)
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
print('password = `pass show weboob/%s`' % backend, file=outp)
continue
else:
<|fim_middle|>
mtc = re.match(r'\[(.+)\]', line)
if mtc:
backend = mtc.group(1)
if backend in seen:
print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR)
seen.add(backend)
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2)
<|fim▁end|> | errors += 1
print('warning: could not store password for backend %r' % backend) |
<|file_name|>replace-backends-pass.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
print('the backends file does not exist')
sys.exit(os.EX_NOINPUT)
if not shutil.which('pass'):
print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE)
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
print('password = `pass show weboob/%s`' % backend, file=outp)
continue
else:
errors += 1
print('warning: could not store password for backend %r' % backend)
mtc = re.match(r'\[(.+)\]', line)
if mtc:
<|fim_middle|>
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2)
<|fim▁end|> | backend = mtc.group(1)
if backend in seen:
print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR)
seen.add(backend) |
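This row's hole is the section-header branch; two regexes drive all of the parsing in these samples. A quick demonstration of what each one matches, on illustrative inputs:

import re

for line in ['password = 78910',
             'password = `pass show weboob/x`',
             '[bnporc21]']:
    m = re.match(r'password\s*=\s*(\S.*)$', line)
    if m:
        # The backtick check is what skips already-converted lines.
        print('password value:', m.group(1),
              '| already wrapped:', m.group(1).startswith('`'))
    m = re.match(r'\[(.+)\]', line)
    if m:
        print('section:', m.group(1))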
<|file_name|>replace-backends-pass.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
print('the backends file does not exist')
sys.exit(os.EX_NOINPUT)
if not shutil.which('pass'):
print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE)
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
print('password = `pass show weboob/%s`' % backend, file=outp)
continue
else:
errors += 1
print('warning: could not store password for backend %r' % backend)
mtc = re.match(r'\[(.+)\]', line)
if mtc:
backend = mtc.group(1)
if backend in seen:
<|fim_middle|>
seen.add(backend)
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2)
<|fim▁end|> | print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR) |
<|file_name|>replace-backends-pass.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
print('the backends file does not exist')
sys.exit(os.EX_NOINPUT)
if not shutil.which('pass'):
print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE)
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
print('password = `pass show weboob/%s`' % backend, file=outp)
continue
else:
errors += 1
print('warning: could not store password for backend %r' % backend)
mtc = re.match(r'\[(.+)\]', line)
if mtc:
backend = mtc.group(1)
if backend in seen:
print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR)
seen.add(backend)
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
<|fim_middle|>
<|fim▁end|> | print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2) |
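The samples above detect duplicate [section] headers with a hand-rolled `seen` set. For comparison, the standard library reaches the same result: configparser in its default strict mode raises on duplicate sections. A short sketch of that alternative (not what the script itself uses):

import configparser

parser = configparser.ConfigParser(strict=True)
try:
    parser.read_string('[bnporc21]\nlogin = 1\n[bnporc21]\nlogin = 2\n')
except configparser.DuplicateSectionError as exc:
    print('duplicate backend: %s' % exc.section)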
<|file_name|>actionpotential.py<|end_file_name|><|fim▁begin|>import os
import webapp2
from actions import cronActions
from views import views
import secrets
SECS_PER_WEEK = 60 * 60 * 24 * 7<|fim▁hole|>PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
ROOT_DIRECTORY = os.path.dirname(__file__)
if not PRODUCTION_MODE:
from google.appengine.tools.devappserver2.python import sandbox
sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'src')
else:
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'dist')
curr_path = os.path.abspath(os.path.dirname(__file__))
config = {
'webapp2_extras.sessions': {
'secret_key': secrets.COOKIE_KEY,
'session_max_age': SECS_PER_WEEK,
'cookie_args': {'max_age': SECS_PER_WEEK},
'cookie_name': 'echo_sense_session'
},
'webapp2_extras.jinja2': {
'template_path': TEMPLATE_DIRECTORY
}
}
app = webapp2.WSGIApplication(
[
# Cron jobs (see cron.yaml)
webapp2.Route('/cron/monthly', handler=cronActions.Monthly),
webapp2.Route(r'/<:.*>', handler=views.ActionPotentialApp, name="ActionPotentialApp"),
], debug=True, config=config)<|fim▁end|> | # Enable ctypes -> Jinja2 tracebacks |
<|file_name|>actionpotential.py<|end_file_name|><|fim▁begin|>import os
import webapp2
from actions import cronActions
from views import views
import secrets
SECS_PER_WEEK = 60 * 60 * 24 * 7
# Enable ctypes -> Jinja2 tracebacks
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
ROOT_DIRECTORY = os.path.dirname(__file__)
if not PRODUCTION_MODE:
<|fim_middle|>
else:
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'dist')
curr_path = os.path.abspath(os.path.dirname(__file__))
config = {
'webapp2_extras.sessions': {
'secret_key': secrets.COOKIE_KEY,
'session_max_age': SECS_PER_WEEK,
'cookie_args': {'max_age': SECS_PER_WEEK},
'cookie_name': 'echo_sense_session'
},
'webapp2_extras.jinja2': {
'template_path': TEMPLATE_DIRECTORY
}
}
app = webapp2.WSGIApplication(
[
# Cron jobs (see cron.yaml)
webapp2.Route('/cron/monthly', handler=cronActions.Monthly),
webapp2.Route(r'/<:.*>', handler=views.ActionPotentialApp, name="ActionPotentialApp"),
], debug=True, config=config)
<|fim▁end|> | from google.appengine.tools.devappserver2.python import sandbox
sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'src') |
<|file_name|>actionpotential.py<|end_file_name|><|fim▁begin|>import os
import webapp2
from actions import cronActions
from views import views
import secrets
SECS_PER_WEEK = 60 * 60 * 24 * 7
# Enable ctypes -> Jinja2 tracebacks
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
ROOT_DIRECTORY = os.path.dirname(__file__)
if not PRODUCTION_MODE:
from google.appengine.tools.devappserver2.python import sandbox
sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'src')
else:
<|fim_middle|>
curr_path = os.path.abspath(os.path.dirname(__file__))
config = {
'webapp2_extras.sessions': {
'secret_key': secrets.COOKIE_KEY,
'session_max_age': SECS_PER_WEEK,
'cookie_args': {'max_age': SECS_PER_WEEK},
'cookie_name': 'echo_sense_session'
},
'webapp2_extras.jinja2': {
'template_path': TEMPLATE_DIRECTORY
}
}
app = webapp2.WSGIApplication(
[
# Cron jobs (see cron.yaml)
webapp2.Route('/cron/monthly', handler=cronActions.Monthly),
webapp2.Route(r'/<:.*>', handler=views.ActionPotentialApp, name="ActionPotentialApp"),
], debug=True, config=config)
<|fim▁end|> | TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'dist') |
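The actionpotential.py samples route every path to a single handler through the catch-all pattern r'/<:.*>'. A minimal runnable sketch of that routing style, assuming the webapp2 framework these rows import; the handler name is illustrative, and an unnamed <:regex> group is passed to the handler method as a positional argument:

import webapp2

class CatchAllHandler(webapp2.RequestHandler):
    def get(self, path):
        # `path` receives whatever the unnamed <:.*> group matched.
        self.response.write('requested: %s' % path)

app = webapp2.WSGIApplication([
    webapp2.Route(r'/<:.*>', handler=CatchAllHandler),
], debug=True)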
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()<|fim▁hole|> return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)<|fim▁end|> | if not open_anyway: |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
<|fim_middle|>
<|fim▁end|> | def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index) |
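The `connect` calls in the main_ctrl.py samples suggest Qt-style signals on the model; the actual signal implementation is not shown in these rows. A minimal PyQt5 sketch of the same begin/progress pattern, where the class and signal wiring are stand-ins rather than the project's real code:

from PyQt5.QtCore import QObject, pyqtSignal

class FetchModel(QObject):
    begin_job_fetch = pyqtSignal(int)            # emitted with the step count
    update_job_fetch_progress = pyqtSignal(int)  # emitted once per step

    def fetch(self, steps):
        self.begin_job_fetch.emit(steps)
        for i in range(steps):
            self.update_job_fetch_progress.emit(i + 1)

model = FetchModel()
model.begin_job_fetch.connect(lambda n: print('fetching in %d steps' % n))
model.update_job_fetch_progress.connect(print)
model.fetch(3)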
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
<|fim_middle|>
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job) |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
<|fim_middle|>
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | self.main_view = main_view
self.init_hotkeys() |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
<|fim_middle|>
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection() |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
<|fim_middle|>
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num) |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
<|fim_middle|>
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | self.main_model.cancel_job_fetch() |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
<|fim_middle|>
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | self.main_view.show_job_fetch_progress_dialog(max) |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
<|fim_middle|>
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | self.main_view.update_job_fetch_progress_dialog(progress) |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
<|fim_middle|>
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name) |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
<|fim_middle|>
<|fim▁end|> | job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index) |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
<|fim_middle|>
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | self.main_view.show_job_already_exists_dialog()
return |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
<|fim_middle|>
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
<|fim_middle|>
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | return |
<|file_name|>main_ctrl.py<|end_file_name|><|fim▁begin|>from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def <|fim_middle|>(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
<|fim▁end|> | __init__ |
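Taken together, each row in this split pairs a prompt built from the code around a hole with the missing span as the completion, using the <|fim▁begin|>, <|fim▁hole|> (or <|fim_middle|>), and <|fim▁end|> markers seen above. A sketch of how such a sample can be assembled; the exact marker choice and tokenization used at training time are assumptions:

def build_fim_sample(prefix, middle, suffix,
                     begin='<|fim▁begin|>', hole='<|fim▁hole|>',
                     end='<|fim▁end|>'):
    # Prompt: the code before and after the hole; completion: the hole itself.
    prompt = begin + prefix + hole + suffix + end
    return prompt, middle

prompt, completion = build_fim_sample(
    'def add(a, b):\n    ', 'return a + b', '\n\nprint(add(1, 2))\n')
print(prompt)
print(completion)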