prompt (large_string, lengths 72-9.34k) | completion (large_string, lengths 0-7.61k) |
---|---|
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
<|fim_middle|>
@classmethod
def from_dir(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def collect_dir(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def collect_file(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
<|fim▁end|> | suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite |
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite
@classmethod
def from_dir(cls, dir_path, **kwargs):
<|fim_middle|>
@classmethod
def collect_dir(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def collect_file(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
<|fim▁end|> | return cls.from_targets(dir_path, **kwargs) |
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite
@classmethod
def from_dir(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def collect_dir(cls, dir_path, recursive=True):
<|fim_middle|>
@classmethod
def collect_file(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
<|fim▁end|> | tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests |
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite
@classmethod
def from_dir(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def collect_dir(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def collect_file(cls, target_path, base_path=None):
<|fim_middle|>
<|fim▁end|> | logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
) |
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
<|fim_middle|>
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite
@classmethod
def from_dir(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def collect_dir(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def collect_file(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
<|fim▁end|> | target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests) |
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
<|fim_middle|>
suite.addTests(tests)
return suite
@classmethod
def from_dir(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def collect_dir(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def collect_file(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
<|fim▁end|> | for target_test in cls.collect_file(
target_path
):
tests.append(target_test) |
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def <|fim_middle|>(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite
@classmethod
def from_dir(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def collect_dir(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def collect_file(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
<|fim▁end|> | from_targets |
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite
@classmethod
def <|fim_middle|>(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def collect_dir(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def collect_file(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
<|fim▁end|> | from_dir |
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite
@classmethod
def from_dir(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def <|fim_middle|>(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def collect_file(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
<|fim▁end|> | collect_dir |
<|file_name|>suite.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warning(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite
@classmethod
def from_dir(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def collect_dir(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def <|fim_middle|>(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
<|fim▁end|> | collect_file |
<|file_name|>test_send_ai_pics_stats.py<|end_file_name|><|fim▁begin|>from unittest import TestCase
<|fim▁hole|>
class SendAiPicsStatsTestCase(TestCase):
def test_run_command(self):
call_command('send_ai_pics_stats')<|fim▁end|> | from django.core.management import call_command
|
<|file_name|>test_send_ai_pics_stats.py<|end_file_name|><|fim▁begin|>from unittest import TestCase
from django.core.management import call_command
class SendAiPicsStatsTestCase(TestCase):
<|fim_middle|>
<|fim▁end|> | def test_run_command(self):
call_command('send_ai_pics_stats') |
<|file_name|>test_send_ai_pics_stats.py<|end_file_name|><|fim▁begin|>from unittest import TestCase
from django.core.management import call_command
class SendAiPicsStatsTestCase(TestCase):
def test_run_command(self):
<|fim_middle|>
<|fim▁end|> | call_command('send_ai_pics_stats') |
<|file_name|>test_send_ai_pics_stats.py<|end_file_name|><|fim▁begin|>from unittest import TestCase
from django.core.management import call_command
class SendAiPicsStatsTestCase(TestCase):
def <|fim_middle|>(self):
call_command('send_ai_pics_stats')
<|fim▁end|> | test_run_command |
<|file_name|>jp2kakadu.py<|end_file_name|><|fim▁begin|># Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import glob
import subprocess
import pdb
"""
This is a script to convert jp2 to tiff for Mitra's Data. \
We use Kakadu software for this script. Kakadu only runs on Ubuntu \
and has to have the library added to shared path.
"""
def main():
parser = argparse.ArgumentParser(description='Convert JP2 to PNG')
parser.add_argument('path', action="store", help='Directory with JP2 Files')
parser.add_argument('location', action="store", help='Directory to write to')
result = parser.parse_args()
# Reading all the jp2 files in that directory
filelist = glob.glob(result.path+'*.jp2')
for name in filelist:
print "Opening: {}".format( name )
# Identifying the subdirectory to place the data under
if name.find('F') != -1:<|fim▁hole|> elif name.find('IHC') != -1:
subfile = 'IHC/'
elif name.find('N') != -1:
subfile = 'N/'
# Determine the write location of the file. This was /mnt on datascopes
writelocation = result.location+subfile+name.split(result.path)[1].split('_')[3].split('.')[0]
# Call kakadu expand from the command line, specify the input and the output filenames
subprocess.call( [ './kdu_expand' ,'-i', '{}'.format(name), '-o', '{}.tiff'.format(writelocation) ] )
if __name__ == "__main__":
main()<|fim▁end|> | subfile = 'F/' |
<|file_name|>jp2kakadu.py<|end_file_name|><|fim▁begin|># Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import glob
import subprocess
import pdb
"""
This is a script to convert jp2 to tiff for Mitra's Data. \
We use Kakadu software for this script. Kakadu only runs on Ubuntu \
and has to have the library added to shared path.
"""
def main():
<|fim_middle|>
if __name__ == "__main__":
main()
<|fim▁end|> | parser = argparse.ArgumentParser(description='Convert JP2 to PNG')
parser.add_argument('path', action="store", help='Directory with JP2 Files')
parser.add_argument('location', action="store", help='Directory to write to')
result = parser.parse_args()
# Reading all the jp2 files in that directory
filelist = glob.glob(result.path+'*.jp2')
for name in filelist:
print "Opening: {}".format( name )
# Identifying the subdirectory to place the data under
if name.find('F') != -1:
subfile = 'F/'
elif name.find('IHC') != -1:
subfile = 'IHC/'
elif name.find('N') != -1:
subfile = 'N/'
# Determine the write location of the file. This was /mnt on datascopes
writelocation = result.location+subfile+name.split(result.path)[1].split('_')[3].split('.')[0]
# Call kakadu expand from the command line, specify the input and the output filenames
subprocess.call( [ './kdu_expand' ,'-i', '{}'.format(name), '-o', '{}.tiff'.format(writelocation) ] ) |
<|file_name|>jp2kakadu.py<|end_file_name|><|fim▁begin|># Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import glob
import subprocess
import pdb
"""
This is a script to convert jp2 to tiff for Mitra's Data. \
We use Kakadu software for this script. Kakadu only runs on Ubuntu \
and has to have the library added to shared path.
"""
def main():
parser = argparse.ArgumentParser(description='Convert JP2 to PNG')
parser.add_argument('path', action="store", help='Directory with JP2 Files')
parser.add_argument('location', action="store", help='Directory to write to')
result = parser.parse_args()
# Reading all the jp2 files in that directory
filelist = glob.glob(result.path+'*.jp2')
for name in filelist:
print "Opening: {}".format( name )
# Identifying the subdirectory to place the data under
if name.find('F') != -1:
<|fim_middle|>
elif name.find('IHC') != -1:
subfile = 'IHC/'
elif name.find('N') != -1:
subfile = 'N/'
# Determine the write location of the file. This was /mnt on datascopes
writelocation = result.location+subfile+name.split(result.path)[1].split('_')[3].split('.')[0]
# Call kakadu expand from the command line, specify the input and the output filenames
subprocess.call( [ './kdu_expand' ,'-i', '{}'.format(name), '-o', '{}.tiff'.format(writelocation) ] )
if __name__ == "__main__":
main()
<|fim▁end|> | subfile = 'F/' |
<|file_name|>jp2kakadu.py<|end_file_name|><|fim▁begin|># Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import glob
import subprocess
import pdb
"""
This is a script to convert jp2 to tiff for Mitra's Data. \
We use Kakadu software for this script. Kakadu only runs on Ubuntu \
and has to have the library added to shared path.
"""
def main():
parser = argparse.ArgumentParser(description='Convert JP2 to PNG')
parser.add_argument('path', action="store", help='Directory with JP2 Files')
parser.add_argument('location', action="store", help='Directory to write to')
result = parser.parse_args()
# Reading all the jp2 files in that directory
filelist = glob.glob(result.path+'*.jp2')
for name in filelist:
print "Opening: {}".format( name )
# Identifying the subdirectory to place the data under
if name.find('F') != -1:
subfile = 'F/'
elif name.find('IHC') != -1:
<|fim_middle|>
elif name.find('N') != -1:
subfile = 'N/'
# Determine the write location of the file. This was /mnt on datascopes
writelocation = result.location+subfile+name.split(result.path)[1].split('_')[3].split('.')[0]
# Call kakadu expand from the command line, specify the input and the output filenames
subprocess.call( [ './kdu_expand' ,'-i', '{}'.format(name), '-o', '{}.tiff'.format(writelocation) ] )
if __name__ == "__main__":
main()
<|fim▁end|> | subfile = 'IHC/' |
<|file_name|>jp2kakadu.py<|end_file_name|><|fim▁begin|># Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import glob
import subprocess
import pdb
"""
This is a script to convert jp2 to tiff for Mitra's Data. \
We use Kakadu software for this script. Kakadu only runs on Ubuntu \
and has to have the library added to shared path.
"""
def main():
parser = argparse.ArgumentParser(description='Convert JP2 to PNG')
parser.add_argument('path', action="store", help='Directory with JP2 Files')
parser.add_argument('location', action="store", help='Directory to write to')
result = parser.parse_args()
# Reading all the jp2 files in that directory
filelist = glob.glob(result.path+'*.jp2')
for name in filelist:
print "Opening: {}".format( name )
# Identifying the subdirectory to place the data under
if name.find('F') != -1:
subfile = 'F/'
elif name.find('IHC') != -1:
subfile = 'IHC/'
elif name.find('N') != -1:
<|fim_middle|>
# Determine the write location of the file. This was /mnt on datascopes
writelocation = result.location+subfile+name.split(result.path)[1].split('_')[3].split('.')[0]
# Call kakadu expand from the command line, specify the input and the output filenames
subprocess.call( [ './kdu_expand' ,'-i', '{}'.format(name), '-o', '{}.tiff'.format(writelocation) ] )
if __name__ == "__main__":
main()
<|fim▁end|> | subfile = 'N/' |
<|file_name|>jp2kakadu.py<|end_file_name|><|fim▁begin|># Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import glob
import subprocess
import pdb
"""
This is a script to convert jp2 to tiff for Mitra's Data. \
We use Kakadu software for this script. Kakadu only runs on Ubuntu \
and has to have the library added to shared path.
"""
def main():
parser = argparse.ArgumentParser(description='Convert JP2 to PNG')
parser.add_argument('path', action="store", help='Directory with JP2 Files')
parser.add_argument('location', action="store", help='Directory to write to')
result = parser.parse_args()
# Reading all the jp2 files in that directory
filelist = glob.glob(result.path+'*.jp2')
for name in filelist:
print "Opening: {}".format( name )
# Identifying the subdirectory to place the data under
if name.find('F') != -1:
subfile = 'F/'
elif name.find('IHC') != -1:
subfile = 'IHC/'
elif name.find('N') != -1:
subfile = 'N/'
# Determine the write location of the file. This was /mnt on datascopes
writelocation = result.location+subfile+name.split(result.path)[1].split('_')[3].split('.')[0]
# Call kakadu expand from the command line, specify the input and the output filenames
subprocess.call( [ './kdu_expand' ,'-i', '{}'.format(name), '-o', '{}.tiff'.format(writelocation) ] )
if __name__ == "__main__":
<|fim_middle|>
<|fim▁end|> | main() |
<|file_name|>jp2kakadu.py<|end_file_name|><|fim▁begin|># Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import glob
import subprocess
import pdb
"""
This is a script to convert jp2 to tiff for Mitra's Data. \
We use Kakadu software for this script. Kakadu only runs on Ubuntu \
and has to have the library added to shared path.
"""
def <|fim_middle|>():
parser = argparse.ArgumentParser(description='Convert JP2 to PNG')
parser.add_argument('path', action="store", help='Directory with JP2 Files')
parser.add_argument('location', action="store", help='Directory to write to')
result = parser.parse_args()
# Reading all the jp2 files in that directory
filelist = glob.glob(result.path+'*.jp2')
for name in filelist:
print "Opening: {}".format( name )
# Identifying the subdirectory to place the data under
if name.find('F') != -1:
subfile = 'F/'
elif name.find('IHC') != -1:
subfile = 'IHC/'
elif name.find('N') != -1:
subfile = 'N/'
# Determine the write location of the file. This was /mnt on datascopes
writelocation = result.location+subfile+name.split(result.path)[1].split('_')[3].split('.')[0]
# Call kakadu expand from the command line, specify the input and the output filenames
subprocess.call( [ './kdu_expand' ,'-i', '{}'.format(name), '-o', '{}.tiff'.format(writelocation) ] )
if __name__ == "__main__":
main()
<|fim▁end|> | main |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def msg(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def processline(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line<|fim▁hole|> for l in linesep:
processline(l)
continue
processline(line)<|fim▁end|> | line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n") |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
<|fim_middle|>
def msg(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def processline(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue
processline(line)
<|fim▁end|> | irc.send(msg + "\r\n")
print "{SENT} " + msg
return |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def msg(user, msg):
<|fim_middle|>
def processline(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue
processline(line)
<|fim▁end|> | send("PRIVMSG " + user + " :" + msg)
return |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def msg(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def processline(line):
<|fim_middle|>
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue
processline(line)
<|fim▁end|> | parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def msg(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def processline(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
<|fim_middle|>
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue
processline(line)
<|fim▁end|> | args.append(parts[1]) |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def msg(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def processline(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
<|fim_middle|>
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue
processline(line)
<|fim▁end|> | send("PONG :" + args[1])
return |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def msg(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def processline(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
<|fim_middle|>
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue
processline(line)
<|fim▁end|> | questions = str(Question.objects.all())
msg(args[2], questions)
return |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def msg(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def processline(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
<|fim_middle|>
processline(line)
<|fim▁end|> | linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def <|fim_middle|>(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def msg(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def processline(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue
processline(line)
<|fim▁end|> | send |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def <|fim_middle|>(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def processline(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue
processline(line)
<|fim▁end|> | msg |
<|file_name|>bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SENT} " + msg
return
def msg(user, msg):
send("PRIVMSG " + user + " :" + msg)
return
def <|fim_middle|>(line):
parts = line.split(' :',1)
args = parts[0].split(' ')
if (len(parts) > 1):
args.append(parts[1])
if args[0] == "PING":
send("PONG :" + args[1])
return
try:
if args[3] == "!questions":
questions = str(Question.objects.all())
msg(args[2], questions)
return
except IndexError:
return
# When we're done, remember to return.
return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# Read from the socket and dispatch each complete line
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split("\r\n")
for l in linesep:
processline(l)
continue
processline(line)
<|fim▁end|> | processline |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python<|fim▁hole|>
import codecs
import os
from setuptools import setup, find_packages
def requirements():
"""Build the requirements list for this project"""
requirements_list = []
with open('requirements.txt') as requirements:
for install in requirements:
requirements_list.append(install.strip())
return requirements_list
packages = find_packages(exclude=['tests*'])
with codecs.open('README.rst', 'r', 'utf-8') as fd:
fn = os.path.join('telegram', 'version.py')
with open(fn) as fh:
code = compile(fh.read(), fn, 'exec')
exec(code)
setup(name='python-telegram-bot',
version=__version__,
author='Leandro Toledo',
author_email='[email protected]',
license='LGPLv3',
url='https://python-telegram-bot.org/',
keywords='python telegram bot api wrapper',
description="We have made you a wrapper you can't refuse",
long_description=fd.read(),
packages=packages,
install_requires=requirements(),
extras_require={
'json': 'ujson',
'socks': 'PySocks'
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],)<|fim▁end|> | """The setup and build script for the python-telegram-bot library.""" |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""The setup and build script for the python-telegram-bot library."""
import codecs
import os
from setuptools import setup, find_packages
def requirements():
<|fim_middle|>
packages = find_packages(exclude=['tests*'])
with codecs.open('README.rst', 'r', 'utf-8') as fd:
fn = os.path.join('telegram', 'version.py')
with open(fn) as fh:
code = compile(fh.read(), fn, 'exec')
exec(code)
setup(name='python-telegram-bot',
version=__version__,
author='Leandro Toledo',
author_email='[email protected]',
license='LGPLv3',
url='https://python-telegram-bot.org/',
keywords='python telegram bot api wrapper',
description="We have made you a wrapper you can't refuse",
long_description=fd.read(),
packages=packages,
install_requires=requirements(),
extras_require={
'json': 'ujson',
'socks': 'PySocks'
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],)
<|fim▁end|> | """Build the requirements list for this project"""
requirements_list = []
with open('requirements.txt') as requirements:
for install in requirements:
requirements_list.append(install.strip())
return requirements_list |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""The setup and build script for the python-telegram-bot library."""
import codecs
import os
from setuptools import setup, find_packages
def <|fim_middle|>():
"""Build the requirements list for this project"""
requirements_list = []
with open('requirements.txt') as requirements:
for install in requirements:
requirements_list.append(install.strip())
return requirements_list
packages = find_packages(exclude=['tests*'])
with codecs.open('README.rst', 'r', 'utf-8') as fd:
fn = os.path.join('telegram', 'version.py')
with open(fn) as fh:
code = compile(fh.read(), fn, 'exec')
exec(code)
setup(name='python-telegram-bot',
version=__version__,
author='Leandro Toledo',
author_email='[email protected]',
license='LGPLv3',
url='https://python-telegram-bot.org/',
keywords='python telegram bot api wrapper',
description="We have made you a wrapper you can't refuse",
long_description=fd.read(),
packages=packages,
install_requires=requirements(),
extras_require={
'json': 'ujson',
'socks': 'PySocks'
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],)
<|fim▁end|> | requirements |
<|file_name|>inctest_runme.py<|end_file_name|><|fim▁begin|>import inctest
error = 0
try:
a = inctest.A()
except:
print "didn't find A"
print "therefore, I didn't include 'testdir/subdir1/hello.i'"
error = 1
pass
try:
b = inctest.B()
except:
print "didn't find B"
print "therefore, I didn't include 'testdir/subdir2/hello.i'"
error = 1
pass
if error == 1:
raise RuntimeError<|fim▁hole|> raise RuntimeError
if inctest.importtest2("black") != "white":
print "import test 2 failed"
raise RuntimeError<|fim▁end|> |
# Check the import in subdirectory worked
if inctest.importtest1(5) != 15:
print "import test 1 failed" |
<|file_name|>inctest_runme.py<|end_file_name|><|fim▁begin|>import inctest
error = 0
try:
a = inctest.A()
except:
print "didn't find A"
print "therefore, I didn't include 'testdir/subdir1/hello.i'"
error = 1
pass
try:
b = inctest.B()
except:
print "didn't find B"
print "therefore, I didn't include 'testdir/subdir2/hello.i'"
error = 1
pass
if error == 1:
<|fim_middle|>
# Check the import in subdirectory worked
if inctest.importtest1(5) != 15:
print "import test 1 failed"
raise RuntimeError
if inctest.importtest2("black") != "white":
print "import test 2 failed"
raise RuntimeError
<|fim▁end|> | raise RuntimeError |
<|file_name|>inctest_runme.py<|end_file_name|><|fim▁begin|>import inctest
error = 0
try:
a = inctest.A()
except:
print "didn't find A"
print "therefore, I didn't include 'testdir/subdir1/hello.i'"
error = 1
pass
try:
b = inctest.B()
except:
print "didn't find B"
print "therefore, I didn't include 'testdir/subdir2/hello.i'"
error = 1
pass
if error == 1:
raise RuntimeError
# Check the import in subdirectory worked
if inctest.importtest1(5) != 15:
<|fim_middle|>
if inctest.importtest2("black") != "white":
print "import test 2 failed"
raise RuntimeError
<|fim▁end|> | print "import test 1 failed"
raise RuntimeError |
<|file_name|>inctest_runme.py<|end_file_name|><|fim▁begin|>import inctest
error = 0
try:
a = inctest.A()
except:
print "didn't find A"
print "therefore, I didn't include 'testdir/subdir1/hello.i'"
error = 1
pass
try:
b = inctest.B()
except:
print "didn't find B"
print "therefore, I didn't include 'testdir/subdir2/hello.i'"
error = 1
pass
if error == 1:
raise RuntimeError
# Check the import in subdirectory worked
if inctest.importtest1(5) != 15:
print "import test 1 failed"
raise RuntimeError
if inctest.importtest2("black") != "white":
<|fim_middle|>
<|fim▁end|> | print "import test 2 failed"
raise RuntimeError |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys<|fim▁hole|>sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0=baddest to 10=goodest '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds formatted crap
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()<|fim▁end|> | |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
<|fim_middle|>
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | ''' Tests with numbered degrees of bad or good data, on a scale of 0=baddest to 10=goodest '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds formatted crap
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = "" |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0=baddest to 10=goodest '''
def setUp(self):
<|fim_middle|>
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds formatted crap
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | self.time = Time.check_time("") |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0=baddest to 10=goodest '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
<|fim_middle|>
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time) |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
<|fim_middle|>
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time) |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
<|fim_middle|>
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time) |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
<|fim_middle|>
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time) |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
<|fim_middle|>
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time) |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
<|fim_middle|>
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time) |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
<|fim_middle|>
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | self.time = ""
correct_time = "" |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | unittest.main() |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def <|fim_middle|>(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | setUp |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def <|fim_middle|>(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | test_vbad0 |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def <|fim_middle|>(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | test_bad1 |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def <|fim_middle|>(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | test_bad2 |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def <|fim_middle|>(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | test_middle3 |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def <|fim_middle|>(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | test_good4 |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def <|fim_middle|>(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | test_vgood5 |
<|file_name|>test_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
''' Tests with numbered degrees of bad or good data, on a scale of 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds badly formatted
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def <|fim_middle|>(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | tearDown |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
<|fim▁hole|> conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##<|fim▁end|> | def configure(conf): |
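The version-probing loop above is tied to waf's ctx.cmd_and_log; as a hypothetical standalone illustration (not part of the hwaf tool itself), the same parsing can be reproduced with plain subprocess:
import subprocess

def cmake_version(cmake="cmake"):
    # run `cmake --version` and return the text after the word "version",
    # mirroring the loop in find_cmake above
    out = subprocess.run([cmake, "--version"],
                         capture_output=True, text=True, check=True).stdout
    for line in out.lower().splitlines():
        if "version" in line:
            return line[line.find("version") + len("version"):].strip()
    return "N/A"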
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
<|fim_middle|>
def configure(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
<|fim▁end|> | opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def configure(conf):
<|fim_middle|>
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
<|fim▁end|> | conf.load('hwaf-base', tooldir=_heptooldir)
return |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def configure(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
<|fim_middle|>
## EOF ##
<|fim▁end|> | if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def configure(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
<|fim_middle|>
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
<|fim▁end|> | ctx.fatal('load a C compiler first')
pass |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def configure(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
<|fim_middle|>
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
<|fim▁end|> | ctx.fatal('load a C++ compiler first')
pass |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def configure(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
<|fim_middle|>
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
<|fim▁end|> | topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def configure(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
<|fim_middle|>
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
<|fim▁end|> | version=l[l.find("version")+len("version"):].strip()
break |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def <|fim_middle|>(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def configure(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
<|fim▁end|> | options |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def <|fim_middle|>(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
<|fim▁end|> | configure |
<|file_name|>find_cmake.py<|end_file_name|><|fim▁begin|># -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def configure(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def <|fim_middle|>(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
<|fim▁end|> | find_cmake |
<|file_name|>config.py<|end_file_name|><|fim▁begin|>from keras import backend as K
class Config:
def __init__(self):
self.verbose = True
self.network = 'resnet50'
# setting for data augmentation
self.use_horizontal_flips = True
self.use_vertical_flips = True
self.rot_90 = True
# anchor box scales
self.anchor_box_scales = [1, 2, 4, 8, 16, 32, 64, 124, 256, 512]
# anchor box ratios
self.anchor_box_ratios = [[1, 1], [1, 2], [2, 1],[1,3],[3,1],[4,1],[1,4],[1,5],[5,1],[1,6],[6,1],[1,7],[7,1],[1,8],[8,1],[1,9],[9,1]]
# size to resize the smallest side of the image
self.im_size = 600
# image channel-wise mean to subtract
self.img_channel_mean = [103.939, 116.779, 123.68]
self.img_scaling_factor = 1.0
# number of ROIs at once<|fim▁hole|> self.num_rois = 8
# stride at the RPN (this depends on the network configuration)
self.rpn_stride = 16
self.balanced_classes = False
# scaling the stdev
self.std_scaling = 4.0
self.classifier_regr_std = [8.0, 8.0, 4.0, 4.0]
# overlaps for RPN
self.rpn_min_overlap = 0.3
self.rpn_max_overlap = 0.7
# overlaps for classifier ROIs
self.classifier_min_overlap = 0.1
self.classifier_max_overlap = 0.5
# placeholder for the class mapping, automatically generated by the parser
self.class_mapping = None
#location of pretrained weights for the base network
# weight files can be found at:
# https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5
# https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
self.model_path = 'model_frcnn.vgg.hdf5'<|fim▁end|> | |
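For illustration only (inferred, not code from this repo): Faster R-CNN pipelines typically cross every anchor_box_scales entry with every anchor_box_ratios pair, one anchor shape per combination, so the configuration above implies 10 * 17 = 170 anchors per feature-map location.
from itertools import product

def anchor_shapes(scales, ratios):
    # one (width, height) pair per scale/ratio combination
    return [(s * rx, s * ry) for s, (rx, ry) in product(scales, ratios)]

print(len(anchor_shapes([1, 2, 4, 8, 16, 32, 64, 124, 256, 512],
                        [[1, 1], [1, 2], [2, 1]])))  # 30 with this ratio subset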
<|file_name|>config.py<|end_file_name|><|fim▁begin|>from keras import backend as K
class Config:
<|fim_middle|>
<|fim▁end|> | def __init__(self):
self.verbose = True
self.network = 'resnet50'
# setting for data augmentation
self.use_horizontal_flips = True
self.use_vertical_flips = True
self.rot_90 = True
# anchor box scales
self.anchor_box_scales = [1, 2, 4, 8, 16, 32, 64, 124, 256, 512]
# anchor box ratios
self.anchor_box_ratios = [[1, 1], [1, 2], [2, 1],[1,3],[3,1],[4,1],[1,4],[1,5],[5,1],[1,6],[6,1],[1,7],[7,1],[1,8],[8,1],[1,9],[9,1]]
# size to resize the smallest side of the image
self.im_size = 600
# image channel-wise mean to subtract
self.img_channel_mean = [103.939, 116.779, 123.68]
self.img_scaling_factor = 1.0
# number of ROIs at once
self.num_rois = 8
# stride at the RPN (this depends on the network configuration)
self.rpn_stride = 16
self.balanced_classes = False
# scaling the stdev
self.std_scaling = 4.0
self.classifier_regr_std = [8.0, 8.0, 4.0, 4.0]
# overlaps for RPN
self.rpn_min_overlap = 0.3
self.rpn_max_overlap = 0.7
# overlaps for classifier ROIs
self.classifier_min_overlap = 0.1
self.classifier_max_overlap = 0.5
# placeholder for the class mapping, automatically generated by the parser
self.class_mapping = None
#location of pretrained weights for the base network
# weight files can be found at:
# https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5
# https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
self.model_path = 'model_frcnn.vgg.hdf5' |
<|file_name|>config.py<|end_file_name|><|fim▁begin|>from keras import backend as K
class Config:
def __init__(self):
<|fim_middle|>
<|fim▁end|> | self.verbose = True
self.network = 'resnet50'
# setting for data augmentation
self.use_horizontal_flips = True
self.use_vertical_flips = True
self.rot_90 = True
# anchor box scales
self.anchor_box_scales = [1, 2, 4, 8, 16, 32, 64, 124, 256, 512]
# anchor box ratios
self.anchor_box_ratios = [[1, 1], [1, 2], [2, 1],[1,3],[3,1],[4,1],[1,4],[1,5],[5,1],[1,6],[6,1],[1,7],[7,1],[1,8],[8,1],[1,9],[9,1]]
# size to resize the smallest side of the image
self.im_size = 600
# image channel-wise mean to subtract
self.img_channel_mean = [103.939, 116.779, 123.68]
self.img_scaling_factor = 1.0
# number of ROIs at once
self.num_rois = 8
# stride at the RPN (this depends on the network configuration)
self.rpn_stride = 16
self.balanced_classes = False
# scaling the stdev
self.std_scaling = 4.0
self.classifier_regr_std = [8.0, 8.0, 4.0, 4.0]
# overlaps for RPN
self.rpn_min_overlap = 0.3
self.rpn_max_overlap = 0.7
# overlaps for classifier ROIs
self.classifier_min_overlap = 0.1
self.classifier_max_overlap = 0.5
# placeholder for the class mapping, automatically generated by the parser
self.class_mapping = None
#location of pretrained weights for the base network
# weight files can be found at:
# https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5
# https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
self.model_path = 'model_frcnn.vgg.hdf5' |
<|file_name|>config.py<|end_file_name|><|fim▁begin|>from keras import backend as K
class Config:
def <|fim_middle|>(self):
self.verbose = True
self.network = 'resnet50'
# setting for data augmentation
self.use_horizontal_flips = True
self.use_vertical_flips = True
self.rot_90 = True
# anchor box scales
self.anchor_box_scales = [1, 2, 4, 8, 16, 32, 64, 124, 256, 512]
# anchor box ratios
self.anchor_box_ratios = [[1, 1], [1, 2], [2, 1],[1,3],[3,1],[4,1],[1,4],[1,5],[5,1],[1,6],[6,1],[1,7],[7,1],[1,8],[8,1],[1,9],[9,1]]
# size to resize the smallest side of the image
self.im_size = 600
# image channel-wise mean to subtract
self.img_channel_mean = [103.939, 116.779, 123.68]
self.img_scaling_factor = 1.0
# number of ROIs at once
self.num_rois = 8
# stride at the RPN (this depends on the network configuration)
self.rpn_stride = 16
self.balanced_classes = False
# scaling the stdev
self.std_scaling = 4.0
self.classifier_regr_std = [8.0, 8.0, 4.0, 4.0]
# overlaps for RPN
self.rpn_min_overlap = 0.3
self.rpn_max_overlap = 0.7
# overlaps for classifier ROIs
self.classifier_min_overlap = 0.1
self.classifier_max_overlap = 0.5
# placeholder for the class mapping, automatically generated by the parser
self.class_mapping = None
#location of pretrained weights for the base network
# weight files can be found at:
# https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5
# https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
self.model_path = 'model_frcnn.vgg.hdf5'
<|fim▁end|> | __init__ |
<|file_name|>get_account_urls.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from keyring import get_password
from boto.iam.connection import IAMConnection
import lib.LoadBotoConfig as BotoConfig<|fim▁hole|>
from sys import exit
envs = ['dev', 'qa', 'staging', 'demo', 'prod']
for env in envs:
id = BotoConfig.config.get(env, 'aws_access_key_id')
key = get_password(BotoConfig.config.get(env, 'keyring'), id)
conn = IAMConnection(aws_access_key_id=id, aws_secret_access_key=key)
print(conn.get_signin_url())<|fim▁end|> | |
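lib.LoadBotoConfig is imported above but not included in this dataset; judging from the BotoConfig.config.get(env, key) calls, it presumably exposes a ConfigParser over a boto-style INI file with one section per environment. A hypothetical sketch:
from configparser import ConfigParser
from os.path import expanduser

# hypothetical reconstruction -- the real module and its config path are unknown
config = ConfigParser()
config.read(expanduser("~/.boto"))  # illustrative location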
<|file_name|>test_skip_checkpoint_abort_transaction.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import shutil
import unittest2 as unittest
from gppylib.db import dbconn
from gppylib.commands.base import Command
from gppylib.commands.gp import GpStart, GpStop
import tinctest
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
class transactions(MPPTestCase):
def test_skip_checkpoint_abort_transaction(self):
"""
@description FATAL failure handling preserves already committed transactions
@created 2013-04-19 00:00:00
@modified 2013-04-19 00:00:00
@tags transaction checkpoint MPP-17817 MPP-17925 MPP-17926 MPP-17927 MPP-17928 schedule_transaction
@product_version gpdb: [4.1.2.5- main]
Repro steps:
1. GPDB is up and running, number of segments is irrelevant, no master standby is required,
no segment mirroring is required
2. inject fault on master for skipping checkpoints
> gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0
3. inject fault 'fatal' on master, it aborts already committed local transaction
> gpfaultinjector -p 4100 -m async -s 1 -f local_tm_record_transaction_commit -y panic_suppress
4. create table 'test'<|fim▁hole|> > PGOPTIONS='-c gp_session_role=utility -c allow_system_table_mods=dml' psql -p 4100 template1
begin;
create table test21(a int);
insert into test21(a) values(10);
truncate table test21;
commit;
6. Wait 5 minutes
7. Immediately shut down and restart GPDB; without the fix GPDB does not come back up,
with the fix it restarts cleanly
> gpstop -air
"""
master_port = os.getenv("PGPORT", "5432")
cmd = Command(name="gpfaultinjector", cmdStr="gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0")
cmd.run()
cmd = Command(name="gpfaultinjector",
cmdStr="gpfaultinjector -p %s -m async -s 1 \
-f local_tm_record_transaction_commit -y panic_suppress" % master_port)
cmd.run()
PSQL.run_sql_command("create table mpp17817(a int)")
sql_file = local_path('mpp17817.sql')
PSQL.run_sql_file(sql_file, PGOPTIONS="-c gp_session_role=utility")
time.sleep(300)
cmd = Command(name="gpstop restart immediate",
cmdStr="source %s/greenplum_path.sh;\
gpstop -air" % os.environ["GPHOME"])
cmd.run(validateAfter=True)
# Cleanup
PSQL.run_sql_command("drop table mpp17817")
PSQL.run_sql_command("drop table mpp17817_21")<|fim▁end|> | > psql template1 -c 'create table test(a int);'
5. connect in utility mode to master and create table, insert rows into table and truncate table |
<|file_name|>test_skip_checkpoint_abort_transaction.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import shutil
import unittest2 as unittest
from gppylib.db import dbconn
from gppylib.commands.base import Command
from gppylib.commands.gp import GpStart, GpStop
import tinctest
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
class transactions(MPPTestCase):
<|fim_middle|>
<|fim▁end|> | def test_skip_checkpoint_abort_transaction(self):
"""
@description FATAL failure handling preserves already committed transactions
@created 2013-04-19 00:00:00
@modified 2013-04-19 00:00:00
@tags transaction checkpoint MPP-17817 MPP-17925 MPP-17926 MPP-17927 MPP-17928 schedule_transaction
@product_version gpdb: [4.1.2.5- main]
Repro steps:
1. GPDB is up and running, number of segments is irrelevant, no master standby is required,
no segment mirroring is required
2. inject fault on master for skipping checkpoints
> gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0
3. inject fault 'fatal' on master, it aborts already committed local transaction
> gpfaultinjector -p 4100 -m async -s 1 -f local_tm_record_transaction_commit -y panic_suppress
4. create table 'test'
> psql template1 -c 'create table test(a int);'
5. connect in utility mode to master and create table, insert rows into table and truncate table
> PGOPTIONS='-c gp_session_role=utility -c allow_system_table_mods=dml' psql -p 4100 template1
begin;
create table test21(a int);
insert into test21(a) values(10);
truncate table test21;
commit;
6. Wait 5 minutes
7. Immediately shut down and restart GPDB; without the fix GPDB does not come back up,
with the fix it restarts cleanly
> gpstop -air
"""
master_port = os.getenv("PGPORT", "5432")
cmd = Command(name="gpfaultinjector", cmdStr="gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0")
cmd.run()
cmd = Command(name="gpfaultinjector",
cmdStr="gpfaultinjector -p %s -m async -s 1 \
-f local_tm_record_transaction_commit -y panic_suppress" % master_port)
cmd.run()
PSQL.run_sql_command("create table mpp17817(a int)")
sql_file = local_path('mpp17817.sql')
PSQL.run_sql_file(sql_file, PGOPTIONS="-c gp_session_role=utility")
time.sleep(300)
cmd = Command(name="gpstop restart immediate",
cmdStr="source %s/greenplum_path.sh;\
gpstop -air" % os.environ["GPHOME"])
cmd.run(validateAfter=True)
# Cleanup
PSQL.run_sql_command("drop table mpp17817")
PSQL.run_sql_command("drop table mpp17817_21") |
<|file_name|>test_skip_checkpoint_abort_transaction.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import shutil
import unittest2 as unittest
from gppylib.db import dbconn
from gppylib.commands.base import Command
from gppylib.commands.gp import GpStart, GpStop
import tinctest
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
class transactions(MPPTestCase):
def test_skip_checkpoint_abort_transaction(self):
<|fim_middle|>
<|fim▁end|> | """
@description FATAL failure execution handles already committed transactions properly
@created 2013-04-19 00:00:00
@modified 2013-04-19 00:00:00
@tags transaction checkpoint MPP-17817 MPP-17925 MPP-17926 MPP-17927 MPP-17928 schedule_transaction
@product_version gpdb: [4.1.2.5- main]
Repro steps:
1. GPDB is up and running, number of segments is irrelevant, no master standby is required,
no segment mirroring is required
2. inject fault on master for skipping checkpoints
> gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0
3. inject fault 'fatal' on master, it aborts already committed local transaction
> gpfaultinjector -p 4100 -m async -s 1 -f local_tm_record_transaction_commit -y panic_suppress
4. create table 'test'
> psql template1 -c 'create table test(a int);'
5. connect in utility mode to master and create table, insert rows into table and truncate table
> PGOPTIONS='-c gp_session_role=utility -c allow_system_table_mods=dml' psql -p 4100 template1
begin;
create table test21(a int);
insert into test21(a) values(10);
truncate table test21;
commit;
6. Wait 5 minutes
7. GPDB immediate shutdown and restart, GPDB does not come up with versions without fix,
GPDB comes up with versions with fix
> gpstop -air
"""
master_port = os.getenv("PGPORT", "5432")
cmd = Command(name="gpfaultinjector", cmdStr="gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0")
cmd.run()
cmd = Command(name="gpfaultinjector",
cmdStr="gpfaultinjector -p %s -m async -s 1 \
-f local_tm_record_transaction_commit -y panic_suppress" % master_port)
cmd.run()
PSQL.run_sql_command("create table mpp17817(a int)")
sql_file = local_path('mpp17817.sql')
PSQL.run_sql_file(sql_file, PGOPTIONS="-c gp_session_role=utility")
time.sleep(300)
cmd = Command(name="gpstop restart immediate",
cmdStr="source %s/greenplum_path.sh;\
gpstop -air" % os.environ["GPHOME"])
cmd.run(validateAfter=True)
# Cleanup
PSQL.run_sql_command("drop table mpp17817")
PSQL.run_sql_command("drop table mpp17817_21") |
<|file_name|>test_skip_checkpoint_abort_transaction.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import shutil
import unittest2 as unittest
from gppylib.db import dbconn
from gppylib.commands.base import Command
from gppylib.commands.gp import GpStart, GpStop
import tinctest
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
class transactions(MPPTestCase):
def <|fim_middle|>(self):
"""
@description FATAL failure execution handles already committed transactions properly
@created 2013-04-19 00:00:00
@modified 2013-04-19 00:00:00
@tags transaction checkpoint MPP-17817 MPP-17925 MPP-17926 MPP-17927 MPP-17928 schedule_transaction
@product_version gpdb: [4.1.2.5- main]
Repro steps:
1. GPDB is up and running, number of segments is irrelevant, no master standby is required,
no segment mirroring is required
2. inject fault on master for skipping checkpoints
> gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0
3. inject fault 'fatal' on master, it aborts already committed local transaction
> gpfaultinjector -p 4100 -m async -s 1 -f local_tm_record_transaction_commit -y panic_suppress
4. create table 'test'
> psql template1 -c 'create table test(a int);'
5. connect in utility mode to master and create table, insert rows into table and truncate table
> PGOPTIONS='-c gp_session_role=utility -c allow_system_table_mods=dml' psql -p 4100 template1
begin;
create table test21(a int);
insert into test21(a) values(10);
truncate table test21;
commit;
6. Wait 5 minutes
7. GPDB immediate shutdown and restart, GPDB does not come up with versions without fix,
GPDB comes up with versions with fix
> gpstop -air
"""
master_port = os.getenv("PGPORT", "5432")
cmd = Command(name="gpfaultinjector", cmdStr="gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0")
cmd.run()
cmd = Command(name="gpfaultinjector",
cmdStr="gpfaultinjector -p %s -m async -s 1 \
-f local_tm_record_transaction_commit -y panic_suppress" % master_port)
cmd.run()
PSQL.run_sql_command("create table mpp17817(a int)")
sql_file = local_path('mpp17817.sql')
PSQL.run_sql_file(sql_file, PGOPTIONS="-c gp_session_role=utility")
time.sleep(300)
cmd = Command(name="gpstop restart immediate",
cmdStr="source %s/greenplum_path.sh;\
gpstop -air" % os.environ["GPHOME"])
cmd.run(validateAfter=True)
# Cleanup
PSQL.run_sql_command("drop table mpp17817")
PSQL.run_sql_command("drop table mpp17817_21")
<|fim▁end|> | test_skip_checkpoint_abort_transaction |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,<|fim▁hole|> def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants<|fim▁end|> | meta={'grants':grants})
|
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
<|fim_middle|>
<|fim▁end|> | name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
<|fim_middle|>
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
<|fim▁end|> | super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)] |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
<|fim_middle|>
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
<|fim▁end|> | hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants}) |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
<|fim_middle|>
<|fim▁end|> | hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
<|fim_middle|>
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
<|fim▁end|> | self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)] |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
<|fim_middle|>
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
<|fim▁end|> | grants[normalized_field] = values.xpath('a/text()').extract()[1] |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
<|fim_middle|>
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
<|fim▁end|> | grants[normalized_field] = values.xpath('a/text()').extract()[0] |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
<|fim_middle|>
yield grants
<|fim▁end|> | grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1] |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def <|fim_middle|>(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
<|fim▁end|> | __init__ |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def <|fim_middle|>(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def parse_project(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
<|fim▁end|> | parse |
<|file_name|>grants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
name = "grants"
allowed_domains = ["www.knightfoundation.org"]
pages = 1
base_url = 'http://www.knightfoundation.org'
start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'
def __init__(self, pages=None, *args, **kwargs):
super(GrantsSpider, self).__init__(*args, **kwargs)
if pages is not None:
self.pages = pages
self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]
def parse(self, response):
hxs = Selector(response)
projects = hxs.xpath('//article')
for project in projects:
time.sleep(2)
project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
grants = KgrantsItem()
grants['page'] = project_url
grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
yield Request(grants['page'],
callback = self.parse_project,
meta={'grants':grants})
def <|fim_middle|>(self,response):
hxs = Selector(response)
grants = response.meta['grants']
details = hxs.xpath('//section[@id="grant_info"]')
fields = hxs.xpath('//dt')
values = hxs.xpath('//dd')
self.log('field: <%s>' % fields.extract())
for item in details:
grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
count = 0
for field in fields:
normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
self.log('field: <%s>' % normalized_field)
try:
grants[normalized_field] = values.xpath('text()').extract()[count]
except:
if normalized_field == 'community':
grants[normalized_field] = values.xpath('a/text()').extract()[1]
elif normalized_field == 'focus_area':
grants[normalized_field] = values.xpath('a/text()').extract()[0]
count += 1
grants['grantee_contact_email'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
grants['grantee_contact_name'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
grants['grantee_contact_location'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
grants['grantee_contact_facebook'] = ''.join(
item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
if 'grant_period' in grants:
grant_period = grants['grant_period'].split(' to ')
grants['grant_period_start'] = grant_period[0]
grants['grant_period_end'] = grant_period[1]
yield grants
<|fim▁end|> | parse_project |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language
@csrf_exempt
def add(request):
print "jojo"
<|fim▁hole|> if request.method == 'POST':
language = request.POST['language']
content = request.POST['content']
try:
lang = Language.objects.get(pk=language)
except:
print "lang not avalible", language
lang = Language.objects.get(pk='txt')
paste = Paste(content=content, language=lang)
paste.save()
paste = Paste.objects.latest()
return HttpResponse(paste.pk, content_type='text/plain')
else:
return redirect('/api')<|fim▁end|> | |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language
@csrf_exempt
def add(request):
<|fim_middle|>
<|fim▁end|> | print "jojo"
if request.method == 'POST':
language = request.POST['language']
content = request.POST['content']
try:
lang = Language.objects.get(pk=language)
except:
print "lang not avalible", language
lang = Language.objects.get(pk='txt')
paste = Paste(content=content, language=lang)
paste.save()
paste = Paste.objects.latest()
return HttpResponse(paste.pk, content_type='text/plain')
else:
return redirect('/api') |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language
@csrf_exempt
def add(request):
print "jojo"
if request.method == 'POST':
<|fim_middle|>
else:
return redirect('/api')
<|fim▁end|> | language = request.POST['language']
content = request.POST['content']
try:
lang = Language.objects.get(pk=language)
except:
print "lang not avalible", language
lang = Language.objects.get(pk='txt')
paste = Paste(content=content, language=lang)
paste.save()
paste = Paste.objects.latest()
return HttpResponse(paste.pk, content_type='text/plain') |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language
@csrf_exempt
def add(request):
print "jojo"
if request.method == 'POST':
language = request.POST['language']
content = request.POST['content']
try:
lang = Language.objects.get(pk=language)
except:
print "lang not avalible", language
lang = Language.objects.get(pk='txt')
paste = Paste(content=content, language=lang)
paste.save()
paste = Paste.objects.latest()
return HttpResponse(paste.pk, content_type='text/plain')
else:
<|fim_middle|>
<|fim▁end|> | return redirect('/api') |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language
@csrf_exempt
def <|fim_middle|>(request):
print "jojo"
if request.method == 'POST':
language = request.POST['language']
content = request.POST['content']
try:
lang = Language.objects.get(pk=language)
except:
print "lang not avalible", language
lang = Language.objects.get(pk='txt')
paste = Paste(content=content, language=lang)
paste.save()
paste = Paste.objects.latest()
return HttpResponse(paste.pk, content_type='text/plain')
else:
return redirect('/api')
<|fim▁end|> | add |
<|file_name|>emailer_plugin.py<|end_file_name|><|fim▁begin|>from gi.repository import Gtk
import gourmet.gtk_extras.dialog_extras as de
from gourmet.plugin import RecDisplayModule, UIPlugin, MainPlugin, ToolPlugin
from .recipe_emailer import RecipeEmailer
from gettext import gettext as _
class EmailRecipePlugin (MainPlugin, UIPlugin):
ui_string = '''
<menubar name="RecipeIndexMenuBar">
<menu name="Tools" action="Tools">
<placeholder name="StandaloneTool">
<menuitem action="EmailRecipes"/>
</placeholder>
</menu>
</menubar>
'''
def setup_action_groups (self):
self.actionGroup = Gtk.ActionGroup(name='RecipeEmailerActionGroup')
self.actionGroup.add_actions([
('EmailRecipes',None,_('Email recipes'),
None,_('Email all selected recipes (or all recipes if no recipes are selected)'),self.email_selected),
])
self.action_groups.append(self.actionGroup)
def activate (self, pluggable):<|fim▁hole|> self.add_to_uimanager(pluggable.ui_manager)
def get_selected_recs (self):
recs = self.rg.get_selected_recs_from_rec_tree()
if not recs:
recs = self.rd.fetch_all(self.rd.recipe_table, deleted=False, sort_by=[('title',1)])
return recs
def email_selected (self, *args):
recs = self.get_selected_recs()
l = len(recs)
if l > 20:
if not de.getBoolean(
title=_('Email recipes'),
# only called for l>20, so fancy gettext methods
# shouldn't be necessary if my knowledge of
# linguistics serves me
sublabel=_('Do you really want to email all %s selected recipes?')%l,
custom_yes=_('Yes, e_mail them'),
cancel=False,
):
return
re = RecipeEmailer(recs)
re.send_email_with_attachments()<|fim▁end|> | self.rg = self.pluggable = pluggable |
<|file_name|>emailer_plugin.py<|end_file_name|><|fim▁begin|>from gi.repository import Gtk
import gourmet.gtk_extras.dialog_extras as de
from gourmet.plugin import RecDisplayModule, UIPlugin, MainPlugin, ToolPlugin
from .recipe_emailer import RecipeEmailer
from gettext import gettext as _
class EmailRecipePlugin (MainPlugin, UIPlugin):
<|fim_middle|>
<|fim▁end|> | ui_string = '''
<menubar name="RecipeIndexMenuBar">
<menu name="Tools" action="Tools">
<placeholder name="StandaloneTool">
<menuitem action="EmailRecipes"/>
</placeholder>
</menu>
</menubar>
'''
def setup_action_groups (self):
self.actionGroup = Gtk.ActionGroup(name='RecipeEmailerActionGroup')
self.actionGroup.add_actions([
('EmailRecipes',None,_('Email recipes'),
None,_('Email all selected recipes (or all recipes if no recipes are selected)'),self.email_selected),
])
self.action_groups.append(self.actionGroup)
def activate (self, pluggable):
self.rg = self.pluggable = pluggable
self.add_to_uimanager(pluggable.ui_manager)
def get_selected_recs (self):
recs = self.rg.get_selected_recs_from_rec_tree()
if not recs:
recs = self.rd.fetch_all(self.rd.recipe_table, deleted=False, sort_by=[('title',1)])
return recs
def email_selected (self, *args):
recs = self.get_selected_recs()
l = len(recs)
if l > 20:
if not de.getBoolean(
title=_('Email recipes'),
# only called for l>20, so fancy gettext methods
# shouldn't be necessary if my knowledge of
# linguistics serves me
sublabel=_('Do you really want to email all %s selected recipes?')%l,
custom_yes=_('Yes, e_mail them'),
cancel=False,
):
return
re = RecipeEmailer(recs)
re.send_email_with_attachments() |
<|file_name|>emailer_plugin.py<|end_file_name|><|fim▁begin|>from gi.repository import Gtk
import gourmet.gtk_extras.dialog_extras as de
from gourmet.plugin import RecDisplayModule, UIPlugin, MainPlugin, ToolPlugin
from .recipe_emailer import RecipeEmailer
from gettext import gettext as _
class EmailRecipePlugin (MainPlugin, UIPlugin):
ui_string = '''
<menubar name="RecipeIndexMenuBar">
<menu name="Tools" action="Tools">
<placeholder name="StandaloneTool">
<menuitem action="EmailRecipes"/>
</placeholder>
</menu>
</menubar>
'''
def setup_action_groups (self):
<|fim_middle|>
def activate (self, pluggable):
self.rg = self.pluggable = pluggable
self.add_to_uimanager(pluggable.ui_manager)
def get_selected_recs (self):
recs = self.rg.get_selected_recs_from_rec_tree()
if not recs:
recs = self.rd.fetch_all(self.rd.recipe_table, deleted=False, sort_by=[('title',1)])
return recs
def email_selected (self, *args):
recs = self.get_selected_recs()
l = len(recs)
if l > 20:
if not de.getBoolean(
title=_('Email recipes'),
# only called for l>20, so fancy gettext methods
# shouldn't be necessary if my knowledge of
# linguistics serves me
sublabel=_('Do you really want to email all %s selected recipes?')%l,
custom_yes=_('Yes, e_mail them'),
cancel=False,
):
return
re = RecipeEmailer(recs)
re.send_email_with_attachments()
<|fim▁end|> | self.actionGroup = Gtk.ActionGroup(name='RecipeEmailerActionGroup')
self.actionGroup.add_actions([
('EmailRecipes',None,_('Email recipes'),
None,_('Email all selected recipes (or all recipes if no recipes are selected)'),self.email_selected),
])
self.action_groups.append(self.actionGroup) |
<|file_name|>emailer_plugin.py<|end_file_name|><|fim▁begin|>from gi.repository import Gtk
import gourmet.gtk_extras.dialog_extras as de
from gourmet.plugin import RecDisplayModule, UIPlugin, MainPlugin, ToolPlugin
from .recipe_emailer import RecipeEmailer
from gettext import gettext as _
class EmailRecipePlugin (MainPlugin, UIPlugin):
ui_string = '''
<menubar name="RecipeIndexMenuBar">
<menu name="Tools" action="Tools">
<placeholder name="StandaloneTool">
<menuitem action="EmailRecipes"/>
</placeholder>
</menu>
</menubar>
'''
def setup_action_groups (self):
self.actionGroup = Gtk.ActionGroup(name='RecipeEmailerActionGroup')
self.actionGroup.add_actions([
('EmailRecipes',None,_('Email recipes'),
None,_('Email all selected recipes (or all recipes if no recipes are selected)'),self.email_selected),
])
self.action_groups.append(self.actionGroup)
def activate (self, pluggable):
<|fim_middle|>
def get_selected_recs (self):
recs = self.rg.get_selected_recs_from_rec_tree()
if not recs:
recs = self.rd.fetch_all(self.rd.recipe_table, deleted=False, sort_by=[('title',1)])
return recs
def email_selected (self, *args):
recs = self.get_selected_recs()
l = len(recs)
if l > 20:
if not de.getBoolean(
title=_('Email recipes'),
# only called for l>20, so fancy gettext methods
# shouldn't be necessary if my knowledge of
# linguistics serves me
sublabel=_('Do you really want to email all %s selected recipes?')%l,
custom_yes=_('Yes, e_mail them'),
cancel=False,
):
return
re = RecipeEmailer(recs)
re.send_email_with_attachments()
<|fim▁end|> | self.rg = self.pluggable = pluggable
self.add_to_uimanager(pluggable.ui_manager) |
<|file_name|>emailer_plugin.py<|end_file_name|><|fim▁begin|>from gi.repository import Gtk
import gourmet.gtk_extras.dialog_extras as de
from gourmet.plugin import RecDisplayModule, UIPlugin, MainPlugin, ToolPlugin
from .recipe_emailer import RecipeEmailer
from gettext import gettext as _
class EmailRecipePlugin (MainPlugin, UIPlugin):
ui_string = '''
<menubar name="RecipeIndexMenuBar">
<menu name="Tools" action="Tools">
<placeholder name="StandaloneTool">
<menuitem action="EmailRecipes"/>
</placeholder>
</menu>
</menubar>
'''
def setup_action_groups (self):
self.actionGroup = Gtk.ActionGroup(name='RecipeEmailerActionGroup')
self.actionGroup.add_actions([
('EmailRecipes',None,_('Email recipes'),
None,_('Email all selected recipes (or all recipes if no recipes are selected)'),self.email_selected),
])
self.action_groups.append(self.actionGroup)
def activate (self, pluggable):
self.rg = self.pluggable = pluggable
self.add_to_uimanager(pluggable.ui_manager)
def get_selected_recs (self):
<|fim_middle|>
def email_selected (self, *args):
recs = self.get_selected_recs()
l = len(recs)
if l > 20:
if not de.getBoolean(
title=_('Email recipes'),
# only called for l>20, so fancy gettext methods
# shouldn't be necessary if my knowledge of
# linguistics serves me
sublabel=_('Do you really want to email all %s selected recipes?')%l,
custom_yes=_('Yes, e_mail them'),
cancel=False,
):
return
re = RecipeEmailer(recs)
re.send_email_with_attachments()
<|fim▁end|> | recs = self.rg.get_selected_recs_from_rec_tree()
if not recs:
recs = self.rd.fetch_all(self.rd.recipe_table, deleted=False, sort_by=[('title',1)])
return recs |