prompt (large_string, lengths 72 to 9.34k) | completion (large_string, lengths 0 to 7.61k) |
---|---|
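The rows below appear to be fill-in-the-middle (FIM) pairs: each `prompt` cell is a source file with one span cut out and replaced by a sentinel token (`<|fim▁hole|>` or `<|fim_middle|>`), and the matching `completion` cell holds the removed text. As a minimal sketch of how such a row could be split and reassembled — assuming only the sentinel tokens visible in the rows themselves; the helper name `split_fim_prompt` is hypothetical, not part of the dataset:

```python
# Sentinel tokens as they appear in the rows below (assumption: no others are used).
HOLE_TOKENS = ("<|fim▁hole|>", "<|fim_middle|>")

def split_fim_prompt(prompt: str):
    """Split one 'prompt' cell into (prefix, suffix) around its hole token."""
    body = prompt.split("<|fim▁begin|>", 1)[-1]   # drop the <|file_name|> header
    body = body.split("<|fim▁end|>", 1)[0]        # drop the trailing sentinel
    for tok in HOLE_TOKENS:
        if tok in body:
            prefix, suffix = body.split(tok, 1)
            return prefix, suffix
    return body, ""                                # no hole marker found

# Usage: the 'completion' cell is the text that belongs in the hole, so
# prefix + completion + suffix reconstructs the original source file.
```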
<|file_name|>regex_matcherator_naturectr.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
patterns = [r'^.*?/bc_jpg_makerDrop/(crop_fullsize_pad_center)/?.*?/(\d{9}(.*?))\.(.*?)$',
r'^.*?/bc_jpg_makerDrop/(crop_fullsize_pad_anchor)/?.*?/(\d{9}(.*?))\.(.*?)$',
r'^.*?/bfly_jpg_makerDrop/(crop_fullsize_center)/?.*?/(\d{9}(.*?))\.(.*?)$',
r'^.*?/bfly_jpg_makerDrop/(crop_fullsize_anchor)/?.*?/(\d{9}(.*?))\.(.*?)$']*10
strings = ["/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_anchor/346470409.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_center/346470408_1.jpg",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_anchor/346470407_alt01.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_center/346470406_1.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_anchor/346880405.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_center/346470404_1.jpg",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_center/346470403.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_anchor/336470402.jpg"]*10
def <|fim_middle|>(str, patterns):
for pattern in patterns:
if pattern.match(str):
return pattern.match(str), pattern
return False
def regex_matcherator(strings,patterns):
import re
compiled_patterns = list(map(re.compile, patterns))
for s in strings:
if matches_pattern(s, compiled_patterns):
print matches_pattern(s, compiled_patterns)[1].pattern
print '--'.join(s.split('/')[-2:])
print matches_pattern(s, compiled_patterns)[0].groups()
print '\n'
r = regex_matcherator(strings,patterns)
#print r.next()<|fim▁end|> | matches_pattern |
<|file_name|>regex_matcherator_naturectr.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
patterns = [r'^.*?/bc_jpg_makerDrop/(crop_fullsize_pad_center)/?.*?/(\d{9}(.*?))\.(.*?)$',
r'^.*?/bc_jpg_makerDrop/(crop_fullsize_pad_anchor)/?.*?/(\d{9}(.*?))\.(.*?)$',
r'^.*?/bfly_jpg_makerDrop/(crop_fullsize_center)/?.*?/(\d{9}(.*?))\.(.*?)$',
r'^.*?/bfly_jpg_makerDrop/(crop_fullsize_anchor)/?.*?/(\d{9}(.*?))\.(.*?)$']*10
strings = ["/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_anchor/346470409.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_center/346470408_1.jpg",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_anchor/346470407_alt01.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_center/346470406_1.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_anchor/346880405.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_center/346470404_1.jpg",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_center/346470403.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_anchor/336470402.jpg"]*10
def matches_pattern(str, patterns):
for pattern in patterns:
if pattern.match(str):
return pattern.match(str), pattern
return False
def <|fim_middle|>(strings,patterns):
import re
compiled_patterns = list(map(re.compile, patterns))
for s in strings:
if matches_pattern(s, compiled_patterns):
print matches_pattern(s, compiled_patterns)[1].pattern
print '--'.join(s.split('/')[-2:])
print matches_pattern(s, compiled_patterns)[0].groups()
print '\n'
r = regex_matcherator(strings,patterns)
#print r.next()<|fim▁end|> | regex_matcherator |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Phaser Editor documentation build configuration file, created by
# sphinx-quickstart on Thu May 25 08:35:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
#'rinoh.frontend.sphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Phaser Editor 2D'
copyright = u'2016-2020, Arian Fornaris'
author = u'Arian Fornaris'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.1.7'
# The full version, including alpha/beta/rc tags.
release = u'2.1.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
<|fim▁hole|>#import sphinx_rtd_theme
html_theme = "phaser-editor"
# Uncomment for generate Eclipse Offline Help
#html_theme = "eclipse-help"
html_theme_path = ["_themes"]
html_show_sourcelink = False
html_show_sphinx = False
html_favicon = "logo.png"
html_title = "Phaser Editor Help"
html_show_copyright = True
print(html_theme_path)
#html_theme = 'classic'
highlight_language = 'javascript'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PhaserEditordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': '',
# Latex figure (float) alignment
#
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PhaserEditor2D.tex', u'Phaser Editor 2D Documentation',
u'Arian Fornaris', 'manual'),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PhaserEditor2D', u'Phaser Editor 2D Documentation',
author, 'Arian', 'A friendly HTML5 game IDE.',
'Miscellaneous'),
]<|fim▁end|> |
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# |
<|file_name|>sysvipc.py<|end_file_name|><|fim▁begin|>## Copyright (C) 2007-2012 Red Hat, Inc., Bryn M. Reeves <[email protected]>
### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class SysVIPC(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""SysV IPC related information
"""
plugin_name = "sysvipc"
def setup(self):
self.add_copy_specs([
"/proc/sysvipc/msg",<|fim▁hole|> ])
self.add_cmd_output("ipcs")
# vim: et ts=4 sw=4<|fim▁end|> | "/proc/sysvipc/sem",
"/proc/sysvipc/shm" |
<|file_name|>sysvipc.py<|end_file_name|><|fim▁begin|>## Copyright (C) 2007-2012 Red Hat, Inc., Bryn M. Reeves <[email protected]>
### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class SysVIPC(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
<|fim_middle|>
# vim: et ts=4 sw=4
<|fim▁end|> | """SysV IPC related information
"""
plugin_name = "sysvipc"
def setup(self):
self.add_copy_specs([
"/proc/sysvipc/msg",
"/proc/sysvipc/sem",
"/proc/sysvipc/shm"
])
self.add_cmd_output("ipcs") |
<|file_name|>sysvipc.py<|end_file_name|><|fim▁begin|>## Copyright (C) 2007-2012 Red Hat, Inc., Bryn M. Reeves <[email protected]>
### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class SysVIPC(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""SysV IPC related information
"""
plugin_name = "sysvipc"
def setup(self):
<|fim_middle|>
# vim: et ts=4 sw=4
<|fim▁end|> | self.add_copy_specs([
"/proc/sysvipc/msg",
"/proc/sysvipc/sem",
"/proc/sysvipc/shm"
])
self.add_cmd_output("ipcs") |
<|file_name|>sysvipc.py<|end_file_name|><|fim▁begin|>## Copyright (C) 2007-2012 Red Hat, Inc., Bryn M. Reeves <[email protected]>
### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class SysVIPC(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""SysV IPC related information
"""
plugin_name = "sysvipc"
def <|fim_middle|>(self):
self.add_copy_specs([
"/proc/sysvipc/msg",
"/proc/sysvipc/sem",
"/proc/sysvipc/shm"
])
self.add_cmd_output("ipcs")
# vim: et ts=4 sw=4
<|fim▁end|> | setup |
<|file_name|>homepage.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home')
def setup_items(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()<|fim▁hole|><|fim▁end|> | self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image)) |
<|file_name|>homepage.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
<|fim_middle|>
<|fim▁end|> | def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home')
def setup_items(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()
self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image)) |
<|file_name|>homepage.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def validate(self):
<|fim_middle|>
def setup_items(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()
self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image))
<|fim▁end|> | if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home') |
<|file_name|>homepage.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home')
def setup_items(self):
<|fim_middle|>
<|fim▁end|> | for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()
self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image)) |
<|file_name|>homepage.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def validate(self):
if not self.description:
<|fim_middle|>
delete_page_cache('home')
def setup_items(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()
self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image))
<|fim▁end|> | self.description = frappe._("This is an example website auto-generated from ERPNext") |
<|file_name|>homepage.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home')
def setup_items(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
<|fim_middle|>
self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image))
<|fim▁end|> | doc.save() |
<|file_name|>homepage.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def <|fim_middle|>(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home')
def setup_items(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()
self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image))
<|fim▁end|> | validate |
<|file_name|>homepage.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home')
def <|fim_middle|>(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()
self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image))
<|fim▁end|> | setup_items |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results<|fim▁hole|> examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
<|fim_middle|>
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | """Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op) |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
<|fim_middle|>
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | """Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch)) |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
<|fim_middle|>
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | self._step = -1 |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
<|fim_middle|>
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value. |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
<|fim_middle|>
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch)) |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
<|fim_middle|>
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train() |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def <|fim_middle|>():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | train |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def <|fim_middle|>(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | begin |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def <|fim_middle|>(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | before_run |
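# A quick sanity check of the "100K steps (256 epochs of data)" figure quoted in
# the docstring above. It assumes the standard 50,000-image CIFAR-10 training
# split (that size comes from the dataset itself, not from this file):
batch_size = 128
max_steps = 100000
train_set_size = 50000
epochs = max_steps * batch_size / train_set_size
print(epochs)  # -> 256.0, matching the docstring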
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def <|fim_middle|>(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | after_run |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def <|fim_middle|>(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | main |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
<|fim_middle|>
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch)) |
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
<|fim_middle|>
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()<|fim▁end|> | tf.gfile.DeleteRecursively(FLAGS.train_dir) |
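# The two throughput figures logged by _LoggerHook above are related by
# examples_per_sec = batch_size / sec_per_batch. Plugging in the ~0.35 sec/batch
# quoted for a Tesla K20m in the docstring (illustrative numbers only):
batch_size = 128
sec_per_batch = 0.35
examples_per_sec = batch_size / sec_per_batch
print('%.1f examples/sec; %.3f sec/batch' % (examples_per_sec, sec_per_batch))
# -> 365.7 examples/sec; 0.350 sec/batch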
<|file_name|>cifar10_train.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | tf.app.run() |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>"""heroku_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include<|fim▁hole|>"""
from django.conf.urls import include, url
from django.contrib import admin
from blog.views import index, signup, login, logout
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^signup', signup, name='signup'),
url(r'^login', login, name='login'),
url(r'^logout', logout, name='logout'),
url(r'^admin/', include(admin.site.urls)),
]<|fim▁end|> | 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) |
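# Rough sketch of the app-level URLconf that the "Including another URLconf"
# example in the docstring above would point at. A blog/urls.py like this is
# assumed for illustration (the view import and URL name are illustrative, not
# part of the files shown here):
from django.conf.urls import url

from blog.views import index

urlpatterns = [
    url(r'^$', index, name='blog-index'),
]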
<|file_name|>linear_search.py<|end_file_name|><|fim▁begin|>def linear_search(lst,size,value):
i = 0<|fim▁hole|> return -1
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
main()<|fim▁end|> | while i < size:
if lst[i] == value:
return i
i = i + 1 |
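# Usage sketch for linear_search() above, illustrating the best and worst cases
# mentioned in the Big O printout (the list mirrors the one hard-coded in main();
# the searched values are chosen for illustration):
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
print(linear_search(lst, len(lst), -31))  # best case: match at index 0 -> 0
print(linear_search(lst, len(lst), 7))    # worst case: value absent -> -1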
<|file_name|>linear_search.py<|end_file_name|><|fim▁begin|>
def linear_search(lst,size,value):
<|fim_middle|>
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
main()
<|fim▁end|> | i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1 |
<|file_name|>linear_search.py<|end_file_name|><|fim▁begin|>
def linear_search(lst,size,value):
i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1
def main():
<|fim_middle|>
if __name__ == '__main__':
main()
<|fim▁end|> | lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index)) |
<|file_name|>linear_search.py<|end_file_name|><|fim▁begin|>
def linear_search(lst,size,value):
i = 0
while i < size:
if lst[i] == value:
<|fim_middle|>
i = i + 1
return -1
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
main()
<|fim▁end|> | return i |
<|file_name|>linear_search.py<|end_file_name|><|fim▁begin|>
def linear_search(lst,size,value):
i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
<|fim_middle|>
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
main()
<|fim▁end|> | print(str(value) + " was not found in that array\n") |
<|file_name|>linear_search.py<|end_file_name|><|fim▁begin|>
def linear_search(lst,size,value):
i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
<|fim_middle|>
if __name__ == '__main__':
main()
<|fim▁end|> | print(str(value) + " was found at index " + str(index)) |
<|file_name|>linear_search.py<|end_file_name|><|fim▁begin|>
def linear_search(lst,size,value):
i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | main() |
<|file_name|>linear_search.py<|end_file_name|><|fim▁begin|>
def <|fim_middle|>(lst,size,value):
i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
main()
<|fim▁end|> | linear_search |
<|file_name|>linear_search.py<|end_file_name|><|fim▁begin|>
def linear_search(lst,size,value):
i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1
def <|fim_middle|>():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
main()
<|fim▁end|> | main |
<|file_name|>__openerp__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright© 2016 ICTSTUDIO <http://www.ictstudio.eu>
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': "MIS Builder Cost Center Filter",
'version': '8.0.1.0.0',
'category': 'Reporting',
'summary': """
Add Cost Center filters to MIS Reports
""",
'author':
'ICTSTUDIO,'
'ACSONE SA/NV,'
'Odoo Community Association (OCA)',
'website': "http://www.ictstudio.eu",
'license': 'AGPL-3',
'depends': [
'mis_builder',<|fim▁hole|> 'views/mis_report_view.xml',
'views/mis_builder_cost_center.xml',
],
'qweb': [
'static/src/xml/mis_widget.xml'
],
}<|fim▁end|> | 'account_cost_center'
],
'data': [ |
<|file_name|>constants.py<|end_file_name|><|fim▁begin|># Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants for music processing in Magenta."""
# Meter-related constants.
DEFAULT_QUARTERS_PER_MINUTE = 120.0
DEFAULT_STEPS_PER_BAR = 16 # 4/4 music sampled at 4 steps per quarter note.
DEFAULT_STEPS_PER_QUARTER = 4
# Default absolute quantization.
DEFAULT_STEPS_PER_SECOND = 100
# Standard pulses per quarter.
# https://en.wikipedia.org/wiki/Pulses_per_quarter_note
STANDARD_PPQ = 220
# Special melody events.
NUM_SPECIAL_MELODY_EVENTS = 2
MELODY_NOTE_OFF = -1
MELODY_NO_EVENT = -2
# Other melody-related constants.
MIN_MELODY_EVENT = -2
MAX_MELODY_EVENT = 127
MIN_MIDI_PITCH = 0 # Inclusive.
MAX_MIDI_PITCH = 127 # Inclusive.
NUM_MIDI_PITCHES = MAX_MIDI_PITCH - MIN_MIDI_PITCH + 1
NOTES_PER_OCTAVE = 12<|fim▁hole|>
# Program-related constants.
MIN_MIDI_PROGRAM = 0
MAX_MIDI_PROGRAM = 127
# MIDI programs that typically sound unpitched.
UNPITCHED_PROGRAMS = (
list(range(96, 104)) + list(range(112, 120)) + list(range(120, 128)))
# Chord symbol for "no chord".
NO_CHORD = 'N.C.'
# The indices of the pitch classes in a major scale.
MAJOR_SCALE = [0, 2, 4, 5, 7, 9, 11]
# NOTE_KEYS[note] = The major keys that note belongs to.
# ex. NOTE_KEYS[0] lists all the major keys that contain the note C,
# which are:
# [0, 1, 3, 5, 7, 8, 10]
# [C, C#, D#, F, G, G#, A#]
#
# 0 = C
# 1 = C#
# 2 = D
# 3 = D#
# 4 = E
# 5 = F
# 6 = F#
# 7 = G
# 8 = G#
# 9 = A
# 10 = A#
# 11 = B
#
# NOTE_KEYS can be generated using the code below, but is explicitly declared
# for readability:
# NOTE_KEYS = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
# for i in range(12)]
NOTE_KEYS = [
[0, 1, 3, 5, 7, 8, 10],
[1, 2, 4, 6, 8, 9, 11],
[0, 2, 3, 5, 7, 9, 10],
[1, 3, 4, 6, 8, 10, 11],
[0, 2, 4, 5, 7, 9, 11],
[0, 1, 3, 5, 6, 8, 10],
[1, 2, 4, 6, 7, 9, 11],
[0, 2, 3, 5, 7, 8, 10],
[1, 3, 4, 6, 8, 9, 11],
[0, 2, 4, 5, 7, 9, 10],
[1, 3, 5, 6, 8, 10, 11],
[0, 2, 4, 6, 7, 9, 11]
]<|fim▁end|> |
# Velocity-related constants.
MIN_MIDI_VELOCITY = 1 # Inclusive.
MAX_MIDI_VELOCITY = 127 # Inclusive. |
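# The comment above NOTE_KEYS gives the comprehension that would generate it; a
# short check (run in the context of the constants module above, which defines
# MAJOR_SCALE and NOTE_KEYS) that the generated table matches the hand-written one:
generated = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
             for i in range(12)]
assert generated == NOTE_KEYS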
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)<|fim▁hole|>
def serialize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | return filepath
return None |
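# Minimal sketch of the streaming-download pattern that save_files() above is
# built around, using the same requests API but with explicit failure handling
# (the function and argument names here are illustrative, not from the original):
import requests

def download_to(url, filepath):
    response = requests.get(url, stream=True)
    if not response.ok:
        return None  # download failed; nothing is written
    with open(filepath, 'wb') as handle:
        for block in response.iter_content(1024):
            handle.write(block)  # write the body in 1 KiB chunks
    return filepath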
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['revie<|fim_middle|>
d, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | ws']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, wor |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio<|fim_middle|>
name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | /%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_ |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_name):
"""
"""
with open(<|fim_middle|>
, stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('' |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_f <|fim_middle|>
o']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | iles('uk', item['content'], item['uk_audio'])
if item['us_audi |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_f <|fim_middle|>
ate(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | iles('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.cre |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects <|fim_middle|>
ate(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | .cre |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter <|fim_middle|>
alize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | _content(1024):
handle.write(block)
return filepath
return None
def seri |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json <|fim_middle|>
<|fim▁end|> | ") |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
da<|fim_middle|>ata']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | ta = data['d |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):<|fim_middle|>ath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> |
filep |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Save the data from the JSON file into the database
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
if response.ok:
            # Response is OK; stream the body to disk in 1 KiB chunks
for block in response.iter_content(1024):
handle.write(block)
return filepath
return None
def serialize_data(file_name):<|fim_middle|>""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> |
"""
" |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>"""Tests for Airly."""<|fim▁end|> | |
<|file_name|>production.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use mailgun to send emails
- Use redis
'''
from __future__ import absolute_import, unicode_literals
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')<|fim▁hole|>INSTALLED_APPS += ("djangosecure", )
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
) + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[{{cookiecutter.project_name}}] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
# CACHE CONFIGURATION
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': [
'redis:6379',
],
'OPTIONS': {
'DB': 1,
'PARSER_CLASS': 'redis.connection.HiredisParser',
'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 50,
'timeout': 20,
},
'MAX_CONNECTIONS': 1000,
'PICKLE_VERSION': -1,
},
},
}
# ASSET CONFIGURATION
# ------------------------------------------------------------------------------
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = '/static'
MEDIA_ROOT = '/media'
STATICFILES_DIRS = (
unicode(APPS_DIR.path("static")),
)
{% if cookiecutter.use_celery %}
# CELERY BROKER CONFIGURATION
# ------------------------------------------------------------------------------
BROKER_URL = "amqp://guest:guest@rabbitmq:5672//"
{% endif %}
{% if cookiecutter.use_sentry %}
# SENTRY CONFIGURATION
# ------------------------------------------------------------------------------
RAVEN_CONFIG = {
'dsn': env("SENTRY_URL"),
}
INSTALLED_APPS = INSTALLED_APPS + (
'raven.contrib.django.raven_compat',
)
{% endif %}
# Your production stuff: Below this line define 3rd party library settings<|fim▁end|> |
# django-secure
# ------------------------------------------------------------------------------ |
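# The env() helper used throughout these settings is pulled in via
# `from .common import *`; a minimal equivalent setup with django-environ would
# look roughly like this (an assumption -- common.py is not shown here):
import environ

env = environ.Env()
SECRET_KEY = env("DJANGO_SECRET_KEY")  # raises ImproperlyConfigured if unset
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)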
<|file_name|>rst2xml.py<|end_file_name|><|fim▁begin|>#!/var/www/horizon/.venv/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:<|fim▁hole|>
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)<|fim▁end|> | pass
from docutils.core import publish_cmdline, default_description |
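# For in-process conversion, docutils also exposes publish_string; a rough
# equivalent of the command-line front end above, assuming the same 'xml'
# writer (the sample source string is illustrative):
from docutils.core import publish_string

xml_output = publish_string(source='Hello, *world*!', writer_name='xml')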
<|file_name|>openrc.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import os
# With the addition of Keystone, to use an openstack cloud you should
# authenticate against keystone, which returns a **Token** and **Service
# Catalog**. The catalog contains the endpoint for all services the
# user/tenant has access to - including nova, glance, keystone, swift.
#
# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We
# will use the 1.1 *compute api*
os.environ['OS_AUTH_URL'] = "https://keystone.rc.nectar.org.au:5000/v2.0/"
# With the addition of Keystone we have standardized on the term **tenant**
# as the entity that owns the resources.
os.environ['OS_TENANT_ID'] = "123456789012345678901234567890"
os.environ['OS_TENANT_NAME'] = "tenant_name"
# In addition to the owning entity (tenant), openstack stores the entity
# performing the action as the **user**.
os.environ['OS_USERNAME'] = "[email protected]"
# With Keystone you pass the keystone password.<|fim▁hole|><|fim▁end|> | os.environ['OS_PASSWORD'] = "????????????????????" |
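# Illustrative check, using the os module imported above, that the credentials
# this script sets are visible before handing off to OpenStack client code
# (keys taken from the assignments above):
for key in ('OS_AUTH_URL', 'OS_TENANT_NAME', 'OS_USERNAME', 'OS_PASSWORD'):
    print('%s=%s' % (key, os.environ.get(key, '<not set>')))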
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# pkpgcounter : a generic Page Description Language parser
#
# (c) 2003-2009 Jerome Alet <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
#
#
import sys
import glob
import os
import shutil
try :
from distutils.core import setup
except ImportError as msg :
sys.stderr.write("%s\n" % msg)
sys.stderr.write("You need the DistUtils Python module.\nunder Debian, you may have to install the python-dev package.\nOf course, YMMV.\n")
sys.exit(-1)
try :
from PIL import Image
except ImportError :
sys.stderr.write("You need the Python Imaging Library (aka PIL).\nYou can grab it from http://www.pythonware.com\n")
sys.exit(-1)
sys.path.insert(0, "pkpgpdls")
from pkpgpdls.version import __version__, __doc__<|fim▁hole|>for mofile in mofiles :
lang = mofile.split(os.sep)[1]
directory = os.sep.join(["share", "locale", lang, "LC_MESSAGES"])
data_files.append((directory, [ mofile ]))
docdir = "share/doc/pkpgcounter"
docfiles = ["README", "COPYING", "BUGS", "CREDITS", "AUTHORS", "TODO"]
data_files.append((docdir, docfiles))
if os.path.exists("ChangeLog") :
data_files.append((docdir, ["ChangeLog"]))
directory = os.sep.join(["share", "man", "man1"])
manpages = glob.glob(os.sep.join(["man", "*.1"]))
data_files.append((directory, manpages))
setup(name = "pkpgcounter", version = __version__,
license = "GNU GPL",
description = __doc__,
author = "Jerome Alet",
author_email = "[email protected]",
url = "http://www.pykota.com/software/pkpgcounter/",
packages = [ "pkpgpdls" ],
scripts = [ "bin/pkpgcounter" ],
data_files = data_files)<|fim▁end|> |
data_files = []
mofiles = glob.glob(os.sep.join(["po", "*", "*.mo"])) |
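# Worked example of the locale mapping built above: for a hypothetical compiled
# catalog po/fr/pkpgcounter.mo (the real paths come from glob'ing the po/ tree),
# the language code is the second path component, so the entry appended to
# data_files would be ("share/locale/fr/LC_MESSAGES", ["po/fr/pkpgcounter.mo"]):
import os

mofile = os.sep.join(["po", "fr", "pkpgcounter.mo"])
lang = mofile.split(os.sep)[1]
directory = os.sep.join(["share", "locale", lang, "LC_MESSAGES"])
print((directory, [mofile]))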
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# pkpgcounter : a generic Page Description Language parser
#
# (c) 2003-2009 Jerome Alet <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
#
#
import sys
import glob
import os
import shutil
try :
from distutils.core import setup
except ImportError as msg :
sys.stderr.write("%s\n" % msg)
sys.stderr.write("You need the DistUtils Python module.\nunder Debian, you may have to install the python-dev package.\nOf course, YMMV.\n")
sys.exit(-1)
try :
from PIL import Image
except ImportError :
sys.stderr.write("You need the Python Imaging Library (aka PIL).\nYou can grab it from http://www.pythonware.com\n")
sys.exit(-1)
sys.path.insert(0, "pkpgpdls")
from pkpgpdls.version import __version__, __doc__
data_files = []
mofiles = glob.glob(os.sep.join(["po", "*", "*.mo"]))
for mofile in mofiles :
lang = mofile.split(os.sep)[1]
directory = os.sep.join(["share", "locale", lang, "LC_MESSAGES"])
data_files.append((directory, [ mofile ]))
docdir = "share/doc/pkpgcounter"
docfiles = ["README", "COPYING", "BUGS", "CREDITS", "AUTHORS", "TODO"]
data_files.append((docdir, docfiles))
if os.path.exists("ChangeLog") :
<|fim_middle|>
directory = os.sep.join(["share", "man", "man1"])
manpages = glob.glob(os.sep.join(["man", "*.1"]))
data_files.append((directory, manpages))
setup(name = "pkpgcounter", version = __version__,
license = "GNU GPL",
description = __doc__,
author = "Jerome Alet",
author_email = "[email protected]",
url = "http://www.pykota.com/software/pkpgcounter/",
packages = [ "pkpgpdls" ],
scripts = [ "bin/pkpgcounter" ],
data_files = data_files)
<|fim▁end|> | data_files.append((docdir, ["ChangeLog"])) |
<|file_name|>get_software_catalogue.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]
'''
Created on 16/04/2013
@author: henar
'''
import httplib
import sys
import os
from xml.dom.minidom import parse, parseString
from xml.dom.minidom import getDOMImplementation
from xml.etree.ElementTree import Element, SubElement, tostring
import md5
import httplib, urllib
import utils
token = utils.obtainToken(keystone_ip, keystone_port, user, password, project)
print(token)
headers = {'Content-Type': 'application/xml', 'X-Auth-Token': token, 'Tenant-ID': vdc}
print(headers)
print('Get products in the software catalogue: ')
resource = "/sdc/rest/catalog/product"
data1 = utils.doRequestHttpOperation(domine, port, resource, 'GET', None, headers)
dom = parseString(data1)
try:
product = (dom.getElementsByTagName('product'))[0]
productname = product.firstChild.firstChild.nodeValue
print('First product in the software catalogue: ' + productname)
except:
print ("Error in the request to get products")
sys.exit(1)
print('Get Product Details ' + product_name )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/" + product_name, 'GET', None, headers)
print(" OK")
print('Get Product Releases ' + product_name )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/" + product_name + "/release", 'GET',
None, headers)
print(" OK")
print('Get Product Release Info ' + product_name + " " + product_version )
data1 = utils.doRequestHttpOperation(domine, port,
"/sdc/rest/catalog/product/" + product_name + "/release/" + product_version, 'GET', None, headers)
print(" OK")
print('Get Product Attributes ' + product_name )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/" + product_name + '/attributes', 'GET',
None, headers)
print(" OK")
resource_product_instance = "/sdc/rest/vdc/" + vdc + "/productInstance"
print('Install a product in VM. Product ' + product_name )
productInstanceDto = utils.createProductInstanceDto(vm_ip, vm_fqn, product_name, product_version)
print (tostring(productInstanceDto))
task = utils.doRequestHttpOperation(domine, port, resource_product_instance, 'POST', tostring(productInstanceDto),
headers)
print (task)
status = utils.processTask(domine, port, task)
print (" " + status)
resource_get_info_product_instance = "/sdc/rest/vdc/" + vdc + "/productInstance/" + vm_fqn + '_' + product_name + '_' + product_version
print('Get Product Instance Info. Product ' + product_name )
data = utils.doRequestHttpOperation(domine, port, resource_get_info_product_instance, 'GET', None)<|fim▁hole|>#if status != 'INSTALLED':
# print("Status not correct" + status)
resource_delete_product_instance = "/sdc/rest/vdc/" + vdc + "/productInstance/" + vm_fqn + '_' + product_name + '_' + product_version
print('Get Delete Product Instance ' + product_name )
task = utils.doRequestHttpOperation(domine, port, resource_delete_product_instance, 'DELETE', None)
status = utils.processTask(domine, port, task)
print(" OK")
data = utils.doRequestHttpOperation(domine, port, resource_delete_product_instance, 'GET', None)
statusProduct = utils.processProductInstanceStatus(data)
#if status != 'UNINSTALLED':
# print("Status not correct" + statusProduct)<|fim▁end|> | print(data)
status = utils.processProductInstanceStatus(data) |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name<|fim▁hole|> def clear(self):
self.cookies = {}<|fim▁end|> | ] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
|
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
<|fim_middle|>
<|fim▁end|> | def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {} |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
<|fim_middle|>
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | self.cookies = {}
self.plugin = pluginname
self.account = account |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
<|fim_middle|>
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
<|fim_middle|>
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | return list(self.cookies.values()) |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
<|fim_middle|>
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
<|fim_middle|>
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | return self.parse_cookie(name) |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
<|fim_middle|>
def clear(self):
self.cookies = {}
<|fim▁end|> | self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}" |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
<|fim_middle|>
<|fim▁end|> | self.cookies = {} |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
<|fim_middle|>
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | return self.cookies[name].split("\t")[6] |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
<|fim_middle|>
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | return None |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def <|fim_middle|>(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | __init__ |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def <|fim_middle|>(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | add_cookies |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def <|fim_middle|>(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | get_cookies |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def <|fim_middle|>(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | parse_cookie |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def <|fim_middle|>(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | get_cookie |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def <|fim_middle|>(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
<|fim▁end|> | set_cookie |
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def <|fim_middle|>(self):
self.cookies = {}
<|fim▁end|> | clear |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data<|fim▁hole|> numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)<|fim▁end|> | data = numpy.random.random(size=(100,5)) |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
<|fim_middle|>
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile) |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
<|fim_middle|>
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile) |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
<|fim_middle|>
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names) |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
<|fim_middle|>
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2) |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
<|fim_middle|>
<|fim▁end|> | tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2) |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
<|fim_middle|>
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile) |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
<|fim_middle|>
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | tmpfile = comm.bcast(None) |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
<|fim_middle|>
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | os.unlink(tmpfile) |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def <|fim_middle|>(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | test_hdf |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def <|fim_middle|>(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | test_query_range |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def <|fim_middle|>(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | test_csv |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def <|fim_middle|>(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | test_stack_glob |
<|file_name|>test_file.py<|end_file_name|><|fim▁begin|>from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def <|fim_middle|>(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
<|fim▁end|> | test_stack_list |
<|file_name|>setting.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
from flask import current_app, flash, url_for, request
from flask_admin import expose, BaseView
from logpot.admin.base import AuthenticateView, flash_errors
from logpot.admin.forms import SettingForm
from logpot.utils import ImageUtil, getDirectoryPath, loadSiteConfig, saveSiteConfig
import os
from PIL import Image
class SettingView(AuthenticateView, BaseView):
def saveProfileImage(self, filestorage):
buffer = filestorage.stream
buffer.seek(0)
image = Image.open(buffer)
image = ImageUtil.crop_image(image, 64)
current_app.logger.info(image)
dirpath = getDirectoryPath(current_app, '_settings')
filepath = os.path.join(dirpath, "profile.png")
image.save(filepath, optimize=True)
@expose('/', methods=('GET','POST'))
def index(self):
form = SettingForm()
if form.validate_on_submit():
if form.profile_img.data:
file = form.profile_img.data
self.saveProfileImage(file)
data = {}
data['site_title'] = form.title.data
data['site_subtitle'] = form.subtitle.data
data['site_author'] = form.author.data
data['site_author_profile'] = form.author_profile.data
data['enable_link_github'] = form.enable_link_github.data
data['enable_profile_img'] = form.enable_profile_img.data
data["ogp_app_id"] = form.ogp_app_id.data
data["ga_tracking_id"] = form.ga_tracking_id.data
data["enable_twittercard"] = form.enable_twittercard.data
data["twitter_username"] = form.twitter_username.data
data['display_poweredby'] = form.display_poweredby.data
if saveSiteConfig(current_app, data):
flash('Successfully saved.')
else:
flash_errors('Oops. Save error.')
else:
flash_errors(form)
data = loadSiteConfig(current_app)
form.title.data = data['site_title']
form.subtitle.data = data['site_subtitle']
form.author.data = data['site_author']
form.author_profile.data = data['site_author_profile']
form.enable_link_github.data = data['enable_link_github']<|fim▁hole|> form.enable_twittercard.data = data["enable_twittercard"]
form.twitter_username.data = data["twitter_username"]
form.display_poweredby.data = data['display_poweredby']
return self.render('admin/setting.html', form=form)<|fim▁end|> | form.enable_profile_img.data = data['enable_profile_img']
form.ogp_app_id.data = data["ogp_app_id"]
form.ga_tracking_id.data = data["ga_tracking_id"] |
<|file_name|>setting.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
from flask import current_app, flash, url_for, request
from flask_admin import expose, BaseView
from logpot.admin.base import AuthenticateView, flash_errors
from logpot.admin.forms import SettingForm
from logpot.utils import ImageUtil, getDirectoryPath, loadSiteConfig, saveSiteConfig
import os
from PIL import Image
class SettingView(AuthenticateView, BaseView):
<|fim_middle|>
<|fim▁end|> | def saveProfileImage(self, filestorage):
buffer = filestorage.stream
buffer.seek(0)
image = Image.open(buffer)
image = ImageUtil.crop_image(image, 64)
current_app.logger.info(image)
dirpath = getDirectoryPath(current_app, '_settings')
filepath = os.path.join(dirpath, "profile.png")
image.save(filepath, optimize=True)
@expose('/', methods=('GET','POST'))
def index(self):
form = SettingForm()
if form.validate_on_submit():
if form.profile_img.data:
file = form.profile_img.data
self.saveProfileImage(file)
data = {}
data['site_title'] = form.title.data
data['site_subtitle'] = form.subtitle.data
data['site_author'] = form.author.data
data['site_author_profile'] = form.author_profile.data
data['enable_link_github'] = form.enable_link_github.data
data['enable_profile_img'] = form.enable_profile_img.data
data["ogp_app_id"] = form.ogp_app_id.data
data["ga_tracking_id"] = form.ga_tracking_id.data
data["enable_twittercard"] = form.enable_twittercard.data
data["twitter_username"] = form.twitter_username.data
data['display_poweredby'] = form.display_poweredby.data
if saveSiteConfig(current_app, data):
flash('Successfully saved.')
else:
flash_errors('Oops. Save error.')
else:
flash_errors(form)
data = loadSiteConfig(current_app)
form.title.data = data['site_title']
form.subtitle.data = data['site_subtitle']
form.author.data = data['site_author']
form.author_profile.data = data['site_author_profile']
form.enable_link_github.data = data['enable_link_github']
form.enable_profile_img.data = data['enable_profile_img']
form.ogp_app_id.data = data["ogp_app_id"]
form.ga_tracking_id.data = data["ga_tracking_id"]
form.enable_twittercard.data = data["enable_twittercard"]
form.twitter_username.data = data["twitter_username"]
form.display_poweredby.data = data['display_poweredby']
return self.render('admin/setting.html', form=form) |
<|file_name|>setting.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
from flask import current_app, flash, url_for, request
from flask_admin import expose, BaseView
from logpot.admin.base import AuthenticateView, flash_errors
from logpot.admin.forms import SettingForm
from logpot.utils import ImageUtil, getDirectoryPath, loadSiteConfig, saveSiteConfig
import os
from PIL import Image
class SettingView(AuthenticateView, BaseView):
def saveProfileImage(self, filestorage):
<|fim_middle|>
@expose('/', methods=('GET','POST'))
def index(self):
form = SettingForm()
if form.validate_on_submit():
if form.profile_img.data:
file = form.profile_img.data
self.saveProfileImage(file)
data = {}
data['site_title'] = form.title.data
data['site_subtitle'] = form.subtitle.data
data['site_author'] = form.author.data
data['site_author_profile'] = form.author_profile.data
data['enable_link_github'] = form.enable_link_github.data
data['enable_profile_img'] = form.enable_profile_img.data
data["ogp_app_id"] = form.ogp_app_id.data
data["ga_tracking_id"] = form.ga_tracking_id.data
data["enable_twittercard"] = form.enable_twittercard.data
data["twitter_username"] = form.twitter_username.data
data['display_poweredby'] = form.display_poweredby.data
if saveSiteConfig(current_app, data):
flash('Successfully saved.')
else:
flash_errors('Oops. Save error.')
else:
flash_errors(form)
data = loadSiteConfig(current_app)
form.title.data = data['site_title']
form.subtitle.data = data['site_subtitle']
form.author.data = data['site_author']
form.author_profile.data = data['site_author_profile']
form.enable_link_github.data = data['enable_link_github']
form.enable_profile_img.data = data['enable_profile_img']
form.ogp_app_id.data = data["ogp_app_id"]
form.ga_tracking_id.data = data["ga_tracking_id"]
form.enable_twittercard.data = data["enable_twittercard"]
form.twitter_username.data = data["twitter_username"]
form.display_poweredby.data = data['display_poweredby']
return self.render('admin/setting.html', form=form)
<|fim▁end|> | buffer = filestorage.stream
buffer.seek(0)
image = Image.open(buffer)
image = ImageUtil.crop_image(image, 64)
current_app.logger.info(image)
dirpath = getDirectoryPath(current_app, '_settings')
filepath = os.path.join(dirpath, "profile.png")
image.save(filepath, optimize=True) |
<|file_name|>setting.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
from flask import current_app, flash, url_for, request
from flask_admin import expose, BaseView
from logpot.admin.base import AuthenticateView, flash_errors
from logpot.admin.forms import SettingForm
from logpot.utils import ImageUtil, getDirectoryPath, loadSiteConfig, saveSiteConfig
import os
from PIL import Image
class SettingView(AuthenticateView, BaseView):
def saveProfileImage(self, filestorage):
buffer = filestorage.stream
buffer.seek(0)
image = Image.open(buffer)
image = ImageUtil.crop_image(image, 64)
current_app.logger.info(image)
dirpath = getDirectoryPath(current_app, '_settings')
filepath = os.path.join(dirpath, "profile.png")
image.save(filepath, optimize=True)
@expose('/', methods=('GET','POST'))
def index(self):
<|fim_middle|>
<|fim▁end|> | form = SettingForm()
if form.validate_on_submit():
if form.profile_img.data:
file = form.profile_img.data
self.saveProfileImage(file)
data = {}
data['site_title'] = form.title.data
data['site_subtitle'] = form.subtitle.data
data['site_author'] = form.author.data
data['site_author_profile'] = form.author_profile.data
data['enable_link_github'] = form.enable_link_github.data
data['enable_profile_img'] = form.enable_profile_img.data
data["ogp_app_id"] = form.ogp_app_id.data
data["ga_tracking_id"] = form.ga_tracking_id.data
data["enable_twittercard"] = form.enable_twittercard.data
data["twitter_username"] = form.twitter_username.data
data['display_poweredby'] = form.display_poweredby.data
if saveSiteConfig(current_app, data):
flash('Successfully saved.')
else:
flash_errors('Oops. Save error.')
else:
flash_errors(form)
data = loadSiteConfig(current_app)
form.title.data = data['site_title']
form.subtitle.data = data['site_subtitle']
form.author.data = data['site_author']
form.author_profile.data = data['site_author_profile']
form.enable_link_github.data = data['enable_link_github']
form.enable_profile_img.data = data['enable_profile_img']
form.ogp_app_id.data = data["ogp_app_id"]
form.ga_tracking_id.data = data["ga_tracking_id"]
form.enable_twittercard.data = data["enable_twittercard"]
form.twitter_username.data = data["twitter_username"]
form.display_poweredby.data = data['display_poweredby']
return self.render('admin/setting.html', form=form) |
<|file_name|>setting.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
from flask import current_app, flash, url_for, request
from flask_admin import expose, BaseView
from logpot.admin.base import AuthenticateView, flash_errors
from logpot.admin.forms import SettingForm
from logpot.utils import ImageUtil, getDirectoryPath, loadSiteConfig, saveSiteConfig
import os
from PIL import Image
class SettingView(AuthenticateView, BaseView):
def saveProfileImage(self, filestorage):
buffer = filestorage.stream
buffer.seek(0)
image = Image.open(buffer)
image = ImageUtil.crop_image(image, 64)
current_app.logger.info(image)
dirpath = getDirectoryPath(current_app, '_settings')
filepath = os.path.join(dirpath, "profile.png")
image.save(filepath, optimize=True)
@expose('/', methods=('GET','POST'))
def index(self):
form = SettingForm()
if form.validate_on_submit():
<|fim_middle|>
else:
flash_errors(form)
data = loadSiteConfig(current_app)
form.title.data = data['site_title']
form.subtitle.data = data['site_subtitle']
form.author.data = data['site_author']
form.author_profile.data = data['site_author_profile']
form.enable_link_github.data = data['enable_link_github']
form.enable_profile_img.data = data['enable_profile_img']
form.ogp_app_id.data = data["ogp_app_id"]
form.ga_tracking_id.data = data["ga_tracking_id"]
form.enable_twittercard.data = data["enable_twittercard"]
form.twitter_username.data = data["twitter_username"]
form.display_poweredby.data = data['display_poweredby']
return self.render('admin/setting.html', form=form)
<|fim▁end|> | if form.profile_img.data:
file = form.profile_img.data
self.saveProfileImage(file)
data = {}
data['site_title'] = form.title.data
data['site_subtitle'] = form.subtitle.data
data['site_author'] = form.author.data
data['site_author_profile'] = form.author_profile.data
data['enable_link_github'] = form.enable_link_github.data
data['enable_profile_img'] = form.enable_profile_img.data
data["ogp_app_id"] = form.ogp_app_id.data
data["ga_tracking_id"] = form.ga_tracking_id.data
data["enable_twittercard"] = form.enable_twittercard.data
data["twitter_username"] = form.twitter_username.data
data['display_poweredby'] = form.display_poweredby.data
if saveSiteConfig(current_app, data):
flash('Successfully saved.')
else:
flash_errors('Oops. Save error.') |
<|file_name|>setting.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
from flask import current_app, flash, url_for, request
from flask_admin import expose, BaseView
from logpot.admin.base import AuthenticateView, flash_errors
from logpot.admin.forms import SettingForm
from logpot.utils import ImageUtil, getDirectoryPath, loadSiteConfig, saveSiteConfig
import os
from PIL import Image
class SettingView(AuthenticateView, BaseView):
def saveProfileImage(self, filestorage):
buffer = filestorage.stream
buffer.seek(0)
image = Image.open(buffer)
image = ImageUtil.crop_image(image, 64)
current_app.logger.info(image)
dirpath = getDirectoryPath(current_app, '_settings')
filepath = os.path.join(dirpath, "profile.png")
image.save(filepath, optimize=True)
@expose('/', methods=('GET','POST'))
def index(self):
form = SettingForm()
if form.validate_on_submit():
if form.profile_img.data:
<|fim_middle|>
data = {}
data['site_title'] = form.title.data
data['site_subtitle'] = form.subtitle.data
data['site_author'] = form.author.data
data['site_author_profile'] = form.author_profile.data
data['enable_link_github'] = form.enable_link_github.data
data['enable_profile_img'] = form.enable_profile_img.data
data["ogp_app_id"] = form.ogp_app_id.data
data["ga_tracking_id"] = form.ga_tracking_id.data
data["enable_twittercard"] = form.enable_twittercard.data
data["twitter_username"] = form.twitter_username.data
data['display_poweredby'] = form.display_poweredby.data
if saveSiteConfig(current_app, data):
flash('Successfully saved.')
else:
flash_errors('Oops. Save error.')
else:
flash_errors(form)
data = loadSiteConfig(current_app)
form.title.data = data['site_title']
form.subtitle.data = data['site_subtitle']
form.author.data = data['site_author']
form.author_profile.data = data['site_author_profile']
form.enable_link_github.data = data['enable_link_github']
form.enable_profile_img.data = data['enable_profile_img']
form.ogp_app_id.data = data["ogp_app_id"]
form.ga_tracking_id.data = data["ga_tracking_id"]
form.enable_twittercard.data = data["enable_twittercard"]
form.twitter_username.data = data["twitter_username"]
form.display_poweredby.data = data['display_poweredby']
return self.render('admin/setting.html', form=form)
<|fim▁end|> | file = form.profile_img.data
self.saveProfileImage(file) |
<|file_name|>setting.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
from flask import current_app, flash, url_for, request
from flask_admin import expose, BaseView
from logpot.admin.base import AuthenticateView, flash_errors
from logpot.admin.forms import SettingForm
from logpot.utils import ImageUtil, getDirectoryPath, loadSiteConfig, saveSiteConfig
import os
from PIL import Image
class SettingView(AuthenticateView, BaseView):
def saveProfileImage(self, filestorage):
buffer = filestorage.stream
buffer.seek(0)
image = Image.open(buffer)
image = ImageUtil.crop_image(image, 64)
current_app.logger.info(image)
dirpath = getDirectoryPath(current_app, '_settings')
filepath = os.path.join(dirpath, "profile.png")
image.save(filepath, optimize=True)
@expose('/', methods=('GET','POST'))
def index(self):
form = SettingForm()
if form.validate_on_submit():
if form.profile_img.data:
file = form.profile_img.data
self.saveProfileImage(file)
data = {}
data['site_title'] = form.title.data
data['site_subtitle'] = form.subtitle.data
data['site_author'] = form.author.data
data['site_author_profile'] = form.author_profile.data
data['enable_link_github'] = form.enable_link_github.data
data['enable_profile_img'] = form.enable_profile_img.data
data["ogp_app_id"] = form.ogp_app_id.data
data["ga_tracking_id"] = form.ga_tracking_id.data
data["enable_twittercard"] = form.enable_twittercard.data
data["twitter_username"] = form.twitter_username.data
data['display_poweredby'] = form.display_poweredby.data
if saveSiteConfig(current_app, data):
<|fim_middle|>
else:
flash_errors('Oops. Save error.')
else:
flash_errors(form)
data = loadSiteConfig(current_app)
form.title.data = data['site_title']
form.subtitle.data = data['site_subtitle']
form.author.data = data['site_author']
form.author_profile.data = data['site_author_profile']
form.enable_link_github.data = data['enable_link_github']
form.enable_profile_img.data = data['enable_profile_img']
form.ogp_app_id.data = data["ogp_app_id"]
form.ga_tracking_id.data = data["ga_tracking_id"]
form.enable_twittercard.data = data["enable_twittercard"]
form.twitter_username.data = data["twitter_username"]
form.display_poweredby.data = data['display_poweredby']
return self.render('admin/setting.html', form=form)
<|fim▁end|> | flash('Successfully saved.') |