prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k) |
---|---|
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
<|fim_middle|>
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | y_val = 0
x_val += 1 |
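Each row pairs a FIM-style prompt (prefix, a `<|fim_middle|>` gap, then the suffix, wrapped in `<|fim▁begin|>`/`<|fim▁end|>` sentinels) with the completion that belongs at the gap; splicing the first row's completion (`y_val = 0`, `x_val += 1`) back at its marker reproduces the grid-advance block that later rows show in full. A minimal reassembly sketch: the helper name is hypothetical, while the sentinel strings are taken verbatim from the rows.

```python
# Hypothetical helper: splice a row's completion back into its FIM prompt.
# Sentinel strings copied verbatim from the rows in this dump.
FIM_BEGIN = "<|fim▁begin|>"
FIM_MIDDLE = "<|fim_middle|>"
FIM_END = "<|fim▁end|>"
END_FILE_NAME = "<|end_file_name|>"


def reassemble(prompt: str, completion: str) -> str:
    """Return the full source file encoded by one (prompt, completion) row."""
    if END_FILE_NAME in prompt:                      # drop the <|file_name|> header
        prompt = prompt.split(END_FILE_NAME, 1)[1]
    body = prompt.replace(FIM_BEGIN, "").replace(FIM_END, "")
    return body.replace(FIM_MIDDLE, completion, 1)   # completion fills the gap
```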
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
<|fim_middle|>
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | plt.savefig(filename) |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
<|fim_middle|>
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | plt.show() |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
<|fim_middle|>
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | shape = type_format[values[stitcher.TYPE_ATTR]] |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
<|fim_middle|>
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | color = 'b'
alpha = 0.2 |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
<|fim_middle|>
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | color = 'r' |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
<|fim_middle|>
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | color = 'y' |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
<|fim_middle|>
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | dotted_line.append((src, trg)) |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
<|fim_middle|>
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | normal_line.append((src, trg)) |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
<|fim_middle|>
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | plt.savefig(filename) |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
<|fim_middle|>
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | plt.show() |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
<|fim_middle|>
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item) |
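The completion above jitters a request node when its spot over the container is already taken, but derives the y offset from `pos[nghb][0]` (the neighbour's x coordinate), which looks like a copy-paste slip. A corrected sketch, under the assumption that the offset was meant to start from `pos[nghb][1]`; the helper is illustrative only and not part of vis.py:

```python
import random

SPACE = 25  # elevation of the request layer, as in vis.py


def _offset_if_occupied(pos, nghb, cache):
    """Pick (x, y) for a request node above its neighbour, jittering on collision.

    Assumed fix: the y offset starts from pos[nghb][1] rather than
    pos[nghb][0] as in the listing above.
    """
    x_val, y_val = pos[nghb][0], pos[nghb][1]
    if (x_val, y_val) in cache.values():
        x_val = pos[nghb][0] + random.randint(10, SPACE)
        y_val = pos[nghb][1] + random.randint(10, SPACE)
    return x_val, y_val
```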
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
<|fim_middle|>
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue') |
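The completion above restores the placement step of _plot_3d_subplot: a request node is drawn at height SPACE directly over its container neighbour and nudged by a random offset when that spot is already taken. Below is a condensed, standalone sketch of that step; it is simplified to offset each coordinate from itself, and the jitter bounds are assumed from SPACE = 25.

import random

SPACE = 25  # elevation of the request layer, as in vis.py above

def place_above(nghb_xy, taken, low=10, high=SPACE):
    # Start directly above the neighbour; jitter both coordinates when the
    # spot is already occupied by another request node.
    x_val, y_val = nghb_xy
    if (x_val, y_val) in taken:
        x_val += random.randint(low, high)
        y_val += random.randint(low, high)
    return x_val, y_val

# illustrative check: a second node over the same neighbour gets nudged away
taken = {(100.0, 40.0)}
print(place_above((100.0, 40.0), taken))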
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
<|fim_middle|>
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE) |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
<|fim_middle|>
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue') |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
<|fim_middle|>
return int(n_rows), int(n_cols)
<|fim▁end|> | n_cols += 1 |
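The restored line is the widening branch of _get_size. A quick standalone spot-check of that grid arithmetic, with the function duplicated here so the asserts run on their own:

import math

def grid(n_items):
    # mirrors _get_size above: near-square grid, widened by one column if needed
    n_cols = math.ceil(math.sqrt(n_items))
    n_rows = math.floor(math.sqrt(n_items))
    if n_cols * n_rows < n_items:
        n_cols += 1
    return int(n_rows), int(n_cols)

assert grid(4) == (2, 2)  # perfect square, no widening
assert grid(5) == (2, 3)  # 2 * 3 = 6 already holds 5 plots
assert grid(7) == (2, 4)  # 2 * 3 = 6 < 7, so the extra column is added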
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def <|fim_middle|>(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | show |
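The completion above is just the function name, so a usage sketch may help: show() takes candidate graphs, the request graph, and one title per candidate. The graphs, node names and attribute values below are hypothetical; the only attribute keys assumed are stitcher.TYPE_ATTR (marker shape) and the optional 'rank' (colour), and nx_agraph.graphviz_layout additionally needs Graphviz with pygraphviz installed.

import networkx as nx
import stitcher
import vis  # assuming the module shown in these rows is importable as vis

container = nx.Graph()
container.add_node('db', **{stitcher.TYPE_ATTR: 'a', 'rank': 8})
container.add_node('web', **{stitcher.TYPE_ATTR: 'b', 'rank': 5})
container.add_edge('db', 'web')

request = nx.Graph()
request.add_node('new_svc', **{stitcher.TYPE_ATTR: 'c'})

candidate = container.copy()  # one possible stitch of the request onto the container
candidate.add_node('new_svc', **{stitcher.TYPE_ATTR: 'c'})
candidate.add_edge('new_svc', 'web')

vis.show([candidate], request, ['candidate 0'], filename='candidates.png')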
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def <|fim_middle|>(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | _plot_subplot |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def <|fim_middle|>(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | show_3d |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def <|fim_middle|>(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | _plot_3d_subplot |
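All of the layouts above come from nx.nx_agraph.graphviz_layout, which only works when Graphviz and pygraphviz are installed. The wrapper below is not part of the module, just a sketch of a common fallback; note that spring_layout coordinates live on a much smaller scale than Graphviz point coordinates.

import networkx as nx

def safe_layout(graph, prog='neato'):
    # Prefer the Graphviz layout used by vis.py; fall back to a pure-Python
    # layout when pygraphviz (or the Graphviz binaries) are missing.
    try:
        return nx.nx_agraph.graphviz_layout(graph, prog=prog)
    except ImportError:
        return nx.spring_layout(graph, seed=42)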
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def <|fim_middle|>(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
<|fim▁end|> | _get_size |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):<|fim▁hole|> else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same target, only one of them is retrieved
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)<|fim▁end|> | completed.add(f.split('.')[0]) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
<|fim_middle|>
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same target, only one of them is retrieved
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger |
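The restored body builds a DEBUG-level logger backed by a 1 GB rotating file with ten backups. A small, hypothetical usage sketch follows; the ScienceBase id and URL are made up, the import assumes the file above is importable as crawler, and the tab-separated message style copies the log calls in crawler.py.

from crawler import create_logger  # assumption: the module above is named crawler

log = create_logger('crawl.log')
log.info("%s\tOK\t%s" % ("sbid123", "http://example.com/a.pdf"))
log.warn("%s\tDUPLICATED\t%s" % ("sbid123", "http://example.com/a.pdf"))
# each line lands in crawl.log as: [YYYY-mm-dd HH:MM:SS] LEVEL message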
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
<|fim_middle|>
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same target, only one of them is retrieved
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | def __init__(self, sbid, url):
self.sbid = sbid
self.url = url |
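_urlfetch above sleeps for a random spell after each server error and widens the window exponentially with the retry counter (the Wikipedia link in the code names the scheme). A quick spot-check of how that window grows:

import random

# After a 5xx response on retry i, _urlfetch redraws sleep_time from [0, 2**i - 1].
for i in range(1, 6):
    upper = 2 ** i - 1
    print("retry %d: sleep window 0..%d s, drew %d s" % (i, upper, random.randint(0, upper)))
# the windows grow as 1, 3, 7, 15, 31 seconds over the first five retries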
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
<|fim_middle|>
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs ultimately redirect to the same page, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | self.sbid = sbid
self.url = url |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
<|fim_middle|>
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs ultimately redirect to the same page, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | """Download the PDF or search for the webpage for any PDF link
Args:
url: the URL to fetch (assumed to be valid)
sbid: ScienceBase ID, used to build the output filenames
output_folder: directory where downloaded PDFs are written
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper around urllib.urlretrieve (when a filename is given) or
urllib2.urlopen (when it is not).
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name))) |
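The `# Sleep longer if it is server error` branch above applies capped binary exponential backoff to HTTP 5xx responses; the following standalone sketch (illustrative only, not part of any dataset row, seed argument is an invented convenience) shows the sleep-time schedule it produces for the default of 10 retries:

import random

def backoff_schedule(retries=10, seed=None):
    # Mirror the rule used in _urlfetch for server errors: after the i-th
    # failed attempt, sleep a value drawn uniformly from [0, 2**i - 1].
    rng = random.Random(seed)
    return [rng.randint(0, 2 ** i - 1) for i in range(1, retries + 1)]

# Example: the upper bound grows 1, 3, 7, 15, ... while each draw stays random.
print(backoff_schedule(seed=42))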
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
<|fim_middle|>
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs ultimately redirect to the same page, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | """
A wrapper around urllib.urlretrieve (when a filename is given) or
urllib2.urlopen (when it is not).
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper for either urlopen or urlretrieve. It depends on the whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
<|fim_middle|>
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs ultimately redirect to the same page, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | """
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l |
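get_tasks above assumes rows shaped by csv.DictReader with ScienceBaseID, webLinks__uri and an optional Action column; a minimal sketch of that same filter applied to hypothetical in-memory rows (all sample values are invented):

# Hypothetical rows in the shape csv.DictReader would yield from the task CSV.
sample_rows = [
    {'ScienceBaseID': 'a1b2c3d4', 'webLinks__uri': 'http://example.org/report.pdf', 'Action': ''},
    {'ScienceBaseID': 'e5f6g7h8', 'webLinks__uri': 'http://example.org/page.html', 'Action': 'ignore for now'},
]
kept = [row for row in sample_rows
        if not ('Action' in row and row['Action'].lower() == 'ignore for now')]
print(kept)  # only the first row survives, matching get_tasks' filter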
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper for either urlopen or urlretrieve. It depends on the whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
<|fim_middle|>
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both url will redirect to the same url finally, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | """
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed |
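get_completed_tasks above accepts a file only if it begins with the PDF magic marker; a small standalone helper sketching the same check (the path in the usage comment is hypothetical):

def looks_like_pdf(filepath):
    # Every valid PDF begins with the bytes "%PDF" (e.g. "%PDF-1.4");
    # anything else is treated as a broken or partial download.
    with open(filepath, 'rb') as fh:
        return fh.read(4) == b'%PDF'

# Hypothetical usage:
# if not looks_like_pdf('pdfs/a1b2c3d4.report.pdf'):
#     print('partial download; it would be re-fetched on the next run')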
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper for either urlopen or urlretrieve. It depends on the whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
<|fim_middle|>
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs ultimately redirect to the same page, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | """main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join() |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper for either urlopen or urlretrieve. It depends on the whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
<|fim_middle|>
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | """
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs ultimately redirect to the same page, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e)) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper for either urlopen or urlretrieve. It depends on the whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs ultimately redirect to the same page, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
<|fim_middle|>
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | print crawl(argv[1], '/scratch/pdfs') |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper for either urlopen or urlretrieve. It depends on the whether
there is a filename as input
"""
if filename and os.path.exists(filename):
<|fim_middle|>
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs ultimately redirect to the same page, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper for either urlopen or urlretrieve. It depends on the whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
<|fim_middle|>
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs ultimately redirect to the same page, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url)) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper for either urlopen or urlretrieve. It depends on the whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
<|fim_middle|>
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both url will redirect to the same url finally, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | result = urllib2.urlopen(url).read() |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper for either urlopen or urlretrieve. It depends on the whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No logging here, because the existence of the PDF
# is verified later on
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
<|fim_middle|>
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | sleep_time = random.randint(0, 2 ** i - 1) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
<|fim_middle|>
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
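    # Launch 128 worker threads; each one pops tasks from the shared global TASKS list until it is empty.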
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name))) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
<|fim_middle|>
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name))) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
<|fim_middle|>
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
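    # Join every thread except the main one, i.e. wait for all workers to finish.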
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
<|fim_middle|>
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | continue |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
<|fim_middle|>
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | l.append(row) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
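            # A valid PDF starts with the "%PDF" magic bytes; anything else is treated as a broken download and removed.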
if head_line.startswith("%PDF"):
<|fim_middle|>
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | completed.add(f.split('.')[0]) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
<|fim_middle|>
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | os.remove(filepath)
print 'deleted: ', filepath, head_line |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
<|fim_middle|>
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | excluded = get_completed_tasks(output_folder) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
<|fim_middle|>
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | continue |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF or search for the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper around urlopen/urlretrieve; which of the two is used
        depends on whether a filename is given.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
<|fim_middle|>
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both URLs eventually redirect to the same location, it is retrieved only once
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | break |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
<|fim_middle|>
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | continue |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
<|fim_middle|>
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | print "%i has finished %i" % (threading.current_thread().ident, finished) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | import sys
main(sys.argv) |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def <|fim_middle|>(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | create_logger |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def <|fim_middle|>(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | __init__ |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def <|fim_middle|>(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | retrieve |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def <|fim_middle|>(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | _urlfetch |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def <|fim_middle|>(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | get_tasks |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def <|fim_middle|>(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | get_completed_tasks |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def <|fim_middle|>(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""main function
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Thread working function
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
            # since both urls eventually redirect to the same url, I did not retrieve them twice
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | crawl |
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
    """Download the PDF, or search the webpage for any PDF link
Args:
url, assuming the input url is valid
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
        A wrapper for either urlopen or urlretrieve. It depends on whether
there is a filename as input
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# No log now, because later we would like to ensure
                    # the existence of PDFs
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Sleep longer if it is server error
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""Main entry point: load tasks from the CSV and fan them out to 128 crawler threads.
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def <|fim_middle|>(output_folder, excluded=set()):
"""
Worker function run by each thread: pop tasks from the shared list until it is empty.
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs eventually redirect to the same final page, only one of them is retrieved
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def main(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | crawler |
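The _urlfetch helper above retries failed requests, and on HTTP 5xx responses it sleeps for a random interval of up to 2**i - 1 seconds, the exponential backoff its comment links to. Detached from the crawler, the same pattern can be sketched as below; fetch_with_backoff and max_retries are illustrative names rather than part of the original module, and the sketch deliberately omits the logging and URLError handling of the real function.

# Simplified sketch of the retry-with-exponential-backoff pattern used by _urlfetch.
import random
import time
import urllib2

def fetch_with_backoff(url, max_retries=10):
    for attempt in range(1, max_retries + 1):
        try:
            return urllib2.urlopen(url).read()
        except urllib2.HTTPError as e:
            if e.code // 100 == 5:
                # server-side error: wait up to 2**attempt - 1 seconds before retrying
                time.sleep(random.randint(0, 2 ** attempt - 1))
            else:
                # other HTTP errors: short, roughly constant pause
                time.sleep(random.random() + 0.5)
    raise RuntimeError("giving up on %s after %d attempts" % (url, max_retries))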
<|file_name|>crawler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
crawler.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import csv
import urllib2
import urllib
import re
import os
import urlparse
import threading
import logging
import logging.handlers
import time
import random
import bs4
MINIMUM_PDF_SIZE = 4506
TASKS = None
def create_logger(filename, logger_name=None):
logger = logging.getLogger(logger_name or filename)
fmt = '[%(asctime)s] %(levelname)s %(message)s'
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1024 * 1024 * 1024, backupCount=10)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
log = create_logger('crawl.log')
class ExceedMaximumRetryError(Exception):
def __init__(self, sbid, url):
self.sbid = sbid
self.url = url
def retrieve(url, sbid, output_folder):
"""Download the PDF directly, or search the linked webpage for any PDF links.
Args:
url: the page or PDF URL to fetch (assumed to be valid)
sbid: ScienceBase ID used to name the saved file(s)
output_folder: directory where the downloaded PDFs are written
"""
def _urlfetch(url, sbid, filename=None, retry=10):
"""
A wrapper around urllib2.urlopen and urllib.urlretrieve: if a filename is
given, the URL is saved to that file; otherwise the response body is returned.
"""
if filename and os.path.exists(filename):
log.warn("%s\tDUPLICATED\t%s" % (sbid, url))
return None
sleep_time = random.random() + 0.5
for i in range(1, retry+1):
try:
result = None
if filename:
result = urllib.urlretrieve(url, filename)
log.info("%s\tOK\t%s" % (sbid, url))
else:
# Do not log success here; the existence of the PDFs is verified later
result = urllib2.urlopen(url).read()
return result
except urllib.ContentTooShortError as e:
log.warn("%s\tContentTooShortError\t%s\tRetry:%i&Sleep:%.2f" %
(sbid, url, i, sleep_time))
time.sleep(sleep_time)
except urllib2.HTTPError as e:
log.warn("%s\tHTTP%i\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, e.code, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
# Back off for longer if it is a server error (HTTP 5xx)
# http://en.wikipedia.org/wiki/Exponential_backoff
if e.code / 100 == 5:
sleep_time = random.randint(0, 2 ** i - 1)
except urllib2.URLError as e:
log.warn("%s\tURLError\t%s\tRetry:%i&Sleep:%.2f\t%s" %
(sbid, url, i, sleep_time, e.reason))
time.sleep(sleep_time)
raise ExceedMaximumRetryError(sbid=sbid, url=url)
if url.endswith('.pdf'):
#: sbid is not unique, so use sbid+pdfname as new name
pdf_name = url.split('/')[-1].split('.')[0]
_urlfetch(url, sbid, os.path.join(output_folder, "%s.%s.pdf" % (sbid, pdf_name)))
else:
page = _urlfetch(url, sbid)
soup = bs4.BeautifulSoup(page)
anchors = soup.findAll('a', attrs={'href': re.compile(".pdf$", re.I)})
if not anchors:
log.warn("%s\tNO_PDF_DETECTED\t%s" % (sbid, url))
return None
for a in anchors:
href = a['href']
pdf_name = href.split('/')[-1]
sub_url = urlparse.urljoin(url, href)
_urlfetch(sub_url, sbid, os.path.join(output_folder, "%s.%s" % (sbid, pdf_name)))
def get_tasks(csv_filepath):
"""
Returns:
[{'ScienceBaseID': a1b2c3d4, 'webLinks__uri': 'http://balabala'}, {}]
"""
l = []
with open(csv_filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if 'Action' in row and row['Action'].lower() == 'ignore for now':
continue
else:
l.append(row)
return l
def get_completed_tasks(output_folder):
"""
Return downloaded tasks
"""
completed = set()
for f in os.listdir(output_folder):
filepath = os.path.join(output_folder, f)
with open(filepath, 'r') as ff:
head_line = ff.readline()
#if os.stat(filepath).st_size > MINIMUM_PDF_SIZE:
if head_line.startswith("%PDF"):
completed.add(f.split('.')[0])
else:
os.remove(filepath)
print 'deleted: ', filepath, head_line
return completed
def crawl(csv_filepath, output_folder='pdfs', exclude_downloaded=False):
"""Main entry point: load tasks from the CSV and fan them out to 128 crawler threads.
"""
global TASKS
TASKS = get_tasks(csv_filepath)
excluded = set()
if exclude_downloaded:
excluded = get_completed_tasks(output_folder)
for i in range(128):
t = threading.Thread(target=crawler, args=(output_folder, excluded))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def crawler(output_folder, excluded=set()):
"""
Worker function run by each thread: pop tasks from the shared list until it is empty.
"""
finished = 0
print "thread %i has started, exclude %i items" %\
(threading.current_thread().ident, len(excluded))
global TASKS
while True:
task = None
try:
task = TASKS.pop()
except IndexError:
print "thread %i finished %i tasks, exiting for no task available"\
% (threading.current_thread().ident, finished)
break
try:
if not task:
break
sbid = task['ScienceBaseID']
# some webLinks__uri looks like:
# http://www.springerlink.com/content/p543611u8317w447/?p=a0e7243d602f4bd3b33b2089b2ed92e4&pi=5 ; http://www.springerlink.com/content/p543611u8317w447/fulltext.pdf
# since both URLs eventually redirect to the same final page, only one of them is retrieved
url = task['webLinks__uri']
if sbid in excluded:
continue
retrieve(url, sbid, output_folder)
finished += 1
if finished % 20 == 0:
print "%i has finished %i" % (threading.current_thread().ident, finished)
except ExceedMaximumRetryError as e:
log.error("%s\tEXCEED_MAXIMUM_RETRY\t%s" % (e.sbid, e.url))
except Exception as e:
print e, task
log.error("%s\tUNEXPECTED\t%s\t%s" % (sbid, url, e))
def <|fim_middle|>(argv):
print crawl(argv[1], '/scratch/pdfs')
if __name__ == '__main__':
import sys
main(sys.argv)
<|fim▁end|> | main |
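crawl() above starts 128 threads that pop() work items off a shared global list, which works in CPython because list.pop() is atomic under the GIL. A more conventional variant of the same fan-out, using a thread-safe queue, might look like the sketch below; run_workers, handle and n_threads are invented names and this is not part of crawler.py.

# Alternative sketch of the thread fan-out in crawl()/crawler(), using the Queue module.
import threading
import Queue

def run_workers(tasks, handle, n_threads=128):
    q = Queue.Queue()
    for t in tasks:
        q.put(t)

    def worker():
        while True:
            try:
                task = q.get_nowait()
            except Queue.Empty:
                return  # no tasks left, let the thread exit
            try:
                handle(task)
            finally:
                q.task_done()

    threads = [threading.Thread(target=worker) for _ in range(n_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()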
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None<|fim▁hole|> out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script<|fim▁end|> | if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax') |
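For context, epitopefinder_plotdistributioncomparison.py reads a single input file of key / value pairs through epitopefinder.io.ParseInfile, using at least the keys referenced above: plotfile, epitopesfile1, epitopesfile2, set1name, set2name, title, pvalue, pvaluewithreplacement and (optionally) ymax. The snippet below writes a purely hypothetical example of such a file; the file names and values are invented, and the one-pair-per-line layout is an assumption about ParseInfile rather than something stated in the code.

# Hypothetical input file for the plotting script; only the key names come from the code above.
example = """\
plotfile comparison_plot.pdf
epitopesfile1 epitopesbysite_all.csv
epitopesfile2 epitopesbysite_subset.csv
set1name all
set2name subset
title None
pvalue 10000
pvaluewithreplacement True
ymax None
"""
with open('plot_comparison_infile.txt', 'w') as f:
    f.write(example)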
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
<|fim_middle|>
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | """Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n") |
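The epitopesfile1 / epitopesfile2 inputs parsed above are CSV files whose first line is a header and whose remaining lines hold site,count pairs; only the count column feeds the plotted distributions. A tiny, entirely invented example of producing such a file:

# Hypothetical epitopes-by-site CSV in the format the parsing loop above expects
# (the header line is skipped; '#' lines and blank lines are ignored).
rows = [(1, 0), (2, 3), (3, 1), (4, 0)]
with open('epitopesbysite_subset.csv', 'w') as f:
    f.write('site,number_of_epitopes\n')
    for site, n in rows:
        f.write('%d,%d\n' % (site, n))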
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
<|fim_middle|>
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | raise ImportError("Cannot import matplotlib / pylab, which are required by this script.") |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
<|fim_middle|>
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | raise IOError("Script must be called with exactly one argument specifying the input file") |
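The pvalue / pvaluewithreplacement options above configure a resampling comparison between the two site sets, which is also why set 2 must contain fewer sites than set 1. The sketch below illustrates a generic resampling test of that kind; it is not taken from epitopefinder.plot, may differ from what PlotDistributionComparison actually computes, and all names in it are invented.

# Generic resampling-test sketch (illustrative only; not epitopefinder.plot's implementation).
import random

def resample_pvalue(counts1, counts2, nrand=10000, with_replacement=False):
    """Fraction of random draws from counts1 whose mean is >= the observed mean of counts2."""
    observed = sum(counts2) / float(len(counts2))
    nge = 0
    for _ in range(nrand):
        if with_replacement:
            sample = [random.choice(counts1) for _ in counts2]
        else:
            sample = random.sample(counts1, len(counts2))
        if sum(sample) / float(len(sample)) >= observed:
            nge += 1
    return (nge + 1.0) / (nrand + 1.0)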
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
<|fim_middle|>
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | raise IOError("Failed to find infile %s" % infilename) |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
<|fim_middle|>
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | raise ValueError("%s specifies more than one file" % xf) |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
<|fim_middle|>
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | (site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n) |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
<|fim_middle|>
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | raise ValueError("%s failed to specify information for any sites" % xf) |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
<|fim_middle|>
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | title = None |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
<|fim_middle|>
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | pvalue = None
pvaluewithreplacement = None |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
<|fim_middle|>
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.") |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
<|fim_middle|>
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | raise ValueError("pvalue must be >= 1") |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
<|fim_middle|>
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.") |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
<|fim_middle|>
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | ymax = epitopefinder.io.ParseFloatValue(d, 'ymax') |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | main() # run the script |
<|file_name|>epitopefinder_plotdistributioncomparison.py<|end_file_name|><|fim▁begin|>#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def <|fim_middle|>():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
<|fim▁end|> | main |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate<|fim▁hole|>
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp<|fim▁end|> | |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
<|fim_middle|>
<|fim▁end|> | """
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
<|fim_middle|>
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
<|fim_middle|>
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | """
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
<|fim_middle|>
<|fim▁end|> | """
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
<|fim_middle|>
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | obsindx = obs['id'] |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
<|fim_middle|>
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | print("Warning: observation ID is not set, using zero!") |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
<|fim_middle|>
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | telescope = obs['telescope_id'] |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
<|fim_middle|>
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | global_offset = obs['global_offset'] |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
<|fim_middle|>
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | nse = obs[self._noisekey] |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
<|fim_middle|>
if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniformly distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey)) |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
<|fim_middle|>
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented') |
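Editor's note: the row above masks the guard that rejects TODs without a chunked sample distribution; once past it, exec() walks the local chunks while a running global sample index advances by however many samples each call simulated. A stand-alone sketch of that bookkeeping (the chunk sizes and the stub worker are invented for illustration):

def run_chunks(first_sample, chunk_sizes, simulate_chunk):
    # Each chunk starts where the previous one ended, as in exec().
    chunk_first = first_sample
    for n_samp in chunk_sizes:
        chunk_first += simulate_chunk(chunk_first, n_samp)
    return chunk_first

# The stub just reports how many samples it handled, like simulate_chunk().
assert run_chunks(0, [100, 250, 50], lambda start, n: n) == 400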
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
<|fim_middle|>
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | times = tod.local_times() |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
<|fim_middle|>
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | times = None |
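Editor's note: every sim_noise_timestream call in these prompts is keyed purely by integers (realization, telescope, component, observation index, PSD key index, first sample), which is what makes the draws reproducible regardless of which process owns the chunk. As an illustration of the idea only, not the toast implementation, the same keying expressed with NumPy's SeedSequence:

import numpy as np

def keyed_noise(realization, telescope, component, obsindx, keyindx,
                first_samp, n_samp):
    # Identical key tuples always reproduce the identical Gaussian stream.
    seed = np.random.SeedSequence(
        [realization, telescope, component, obsindx, keyindx, first_samp])
    return np.random.default_rng(seed).standard_normal(n_samp)

a = keyed_noise(0, 0, 0, 42, 1, 1000, 4)
b = keyed_noise(0, 0, 0, 42, 1, 1000, 4)
assert np.allclose(a, b)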
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
<|fim_middle|>
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp])) |
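Editor's note: the span masked in the row above estimates the effective sampling rate as the reciprocal of the median timestamp spacing, so a few glitched stamps do not bias the estimate. A quick numeric check (the 100 Hz cadence and the glitch are made-up values):

import numpy as np

times = np.arange(0.0, 1.0, 0.01)   # nominal 100 Hz timestamps
times[50] += 0.004                  # one late sample
rate = 1 / np.median(np.diff(times))
print(rate)                         # ~100.0 despite the glitch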
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
<|fim_middle|>
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | rate = self._rate |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
<|fim_middle|>
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | continue |
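Editor's note: the continue masked above implements a cheap screen, so a PSD key is only simulated when at least one locally assigned detector mixes it in with a nonzero weight. The same screen written against a plain weight table (detector and key names are invented):

def keys_to_simulate(keys, local_dets, weight):
    # weight(det, key) -> mixing coefficient, zero when the key is unused
    return [key for key in keys
            if any(weight(det, key) != 0.0 for det in local_dets)]

table = {('d0', 'psd_a'): 1.0, ('d1', 'psd_a'): 0.0}
print(keys_to_simulate(['psd_a', 'psd_b'], ['d0', 'd1'],
                       lambda d, k: table.get((d, k), 0.0)))  # ['psd_a']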
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
<|fim_middle|>
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | continue |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
<|fim_middle|>
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | ref = tod.cache.reference(cachename) |
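Editor's note: both cache branches masked in the rows around here feed the same mixing model, where a detector's simulated noise is the weighted sum of the per-key streams, noise_det = sum over k of weight(det, k) * stream_k. A compact restatement with plain arrays (the stream values, weights and detector names are invented):

import numpy as np

streams = {'common': np.array([1.0, -1.0, 0.5]),
           'det0':   np.array([0.1,  0.2, 0.3])}
weights = {('det0', 'common'): 1.0, ('det0', 'det0'): 1.0,
           ('det1', 'common'): 1.0}

def mixed_noise(det, n_samp=3):
    out = np.zeros(n_samp)
    for key, stream in streams.items():
        out += weights.get((det, key), 0.0) * stream
    return out

print(mixed_noise('det1'))  # only the common mode: [ 1.  -1.   0.5]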
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
<|fim_middle|>
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], )) |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def <|fim_middle|>(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | __init__ |
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def <|fim_middle|>(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (array): Timestamps of the local samples, used to estimate the
effective sample rate when no fixed rate is given.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | exec |
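The docstring above emphasizes that the observation 'id', together with the realization, telescope and component indices, makes the generated noise reproducible. The sketch below illustrates that idea with NumPy's SeedSequence; toast's own counter-based RNG is a different implementation, so this is an analogy rather than the library call.

import numpy as np

def noise_stream(realization, telescope, component, obsindx, keyindx, nsamp):
    # Derive an independent, reproducible stream from the index tuple.
    seed = np.random.SeedSequence([realization, telescope, component,
                                   obsindx, keyindx])
    return np.random.default_rng(seed).standard_normal(nsamp)

# The same index tuple always reproduces the same samples:
a = noise_stream(0, 0, 0, 42, 3, 5)
b = noise_stream(0, 0, 0, 42, 3, 5)
assert np.array_equal(a, b)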
<|file_name|>sim_det_noise.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'id' entry used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
rate (float): fixed sample rate to use for every chunk. If None, the
effective rate is estimated from the timestamps of each chunk.
altFFT (bool): if True, select the alternative FFT implementation for
noise generation (currently unused by the active code path).
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
KeyError: If an observation in data does not have noise
object defined under given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def <|fim_middle|>(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (array): Timestamps of the local samples, used to estimate the
effective sample rate when no fixed rate is given.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
<|fim▁end|> | simulate_chunk |
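When no fixed rate is given, simulate_chunk above estimates the effective sample rate as the inverse of the median timestamp spacing. A standalone illustration with made-up 10 Hz timestamps that contain a gap:

import numpy as np

times = np.concatenate([np.arange(0.0, 1.0, 0.1),   # 10 Hz samples ...
                        np.arange(2.0, 3.0, 0.1)])  # ... resuming after a 1 s gap
rate = 1.0 / np.median(np.diff(times))
print(rate)  # ~10; the median keeps the estimate robust to the gap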
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""
This package supplies tools for working with automated services
connected to a server. It was written with IRC in mind, so it's not
very generic: it pretty much assumes a single client connected to a
central server, and it's not easy for a client to add further
connections at runtime (possible, but you might have to avoid
selector.Reactor.loop).
"""
__all__ = [
"irc",
"selector",<|fim▁hole|> ]<|fim▁end|> | "connection",
"irc2num" |
<|file_name|>unassignedbugs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2
import urllib2
import urllib
from BeautifulSoup import BeautifulSoup
import smtplib
import ConfigParser
# Retrieve user information
config = ConfigParser.ConfigParser()
config.read('config.cfg')
user = config.get('data','user')
password = config.get('data','password')
fromaddr = config.get('data','fromaddr')
toaddr = config.get('data','toaddr')
smtpserver = config.get('data','smtp_server')
login_page='https://bugs.archlinux.org/index.php?do=authenticate'
# Create message
msg = "To: %s \nFrom: %s \nSubject: Bug Mail\n\n" % (toaddr,fromaddr)
msg += 'Unassigned bugs \n\n'
# build opener with HTTPCookieProcessor
o = urllib2.build_opener( urllib2.HTTPCookieProcessor() )
urllib2.install_opener( o )
p = urllib.urlencode( { 'user_name': user, 'password': password, 'remember_login' : 'on',} )
f = o.open(login_page, p)
data = f.read()
# Archlinux
url = "https://bugs.archlinux.org/index.php?string=&project=1&search_name=&type%5B%5D=&sev%5B%5D=&pri%5B%5D=&due%5B%5D=0&reported%5B%5D=&cat%5B%5D=&status%5B%5D=1&percent%5B%5D=&opened=&dev=&closed=&duedatefrom=&duedateto=&changedfrom=&changedto=&openedfrom=&openedto=&closedfrom=&closedto=&do=index"
# Community
url2= "https://bugs.archlinux.org/index.php?string=&project=5&search_name=&type%5B%5D=&sev%5B%5D=&pri%5B%5D=&due%5B%5D=0&reported%5B%5D=&cat%5B%5D=&status%5B%5D=1&percent%5B%5D=&opened=&dev=&closed=&duedatefrom=&duedateto=&changedfrom=&changedto=&openedfrom=&openedto=&closedfrom=&closedto=&do=index"
def parse_bugtrackerpage(url,count=1):
print url
# open bugtracker / parse
page = urllib2.urlopen(url)
soup = BeautifulSoup(page)
data = soup.findAll('td',{'class':'task_id'})
msg = ""
pages = False
# Is there another page with unassigned bugs
if soup.findAll('a',{'id': 'next' }) == []:
page = False
else:
print soup.findAll('a',{'id': 'next'})
count += 1<|fim▁hole|> pages = True
print count
# print all found bugs
for f in data:
# strip the 'Assigned |' and '| 0%' decorations from the link title
title = f.a['title'].replace('Assigned |','').replace('| 0%','')
msg += '* [https://bugs.archlinux.org/task/%s FS#%s] %s \n' % (f.a.string,f.a.string,title)
if pages == True:
new = "%s&pagenum=%s" % (url,count)
msg += parse_bugtrackerpage(new,count)
return msg
msg += '\n\nArchlinux: \n\n'
msg += parse_bugtrackerpage(url)
msg += '\n\nCommunity: \n\n'
msg += parse_bugtrackerpage(url2)
msg = msg.encode("utf8")
# send mail
server = smtplib.SMTP(smtpserver)
server.sendmail(fromaddr, toaddr,msg)
server.quit()<|fim▁end|> | |
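The script above logs in with a cookie-aware opener and then pulls the bug numbers out of <td class="task_id"> cells. Below is a self-contained sketch of just that parsing step, in the same Python 2 / BeautifulSoup 3 style; the HTML snippet is invented, so the real tracker markup may differ.

from BeautifulSoup import BeautifulSoup

html = """
<table>
<tr><td class="task_id"><a href="/task/101" title="Low | example bug | 0%">101</a></td></tr>
<tr><td class="task_id"><a href="/task/102" title="Low | another bug | 0%">102</a></td></tr>
</table>
"""
soup = BeautifulSoup(html)
for cell in soup.findAll('td', {'class': 'task_id'}):
    # cell.a.string is the task number, cell.a['title'] the summary text
    print cell.a.string, cell.a['title']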
<|file_name|>unassignedbugs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2
import urllib2
import urllib
from BeautifulSoup import BeautifulSoup
import smtplib
import ConfigParser
# Retrieve user information
config = ConfigParser.ConfigParser()
config.read('config.cfg')
user = config.get('data','user')
password = config.get('data','password')
fromaddr = config.get('data','fromaddr')
toaddr = config.get('data','toaddr')
smtpserver = config.get('data','smtp_server')
login_page='https://bugs.archlinux.org/index.php?do=authenticate'
# Create message
msg = "To: %s \nFrom: %s \nSubject: Bug Mail\n\n" % (toaddr,fromaddr)
msg += 'Unassigned bugs \n\n'
# build opener with HTTPCookieProcessor
o = urllib2.build_opener( urllib2.HTTPCookieProcessor() )
urllib2.install_opener( o )
p = urllib.urlencode( { 'user_name': user, 'password': password, 'remember_login' : 'on',} )
f = o.open(login_page, p)
data = f.read()
# Archlinux
url = "https://bugs.archlinux.org/index.php?string=&project=1&search_name=&type%5B%5D=&sev%5B%5D=&pri%5B%5D=&due%5B%5D=0&reported%5B%5D=&cat%5B%5D=&status%5B%5D=1&percent%5B%5D=&opened=&dev=&closed=&duedatefrom=&duedateto=&changedfrom=&changedto=&openedfrom=&openedto=&closedfrom=&closedto=&do=index"
# Community
url2= "https://bugs.archlinux.org/index.php?string=&project=5&search_name=&type%5B%5D=&sev%5B%5D=&pri%5B%5D=&due%5B%5D=0&reported%5B%5D=&cat%5B%5D=&status%5B%5D=1&percent%5B%5D=&opened=&dev=&closed=&duedatefrom=&duedateto=&changedfrom=&changedto=&openedfrom=&openedto=&closedfrom=&closedto=&do=index"
def parse_bugtrackerpage(url,count=1):
<|fim_middle|>
msg += '\n\nArchlinux: \n\n'
msg += parse_bugtrackerpage(url)
msg += '\n\nCommunity: \n\n'
msg += parse_bugtrackerpage(url2)
msg = msg.encode("utf8")
# send mail
server = smtplib.SMTP(smtpserver)
server.sendmail(fromaddr, toaddr,msg)
server.quit()
<|fim▁end|> | print url
# open bugtracker / parse
page = urllib2.urlopen(url)
soup = BeautifulSoup(page)
data = soup.findAll('td',{'class':'task_id'})
msg = ""
pages = False
# Is there another page with unassigned bugs
if soup.findAll('a',{'id': 'next' }) == []:
page = False
else:
print soup.findAll('a',{'id': 'next'})
count += 1
pages = True
print count
# print all found bugs
for f in data:
# strip the 'Assigned |' and '| 0%' decorations from the link title
title = f.a['title'].replace('Assigned |','').replace('| 0%','')
msg += '* [https://bugs.archlinux.org/task/%s FS#%s] %s \n' % (f.a.string,f.a.string,title)
if pages == True:
new = "%s&pagenum=%s" % (url,count)
msg += parse_bugtrackerpage(new,count)
return msg |
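The filled-in body above recurses to the next page for as long as the tracker reports a 'next' link. The same pagination pattern, isolated from the network so it runs standalone; the page data below is made up.

fake_pages = {1: (['FS#101', 'FS#102'], True),   # (bug lines, has another page)
              2: (['FS#103'], False)}

def collect(pagenum=1):
    bugs, has_next = fake_pages[pagenum]
    text = ''.join('* %s \n' % bug for bug in bugs)
    if has_next:
        # mirror parse_bugtrackerpage: append the next page's results
        text += collect(pagenum + 1)
    return text

print collect()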