code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
points = self._read_from_sql("SELECT * FROM history;", db_name)
return list(points.columns.values)[1:]
|
def points_from_sql(self, db_name)
|
Retrieve point list from SQL database
| 8.967523 | 8.163185 | 1.098532 |
his = self._read_from_sql('select * from "%s"' % "history", db_name)
his.index = his["index"].apply(Timestamp)
return his.set_index("index")[point]
|
def his_from_sql(self, db_name, point)
|
Retrieve point histories from SQL database
| 7.180213 | 6.713727 | 1.069482 |
with open("%s.bin" % device_name, "rb") as file:
return pickle.load(file)["points"][point]
|
def read_point_prop(self, device_name, point)
|
Points properties retrieved from pickle
| 5.477357 | 4.757221 | 1.151378 |
with open("{}.bin".format(device_name), "rb") as file:
return pickle.load(file)["device"]
|
def read_dev_prop(self, device_name)
|
Device properties retrieved from pickle
| 6.335277 | 4.922899 | 1.2869 |
try:
res = self.properties.device.properties.network.read(
"{} {} {} presentValue".format(
self.properties.device.properties.address,
self.properties.type,
str(self.properties.address),
)
)
self._trend(res)
except Exception:
raise Exception("Problem reading : {}".format(self.properties.name))
if res == "inactive":
self._key = 0
self._boolKey = False
else:
self._key = 1
self._boolKey = True
return res
|
def value(self)
|
Read the value from BACnet network
| 7.018139 | 6.367179 | 1.102237 |
if self.lastValue == 1 or self.lastValue == "active":
self._key = 1
self._boolKey = True
else:
self._key = 0
self._boolKey = False
return self._boolKey
|
def boolValue(self)
|
returns : (boolean) Value
| 4.807156 | 4.578217 | 1.050006 |
try:
return self.properties.units_state[int(self.lastValue) - 1]
except IndexError:
value = "unknown"
except ValueError:
value = "NaN"
return value
|
def enumValue(self)
|
returns: (str) Enum state value
| 8.36361 | 8.415505 | 0.993833 |
try:
value = self.lastValue
except IndexError:
value = "NaN"
except ValueError:
value = "NaN"
return value
|
def value(self)
|
Take last known value as the value
| 6.260675 | 4.689091 | 1.335158 |
def _batches(self, request, points_per_request):
for i in range(0, len(request), points_per_request):
yield request[i : i + points_per_request]
|
Generator for creating 'request batches'. Each batch contains a maximum of "points_per_request"
points to read.
:params: request a list of point_name as a list
:params: (int) points_per_request
:returns: (iter) list of point_name of size <= points_per_request
| null | null | null |
|
def _rpm_request_by_name(self, point_list):
points = []
requests = []
for each in point_list:
str_list = []
point = self._findPoint(each, force_read=False)
points.append(point)
str_list.append(" " + point.properties.type)
str_list.append(" " + str(point.properties.address))
str_list.append(" presentValue")
rpm_param = "".join(str_list)
requests.append(rpm_param)
return (requests, points)
|
:param point_list: a list of point
:returns: (tuple) read request for each points, points
| null | null | null |
|
def read_multiple(
    self,
    points_list,
    *,
    points_per_request=25,
    discover_request=(None, 6),
    force_single=False
):
    """Read points from a device using a ReadPropertyMultiple request.

    [ReadProperty requests are very slow in comparison.]

    :param points_list: (list) a list of all point_name as str
    :param points_per_request: (int) number of points in the request

    Requesting many points results in big requests that need segmentation.
    Aim to request just the 'right amount' so segmentation can be avoided.
    Determining the 'right amount' is often trial-&-error.

    :Example:

    device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
    """
    if not self.properties.pss["readPropertyMultiple"] or force_single:
        # Device does not support RPM (or caller forced it off):
        # fall back to one ReadProperty request per point.
        self._log.warning("Read property Multiple Not supported")
        self.read_single(
            points_list, points_per_request=1, discover_request=discover_request
        )
    else:
        if not self.properties.segmentation_supported:
            # Without segmentation, each request must fit in a single APDU.
            points_per_request = 1
        if discover_request[0]:
            # Caller supplied prebuilt request fragments:
            # discover_request == (request_list, info_length).
            values = []
            info_length = discover_request[1]
            big_request = discover_request[0]
            for request in self._batches(big_request, points_per_request):
                try:
                    request = "{} {}".format(
                        self.properties.address, "".join(request)
                    )
                    self._log.debug("RPM_Request: %s " % request)
                    val = self.properties.network.readMultiple(request)
                    # print('val : ', val, len(val), type(val))
                    if val == None:
                        # A None response is treated as a segmentation failure.
                        self.properties.segmentation_supported = False
                        raise SegmentationNotSupported
                except KeyError as error:
                    raise Exception("Unknown point name : %s" % error)
                except SegmentationNotSupported as error:
                    self.properties.segmentation_supported = False
                    # self.read_multiple(points_list,points_per_request=1, discover_request=discover_request)
                    self._log.warning("Segmentation not supported")
                    self._log.warning("Request too big...will reduce it")
                    if points_per_request == 1:
                        # Already at the minimum request size; give up.
                        raise
                    # Retry the whole read one point at a time.
                    self.read_multiple(
                        points_list,
                        points_per_request=1,
                        discover_request=discover_request,
                    )
                else:
                    # Group the flat response into per-point chunks of
                    # info_length values each.
                    for points_info in self._batches(val, info_length):
                        values.append(points_info)
            return values
        else:
            # Build RPM request fragments from the point names themselves.
            big_request = self._rpm_request_by_name(points_list)
            i = 0
            for request in self._batches(big_request[0], points_per_request):
                try:
                    request = "{} {}".format(
                        self.properties.address, "".join(request)
                    )
                    val = self.properties.network.readMultiple(request)
                except SegmentationNotSupported as error:
                    self.properties.segmentation_supported = False
                    self.read_multiple(
                        points_list,
                        points_per_request=1,
                        discover_request=discover_request,
                    )
                except KeyError as error:
                    raise Exception("Unknown point name : %s" % error)
                else:
                    # Pair each returned value with its point object and
                    # record it in that point's trend history.
                    # NOTE(review): zip pairs values positionally; if the
                    # response is shorter than the batch, pairing may shift —
                    # confirm readMultiple always answers per point.
                    points_values = zip(big_request[1][i : i + len(val)], val)
                    i += len(val)
                    for each in points_values:
                        each[0]._trend(each[1])
|
Read points from a device using a ReadPropertyMultiple request.
[ReadProperty requests are very slow in comparison].
:param points_list: (list) a list of all point_name as str
:param points_per_request: (int) number of points in the request
Requesting many points results big requests that need segmentation. Aim to request
just the 'right amount' so segmentation can be avoided. Determining the 'right amount'
is often trial-&-error.
:Example:
device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
| null | null | null |
|
def read_multiple(
    self, points_list, *, points_per_request=1, discover_request=(None, 6)
):
    """Read points using individual ReadProperty requests.

    Used when the device does not support ReadPropertyMultiple; each
    point is read one at a time via read_single.

    :param points_list: (list) a list of all point_name as str
    :param points_per_request: (int) number of points in the request

    :Example:

    device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
    """
    # print('PSS : %s' % self.properties.pss['readPropertyMultiple'])
    targets = points_list if isinstance(points_list, list) else [points_list]
    for target in targets:
        self.read_single(
            target, points_per_request=1, discover_request=discover_request
        )
|
Functions to read points from a device using the ReadPropertyMultiple request.
Using readProperty request can be very slow to read a lot of data.
:param points_list: (list) a list of all point_name as str
:param points_per_request: (int) number of points in the request
Using too many points will create big requests needing segmentation.
It's better to use just enough request so the message will not require
segmentation.
:Example:
device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
| null | null | null |
|
def poll(self, command="start", *, delay=120):
    """Poll every point every *delay* seconds.

    Can be stopped by using device.poll('stop') or .poll(0) or .poll(False)
    or by setting a delay = 0.

    :param command: (str) start or stop polling
    :param delay: (int) time delay between polls in seconds
    :type command: str
    :type delay: int

    :Example:

    device.poll()
    device.poll('stop')
    device.poll(delay = 5)
    """
    if delay > 120:
        # Delay is capped at 120 seconds.
        # NOTE(review): the warning mentions segmentation and says
        # "(or higher)" while the code caps AT 120 — confirm wording.
        self._log.warning(
            "Segmentation not supported, forcing delay to 120 seconds (or higher)"
        )
        delay = 120
    # for each in self.points:
    # each.value
    # self._log.info('Complete')
    if (
        str(command).lower() == "stop"
        or command == False
        or command == 0
        or delay == 0
    ):
        # Stop request: halt the poll thread (if any) and busy-wait
        # until it has actually died before clearing state.
        if isinstance(self._polling_task.task, DevicePoll):
            self._polling_task.task.stop()
            while self._polling_task.task.is_alive():
                pass
            self._polling_task.task = None
            self._polling_task.running = False
            self._log.info("Polling stopped")
    elif self._polling_task.task is None:
        # No poll thread yet: create and start one.
        self._polling_task.task = DevicePoll(self, delay=delay)
        self._polling_task.task.start()
        self._polling_task.running = True
        self._log.info(
            "Polling started, values read every {} seconds".format(delay)
        )
    elif self._polling_task.running:
        # Already polling: stop the current thread, wait for it to die,
        # then restart with the new delay.
        self._polling_task.task.stop()
        while self._polling_task.task.is_alive():
            pass
        self._polling_task.running = False
        self._polling_task.task = DevicePoll(self, delay=delay)
        self._polling_task.task.start()
        self._polling_task.running = True
        self._log.info("Polling started, every values read each %s seconds" % delay)
    else:
        raise RuntimeError("Stop polling before redefining it")
|
Poll a point every x seconds (delay=x sec)
Can be stopped by using point.poll('stop') or .poll(0) or .poll(False)
or by setting a delay = 0
:param command: (str) start or stop polling
:param delay: (int) time delay between polls in seconds
:type command: str
:type delay: int
:Example:
device.poll()
device.poll('stop')
device.poll(delay = 5)
| null | null | null |
|
statistics = {}
mstp_networks = []
mstp_map = {}
ip_devices = []
bacoids = []
mstp_devices = []
for address, bacoid in self.whois_answer[0].keys():
if ":" in address:
net, mac = address.split(":")
mstp_networks.append(net)
mstp_devices.append(mac)
try:
mstp_map[net].append(mac)
except KeyError:
mstp_map[net] = []
mstp_map[net].append(mac)
else:
net = "ip"
mac = address
ip_devices.append(address)
bacoids.append((bacoid, address))
mstpnetworks = sorted(set(mstp_networks))
statistics["mstp_networks"] = mstpnetworks
statistics["ip_devices"] = sorted(ip_devices)
statistics["bacoids"] = sorted(bacoids)
statistics["mstp_map"] = mstp_map
statistics["timestamp"] = str(datetime.now())
statistics["number_of_devices"] = self.number_of_devices
statistics["number_of_registered_devices"] = len(self.registered_devices)
statistics["print_mstpnetworks"] = self.print_list(mstpnetworks)
return statistics
|
def network_stats(self)
|
Used by Flask to show information about the network
| 3.067455 | 3.055669 | 1.003857 |
def connect(self, *, db=None):
    """Switch a connected device to 'database mode'.

    In database mode the device will not use the BACnet network but
    instead obtain its contents from a previously stored database.
    """
    if not db:
        self._log.warning(
            "Already connected, provide db arg if you want to connect to db"
        )
        return
    # Stop polling before leaving the connected state.
    self.poll(command="stop")
    self.properties.db_name = db.split(".")[0]
    self.new_state(DeviceFromDB)
|
A connected device can be switched to 'database mode' where the device will
not use the BACnet network but instead obtain its contents from a previously
stored database.
| null | null | null |
|
def df(self, list_of_points, force_read=True):
    """Return histories for the given points.

    When connected, calling df should force a reading on the network
    (controlled by *force_read*). Returns a pandas DataFrame when pandas
    is available, otherwise a plain dict of histories.
    """
    histories = []
    for name in list_of_points:
        try:
            histories.append(self._findPoint(name, force_read=force_read).history)
        except ValueError as ve:
            # Unknown point: log and keep going with the others.
            self._log.error("{}".format(ve))
    data = dict(zip(list_of_points, histories))
    if not _PANDAS:
        return data
    return pd.DataFrame(data)
|
When connected, calling DF should force a reading on the network.
| null | null | null |
|
def _buildPointList(self):
    """Upon connection, build the device point list and properties."""
    try:
        # protocolServicesSupported tells us, among other things, whether
        # the device supports ReadPropertyMultiple.
        self.properties.pss.value = self.properties.network.read(
            "{} device {} protocolServicesSupported".format(
                self.properties.address, self.properties.device_id
            )
        )
    except NoResponseFromController as error:
        self._log.error("Controller not found, aborting. ({})".format(error))
        return ("Not Found", "", [], [])
    except SegmentationNotSupported as error:
        # NOTE(review): after switching to DeviceDisconnected, execution
        # still falls through to the reads below — confirm this is intended.
        self._log.warning("Segmentation not supported")
        self.segmentation_supported = False
        self.new_state(DeviceDisconnected)
    self.properties.name = self.properties.network.read(
        "{} device {} objectName".format(
            self.properties.address, self.properties.device_id
        )
    )
    self._log.info(
        "Device {}:[{}] found... building points list".format(
            self.properties.device_id, self.properties.name
        )
    )
    try:
        self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints(
            self.custom_object_list
        )
        if self.properties.pollDelay > 0:
            # Start automatic polling when a poll delay is configured.
            self.poll(delay=self.properties.pollDelay)
    except NoResponseFromController as error:
        self._log.error("Cannot retrieve object list, disconnecting...")
        self.segmentation_supported = False
        self.new_state(DeviceDisconnected)
    except IndexError as error:
        self._log.error("Device creation failed... disconnecting")
        self.new_state(DeviceDisconnected)
|
Upon connection to build the device point list and properties.
| null | null | null |
|
def analog_units(self):
    """Shortcut mapping each analog point name to its units.

    [Used by Bokeh trending feature]
    """
    mapping = {}
    for point in self.points:
        if isinstance(point, NumericPoint):
            mapping[point.properties.name] = point.properties.units_state
    return mapping
|
Shortcut to retrieve all analog points units [Used by Bokeh trending feature]
| null | null | null |
|
def _findPoint(self, name, force_read=True):
for point in self.points:
if point.properties.name == name:
if force_read:
point.value
return point
raise ValueError("{} doesn't exist in controller".format(name))
|
Used by getter and setter functions
| null | null | null |
|
def connect(self, *, db=None):
    """Attempt to connect to the device.

    If unable, attempt to connect to a controller database (so the user
    can use previously saved data).
    """
    # NOTE(review): *db* is accepted but not used in this state —
    # presumably kept for interface parity with other states; confirm.
    if not self.properties.network:
        # No BACnet network available: go straight to database mode.
        self.new_state(DeviceFromDB)
    else:
        try:
            name = self.properties.network.read(
                "{} device {} objectName".format(
                    self.properties.address, self.properties.device_id
                )
            )
            segmentation = self.properties.network.read(
                "{} device {} segmentationSupported".format(
                    self.properties.address, self.properties.device_id
                )
            )
            if not self.segmentation_supported or segmentation not in (
                "segmentedTransmit",
                "segmentedBoth",
            ):
                segmentation_supported = False
                self._log.debug("Segmentation not supported")
            else:
                segmentation_supported = True
            if name:
                # RPM state when segmentation is available, plain RP
                # (one read per point) otherwise.
                if segmentation_supported:
                    self.new_state(RPMDeviceConnected)
                else:
                    self.new_state(RPDeviceConnected)
        except SegmentationNotSupported:
            self.segmentation_supported = False
            self._log.warning(
                "Segmentation not supported.... expect slow responses."
            )
            self.new_state(RPDeviceConnected)
        except (NoResponseFromController, AttributeError) as error:
            if self.properties.db_name:
                # Device unreachable but a database exists: use stored data.
                self.new_state(DeviceFromDB)
            else:
                self._log.warning(
                    "Offline: provide database name to load stored data."
                )
                self._log.warning("Ex. controller.connect(db = 'backup')")
|
Attempt to connect to device. If unable, attempt to connect to a controller database
(so the user can use previously saved data).
| null | null | null |
|
def connect(self, *, network=None, from_backup=None):
    """Reconnect a database-mode device to BACnet.

    In DBState, a device can be reconnected to BACnet using:
        device.connect(network=bacnet)  (bacnet = BAC0.connect())
    """
    if network and from_backup:
        # The two arguments are mutually exclusive.
        raise WrongParameter("Please provide network OR from_backup")
    elif network:
        self.properties.network = network
        try:
            name = self.properties.network.read(
                "{} device {} objectName".format(
                    self.properties.address, self.properties.device_id
                )
            )
            segmentation = self.properties.network.read(
                "{} device {} segmentationSupported".format(
                    self.properties.address, self.properties.device_id
                )
            )
            if not self.segmentation_supported or segmentation not in (
                "segmentedTransmit",
                "segmentedBoth",
            ):
                segmentation_supported = False
                self._log.debug("Segmentation not supported")
            else:
                segmentation_supported = True
            if name:
                # RPM state when segmentation is available, plain RP
                # (one read per point) otherwise.
                if segmentation_supported:
                    self.new_state(RPMDeviceConnected)
                else:
                    self.new_state(RPDeviceConnected)
            # self.db.close()
        except NoResponseFromController:
            self._log.error("Unable to connect, keeping DB mode active")
    elif from_backup:
        # Reload state from the named backup database file.
        self.properties.db_name = from_backup.split(".")[0]
        self._init_state()
|
In DBState, a device can be reconnected to BACnet using:
device.connect(network=bacnet) (bacnet = BAC0.connect())
| null | null | null |
|
if level:
file = level
stderr = level
stdout = level
file = convert_level(file)
stderr = convert_level(stderr)
stdout = convert_level(stdout)
BAC0_logger = logging.getLogger("BAC0")
# if console:
# BAC0_logger.setLevel(console)
# BAC0_logger.warning('Changed log level of console to {}'.format(logging.getLevelName(level)))
for handler in BAC0_logger.handlers:
if file and handler.get_name() == "file_handler":
handler.setLevel(file)
BAC0_logger.info(
"Changed log level of file to {}".format(logging.getLevelName(file))
)
elif stdout and handler.get_name() == "stdout":
handler.setLevel(stdout)
BAC0_logger.info(
"Changed log level of console stdout to {}".format(
logging.getLevelName(stdout)
)
)
elif stderr and handler.get_name() == "stderr":
handler.setLevel(stderr)
BAC0_logger.info(
"Changed log level of console stderr to {}".format(
logging.getLevelName(stderr)
)
)
|
def update_log_level(level=None, *, file=None, stderr=None, stdout=None)
|
Typical usage :
Normal
BAC0.log_level(file='warning', stdout='warning', stderr='error')
Info on console....but not in file
BAC0.log_level(file='warning', stdout='info', stderr='error')
Debug
BAC0.log_level(file='debug', stdout='info', stderr='error')
| 2.094776 | 1.948343 | 1.075158 |
pss = bacnetapp.read(
"{} device {} protocolServicesSupported".format(address, devID)
)
deviceName = bacnetapp.read("{} device {} objectName".format(address, devID))
# print('Device {}- building points list'.format(deviceName))
objList = bacnetapp.read("{} device {} objectList".format(address, devID))
newLine = []
result = []
points = []
for pointType, pointAddr in objList:
if "binary" in pointType: # BI/BO/BV
newLine = [pointType, pointAddr]
infos = bacnetapp.readMultiple(
"{} {} {} objectName description presentValue inactiveText activeText".format(
address, pointType, pointAddr
)
)
newLine.extend(infos[:-2])
newLine.extend([infos[-2:]])
newPoint = BooleanPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "multiState" in pointType: # MI/MV/MO
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue stateText".format(
address, pointType, pointAddr
)
)
)
newPoint = EnumPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "analog" in pointType: # AI/AO/AV
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue units".format(
address, pointType, pointAddr
)
)
)
newPoint = NumericPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
else:
continue # skip
result.append(newLine)
points.append(newPoint)
if _PANDA:
df = pd.DataFrame(
result,
columns=[
"pointType",
"pointAddress",
"pointName",
"description",
"presentValue",
"units_state",
],
).set_index(["pointName"])
else:
df = result
# print('Ready!')
return (deviceName, pss, objList, df, points)
|
def discoverPoints(bacnetapp, address, devID)
|
Discover the BACnet points in a BACnet device.
:param bacnetApp: The app itself so we can call read
:param address: address of the device as a string (ex. '2:5')
:param devID: device ID of the bacnet device as a string (ex. '1001')
:returns: a tuple with deviceName, pss, objList, df
* *deviceName* : name of the device
* *pss* : protocol services supported
* *objList* : list of bacnet object (ex. analogInput, 1)
* *df* : is a dataFrame containing pointType, pointAddress, pointName, description
presentValue and units
If pandas can't be found, df will be a simple array
| 2.759398 | 2.463188 | 1.120255 |
if isinstance(previous, CubicBezier):
return (self.start == previous.end and
(self.control1 - self.start) == (previous.end - previous.control2))
else:
return self.control1 == self.start
|
def is_smooth_from(self, previous)
|
Checks if this segment would be a smooth segment following the previous
| 3.938217 | 3.70458 | 1.063067 |
return ((1 - pos) ** 3 * self.start) + \
(3 * (1 - pos) ** 2 * pos * self.control1) + \
(3 * (1 - pos) * pos ** 2 * self.control2) + \
(pos ** 3 * self.end)
|
def point(self, pos)
|
Calculate the x,y position at a certain position of the path
| 2.223292 | 2.000332 | 1.111461 |
start_point = self.point(0)
end_point = self.point(1)
return segment_length(self, 0, 1, start_point, end_point, error, min_depth, 0)
|
def length(self, error=ERROR, min_depth=MIN_DEPTH)
|
Calculate the length of the path up to a certain position
| 3.777231 | 3.382177 | 1.116805 |
if isinstance(previous, QuadraticBezier):
return (self.start == previous.end and
(self.control - self.start) == (previous.end - previous.control))
else:
return self.control == self.start
|
def is_smooth_from(self, previous)
|
Checks if this segment would be a smooth segment following the previous
| 4.196405 | 3.909942 | 1.073265 |
'''
Registers a blueprint on the WebSockets.
'''
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint\'s name collision occurred between %r and ' \
'%r. Both share the same name "%s". Blueprints that ' \
'are created on the fly need unique names.' % \
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, options, first_registration)
|
def register_blueprint(self, blueprint, **options)
|
Registers a blueprint on the WebSockets.
| 3.779158 | 3.211226 | 1.176858 |
line = linecache.getline(filename, lineno)
# Hack warning: might be fooled by comments
rx = re.compile(r'\b%s\b' % re.escape(name) if name != '*' else '[*]')
while line and not rx.search(line):
lineno += 1
line = linecache.getline(filename, lineno)
return lineno
|
def adjust_lineno(filename, lineno, name)
|
Adjust the line number of an import.
Needed because import statements can span multiple lines, and our lineno
is always the first line number.
| 4.301723 | 4.516315 | 0.952485 |
with open(filename) as f:
root = ast.parse(f.read(), filename)
visitor = ImportFinder(filename)
visitor.visit(root)
return visitor.imports
|
def find_imports(filename)
|
Find all imported names in a given file.
Returns a list of ImportInfo objects.
| 2.611594 | 3.440902 | 0.758985 |
with open(filename) as f:
root = ast.parse(f.read(), filename)
visitor = ImportFinderAndNameTracker(filename)
visitor.warn_about_duplicates = warn_about_duplicates
visitor.verbose = verbose
visitor.visit(root)
visitor.leaveAllScopes()
return visitor.imports, visitor.unused_names
|
def find_imports_and_track_names(filename, warn_about_duplicates=False,
verbose=False)
|
Find all imported names in a given file.
Returns ``(imports, unused)``. Both are lists of ImportInfo objects.
| 3.863531 | 4.404156 | 0.877247 |
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
|
def parsePathname(self, pathname)
|
Parse one or more source files.
``pathname`` may be a file name or a directory name.
| 3.155039 | 3.301712 | 0.955577 |
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
|
def writeCache(self, filename)
|
Write the graph to a cache file.
| 3.919198 | 4.366522 | 0.897556 |
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
|
def readCache(self, filename)
|
Load the graph from a cache file.
| 4.095547 | 4.23454 | 0.967176 |
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
|
def parseFile(self, filename)
|
Parse a single file.
| 4.519828 | 4.439104 | 1.018185 |
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
|
def filenameToModname(self, filename)
|
Convert a filename to a module name.
| 2.693763 | 2.641886 | 1.019636 |
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
|
def findModuleOfName(self, dotted_name, level, filename, extrapath=None)
|
Given a fully qualified name, find what module contains it.
| 4.368351 | 4.358542 | 1.002251 |
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
|
def isModule(self, dotted_name, extrapath=None)
|
Is ``dotted_name`` the name of a module?
| 2.115229 | 2.12556 | 0.99514 |
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
|
def isPackage(self, dotted_name, extrapath=None)
|
Is ``dotted_name`` the name of a package?
| 4.61328 | 4.372097 | 1.055164 |
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
|
def packageOf(self, dotted_name, packagelevel=None)
|
Determine the package that contains ``dotted_name``.
| 2.084301 | 1.956331 | 1.065413 |
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
|
def listModules(self)
|
Return an alphabetical list of all modules.
| 4.153345 | 3.37709 | 1.229859 |
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
|
def packageGraph(self, packagelevel=None)
|
Convert a module graph to a package graph.
| 3.118338 | 2.973269 | 1.048791 |
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
|
def collapseCycles(self)
|
Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
| 2.63132 | 2.568344 | 1.02452 |
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
|
def printImportedNames(self)
|
Produce a report of imported names.
| 4.312834 | 3.852823 | 1.119396 |
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
|
def printImports(self)
|
Produce a report of dependencies.
| 3.93666 | 3.672861 | 1.071824 |
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
|
def printUnusedImports(self)
|
Produce a report of unused imports.
| 4.115026 | 3.923336 | 1.048859 |
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
def printDot(self)
|
Produce a dependency graph in dot format.
| 3.117 | 2.971401 | 1.049 |
text = unicode(text)
return text.translate({
ord('&'): u'&',
ord('<'): u'<',
ord('"'): u'"',
ord('>'): u'>',
ord('@'): u'@',
0xa0: u' '})
|
def quote(text)
|
encode html entities
| 2.879999 | 2.536071 | 1.135615 |
"create all classes and put them in ctx"
for (tag, info) in _TAGS.items():
class_name = tag.title()
quote_, compact, self_closing, docs = info
def __init__(self, *childs, **attrs):
TagBase.__init__(self, childs, attrs)
cls = type(class_name, (TagBase,), {
"__doc__": docs,
"__init__": __init__
})
cls.QUOTE = quote_
cls.COMPACT = compact
cls.SELF_CLOSING = self_closing
ctx[class_name] = cls
|
def _create_tags(ctx)
|
create all classes and put them in ctx
| 4.952615 | 3.920754 | 1.263179 |
def tag_from_element(el):
    """Convert an Element into a Tag.

    ``el`` is an instance of ``Element``. Returns an instance of the
    corresponding subclass of ``TagBase``.
    """
    tag_name = el.tag
    namespace = None
    if tag_name.startswith('{'):
        # "{namespace}tag" form: peel the namespace off the tag name
        namespace, tag_name = tag_name[1:].split('}')
    try:
        cls = globals()[tag_name.title()]
        if not issubclass(cls, TagBase):
            raise KeyError()
    except KeyError:
        raise ValueError("TagBase doesn't have a subclass for '%s'." % tag_name)
    converted_children = [tag_from_element(child) for child in el]
    result = cls(*converted_children, **el.attrib)
    result.text = el.text
    result.tail = el.tail
    if namespace:
        result.attrib['xmlns'] = namespace
    return result
|
Convert an Element into a Tag.
``el`` is an instance of ``Element``. Returns an instance of the
corresponding subclass of ``TagBase``.
| 3.085825 | 3.068124 | 1.005769 |
def html_to_tags(code):
    """Convert HTML code to tags.

    ``code`` is a string containing HTML code. The return value is a
    list of corresponding instances of ``TagBase``.
    """
    # wrap in a single root so fragments with multiple top-level
    # elements still parse
    wrapped = ('<div>' + code + '</div>').encode('utf8')
    root = ET.fromstring(wrapped)
    return [tag_from_element(child) for child in root]
|
Convert HTML code to tags.
``code`` is a string containing HTML code. The return value is a
list of corresponding instances of ``TagBase``.
| 4.526862 | 6.421485 | 0.704956 |
# Parse settings.math_output ("<name> [option]") and instantiate the
# matching math handler.
fields = self.settings.math_output.split(None, 1)
name = fields[0].lower()
option = fields[1] if len(fields) > 1 else None
if name == 'html':
    # an explicit --math-css setting overrides the inline option
    option = self.settings.math_css or option
    self.math_handler = HTMLMathHandler(css_filename=option)
elif name == 'mathml':
    if option:
        raise ValueError(('Math handler "%s" does not support ' +
                'option "%s".') % (name, option))
    self.math_handler = MathMLMathHandler()
elif name == 'mathjax':
    # The MathJax handler can be configured via different ways:
    #
    # - By passing an additional JS url to "--math-output"
    # (to stay backwards-compatible with docutils)
    #
    # - By using "--mathjax-opts" (to stay backwards compatible
    # with the previous html5css3 mathjax postprocessor)
    #
    # - By using "--mathjax-url" and "--mathjax-config" (the
    # preferred way)
    js_url = option
    config = None
    if self.settings.mathjax_opts:
        # "--mathjax-opts" is a comma-separated key=value list
        parts = self.settings.mathjax_opts.split(',')
        options = dict(part.split('=', 1) for part in parts)
        js_url = options.get('url', js_url)
        config = options.get('config', config)
    # the dedicated settings take precedence over the fallbacks above
    js_url = self.settings.mathjax_url or js_url
    config = self.settings.mathjax_config or config
    self.math_handler = MathJaxMathHandler(js_url=js_url,
                                           config_filename=config)
elif name == 'latex':
    if option:
        raise ValueError(('Math handler "%s" does not support ' +
                'option "%s".') % (name, option))
    self.math_handler = LaTeXMathHandler()
else:
    raise ValueError('Unknown math handler "%s".' % name)
|
def _init_math_handler(self)
|
Parse math configuration and set up math handler.
| 2.906136 | 2.830905 | 1.026575 |
def append_default_stylesheets(self):
    """Append every stylesheet named in the translator settings."""
    for sheet in utils.get_stylesheet_list(self.settings):
        self.css(sheet)
|
Appends the default styles defined on the translator settings.
| 8.684252 | 6.919717 | 1.255001 |
def css(self, path):
    """Link or embed the CSS file at `path` into the document head.

    When ``embed_content`` is set the file contents are inlined in a
    <style> tag (making the output self-contained); otherwise a <link>
    tag referencing `path` is emitted.
    """
    if self.settings.embed_content:
        # close the file deterministically (the original leaked the handle)
        with codecs.open(path, 'r', encoding='utf8') as stylesheet:
            content = stylesheet.read()
        tag = Style(content, type="text/css")
    else:
        tag = Link(href=path, rel="stylesheet", type_="text/css")
    self.head.append(tag)
|
Link/embed CSS file.
| 4.458042 | 4.138706 | 1.077158 |
def repr_args(args):
    """Format a list of function arguments prettily but as working code.

    kwargs are supplied as (argname, argvalue) tuples.
    """
    rendered = []
    for arg in args:
        if isinstance(arg, tuple) and len(arg) == 2:
            key, value = arg
            # todo: exclude this key if value is its default
            rendered.append("%s=%s" % (key, repr_arg(value)))
        else:
            rendered.append(repr_arg(arg))
    return ', '.join(rendered)
|
formats a list of function arguments prettily but as working code
(kwargs are given as (argname, argvalue) tuples)
| 3.248951 | 3.310125 | 0.981519 |
# Render a value as working source text: dicts and lists recurse,
# ASCII-encodable unicode is shown as a plain str literal (no u''
# prefix), everything else falls back to repr().
if isinstance(d, dict):
    # if d can be expressed in key=value syntax:
    return "{%s}" % ", ".join(
        "%s: %s" % (repr_arg(k), repr_arg(v)) for k, v in d.items())
if isinstance(d, list):
    return "[%s]" % ", ".join(repr_arg(elem) for elem in d)
if isinstance(d, unicode):
    try:
        # pure-ASCII text: prefer the byte-string repr
        return repr(d.encode("ascii"))
    except UnicodeEncodeError:
        return repr(d)
return repr(d)
|
def repr_arg(d)
|
formats a function argument prettily but as working code
unicode encodable as ascii is formatted as str
| 2.356075 | 2.351548 | 1.001925 |
def str_args(args):
    """Format a list of function arguments prettily, not as code.

    kwargs are supplied as (argname, argvalue) tuples; entries whose
    value renders empty are omitted entirely.
    """
    rendered = []
    for arg in args:
        if isinstance(arg, tuple) and len(arg) == 2:
            key, value = arg
            if value and str_arg(value):
                rendered.append("%s=%s" % (key, str_arg(value)))
        else:
            rendered.append(str_arg(arg))
    return ', '.join(rendered)
|
formats a list of function arguments prettily, not as code
(kwargs are given as (argname, argvalue) tuples)
| 2.471332 | 2.497211 | 0.989637 |
# Pretty (non-code) rendering of a value. Returns None for values that
# should be omitted: falsy values and {'type': 'module'} dicts.
if not d:
    return None
if isinstance(d, dict):
    # {'type': 'text', 'value': ...} collapses to just its value
    if len(d) == 2 and d.get('type') == 'text' and 'value' in d:
        return str_arg(d['value'])
    # {'type': 'text', 'subkey': ...} renders as an attribute access
    if len(d) == 2 and d.get('type') == 'text' and 'subkey' in d:
        return ".%s" % d['subkey']
    if d.get('type') == 'module':
        return None
    return "{%s}" % str_args(d.items())
if isinstance(d, list):
    # single-element lists are unwrapped
    if len(d) == 1:
        return str_arg(d[0])
    return "[%s]" % ", ".join(str_arg(elem) for elem in d)
if isinstance(d, unicode):
    # strings are quoted via str-style formatting, not repr
    return '"%s"' % d
return repr(d)
|
def str_arg(d)
|
formats a function argument prettily not as code
dicts are expressed in {key=value} syntax
strings are formatted using str in quotes not repr
| 2.257211 | 2.338782 | 0.965123 |
# twisted inlineCallbacks body: split the deferred input, run the hash
# (parse_result) over every split via the async dispatch funcs, and
# hand back an iterator over the gathered results.
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = yield asyncStarMap(partial(maybeDeferred, parse_result), parsed)
returnValue(iter(_OUTPUT))
|
def asyncPipeHash(context=None, _INPUT=None, conf=None, **kwargs)
|
A string module that asynchronously hashes the given text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of hashed strings
| 17.021824 | 21.024645 | 0.809613 |
def pipe_tail(context=None, _INPUT=None, conf=None, **kwargs):
    """Return the last `count` items from the bottom of a feed.

    `count` comes from conf (or a wired terminal via kwargs); a deque
    with that maxlen keeps only the trailing items.
    """
    conf = DotDict(conf)
    count = conf.get('count', func=int, **kwargs)
    tail = deque(_INPUT, count)
    for tail_item in tail:
        yield tail_item
|
Returns a specified number of items from the bottom of a feed.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- terminal, if the truncation value is wired in
conf : count -- length of the truncated feed, if specified literally
Yields
------
_OUTPUT : items
| 10.178267 | 14.891761 | 0.683483 |
# Tarjan's strongly-connected-components over an adjacency dict
# (node -> list of successor nodes).
# NOTE(review): `components` is a map() result iterated twice below —
# fine under Python 2 (list), but it would be exhausted on the second
# use under Python 3; confirm before porting.
components = map(partial(_visit, graph=graph), graph)
node_component = dict(_gen_node_component(components))
graph_component = {component: [] for component in components}
graph_component.update(
    dict(_gen_graph_component(graph, node_component, _gen_graph_value)))
return graph_component
|
def get_graph_component(graph)
|
Identify strongly connected components in a graph using
Tarjan's algorithm.
graph should be a dictionary mapping node names to
lists of successor nodes.
| 5.690787 | 6.458934 | 0.881072 |
# Split the deferred input on the regex rules; convert_func (run first
# in the dispatch chain) builds the compiled rules, asyncParseResult
# then applies them to every item.
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
first = partial(maybeDeferred, convert_func)
asyncFuncs = get_async_dispatch_funcs(first=first)
parsed = yield asyncDispatch(splits, *asyncFuncs)
_OUTPUT = yield asyncStarMap(asyncParseResult, parsed)
returnValue(iter(_OUTPUT))
|
def asyncPipeStrregex(context=None, _INPUT=None, conf=None, **kwargs)
|
A string module that asynchronously replaces text using regexes. Each
has the general format: "In [field] replace [regex pattern] with [text]".
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{'match': {'value': <regex1>}, 'replace': {'value': <'text1'>}},
{'match': {'value': <regex2>}, 'replace': {'value': <'text2'>}},
{'match': {'value': <regex3>}, 'replace': {'value': <'text3'>}},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
| 20.139305 | 21.520439 | 0.935822 |
def pipe_strregex(context=None, _INPUT=None, conf=None, **kwargs):
    """Replace text in each input item using the configured regex rules.

    Each rule has the general format "In [field] replace [regex pattern]
    with [text]". Loopable. Returns a generator of replaced strings.
    """
    pairs = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
    dispatch_funcs = get_dispatch_funcs(first=convert_func)
    converted = utils.dispatch(pairs, *dispatch_funcs)
    return starmap(parse_result, converted)
|
A string module that replaces text using regexes. Each has the general
format: "In [field] replace [regex pattern] with [text]". Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {
'RULE': [
{
'match': {'value': <regex>},
'replace': {'value': <'replacement'>}
}
]
}
Returns
-------
_OUTPUT : generator of replaced strings
| 22.527275 | 20.217358 | 1.114254 |
# Fetch each configured URL, apply the configured xpath to the parsed
# DOM, and yield one item per xpath hit (or its string form when
# useAsString is set).
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
    for item_url in urls:
        # the URL may be templated from the current item
        url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
        url = utils.get_abspath(url)
        f = urlopen(url)
        # TODO: it seems that Yahoo! converts relative links to
        # absolute. This needs to be done on the content but seems to
        # be a non-trivial task python?
        content = unicode(f.read(), 'utf-8')
        if context and context.verbose:
            print '............Content .................'
            print content
            print '...............EOF...................'
        xpath = conf.get('xpath', **kwargs)
        html5 = conf.get('html5', **kwargs) == 'true'
        use_as_string = conf.get('useAsString', **kwargs) == 'true'
        # NOTE(review): f was fully consumed by f.read() above, so this
        # parse call looks like it sees an exhausted stream — confirm
        # whether parsing `content` was intended. f is also never closed.
        tree = html5parser.parse(f) if html5 else html.parse(f)
        root = tree.getroot()
        items = root.xpath(xpath)
        if context and context.verbose:
            print 'XPathFetchPage: found count items:', len(items)
        for etree in items:
            i = utils.etree_to_dict(etree)
            if context and context.verbose:
                print '--------------item data --------------------'
                print i
                print '--------------EOF item data ----------------'
            if use_as_string:
                yield {'content': unicode(i)}
            else:
                yield i
        if item.get('forever'):
            # _INPUT is pipeforever and not a loop,
            # so we just yield our item once
            break
|
def pipe_xpathfetchpage(context=None, _INPUT=None, conf=None, **kwargs)
|
A source that fetches the content of a given website as DOM nodes or a
string. Loopable.
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : dict
URL -- url object contain the URL to download
xpath -- xpath to extract
html5 -- use html5 parser?
useAsString -- emit items as string?
TODOS:
- don't retrieve pages larger than 1.5MB
- don't retrieve if page is not indexable.
Yields
------
_OUTPUT : items
| 6.465731 | 5.897513 | 1.096349 |
def extract_dependencies(pipe_def=None, pipe_generator=None):
    """Extract the modules used by a pipe.

    Accepts either a pipe definition or a compiled pipe generator; at
    least one must be supplied. Returns a sorted, de-duplicated list.
    """
    if pipe_def:
        found = gen_dependencies(pipe_def)
    elif pipe_generator:
        found = pipe_generator(Context(describe_dependencies=True))
    else:
        raise Exception('Must supply at least one kwarg!')

    return sorted(set(found))
|
Extract modules used by a pipe
| 5.978137 | 5.959578 | 1.003114 |
def extract_input(pipe_def=None, pipe_generator=None):
    """Extract the inputs required by a pipe.

    Accepts either a pipe definition or a compiled pipe generator; at
    least one must be supplied. Returns a sorted list of inputs.
    """
    if pipe_def:
        pyinput = gen_input(pipe_def)
    elif pipe_generator:
        pyinput = pipe_generator(Context(describe_input=True))
    else:
        raise Exception('Must supply at least one kwarg!')

    # sorted() accepts any iterable; no need to materialize a list first
    return sorted(pyinput)
|
Extract inputs required by a pipe
| 6.692542 | 6.704779 | 0.998175 |
def pythonise(id, encoding='ascii'):
    """Return a Python-friendly id.

    Replaces '-', ':' and '/' with '_', prefixes a leading digit with
    '_' (identifiers may not start with a digit), and encodes the
    result with `encoding`.
    """
    # plain loop instead of reduce() over dict.iteritems(): same result,
    # clearer, and works on both Python 2 and 3
    for illegal in ('-', ':', '/'):
        id = id.replace(illegal, '_')

    if id[0] in string.digits:
        id = '_%s' % id

    return id.encode(encoding)
|
Return a Python-friendly id
| 3.810938 | 3.589513 | 1.061687 |
# Start from the element's attributes, then fold in its text and
# children, imitating Yahoo Pipes' XML-to-item conversion.
i = dict(element.items())
content = element.text.strip() if element.text else None
i.update({'content': content}) if content else None
if len(element.getchildren()):
    for child in element.iterchildren():
        # drop any "{namespace}" prefix from the child's tag
        tag = child.tag.split('}', 1)[-1]
        new = etree_to_dict(child)
        content = _make_content(i, tag, new)
        i.update({tag: content}) if content else None
        # text trailing the child element is folded into 'content'
        tag = 'content'
        new = child.tail.strip() if child.tail else None
        content = _make_content(i, tag, new)
        i.update({tag: content}) if content else None
elif content and not set(i).difference(['content']):
    # element is leaf node and doesn't have attributes
    i = content
return i
|
def etree_to_dict(element)
|
Convert an eTree xml into dict imitating how Yahoo Pipes does it.
todo: further investigate white space and multivalue handling
| 3.033886 | 3.151212 | 0.962768 |
# Duplicate the input once per function (itertools.tee) and deliver each
# item to every func; the map/apply strategies are overridable via kwargs.
map_func = kwargs.get('map_func', _map_func)
apply_func = kwargs.get('apply_func', _apply_func)
splits = izip(*tee(_INPUT, len(funcs)))
return map_func(partial(apply_func, funcs), splits)
|
def broadcast(_INPUT, *funcs, **kwargs)
|
copies an iterable and delivers the items to multiple functions
/--> foo2bar(_INPUT) --> \
/ \
_INPUT ---> foo2baz(_INPUT) ---> _OUTPUT
\ /
\--> foo2qux(_INPUT) --> /
One way to construct such a flow in code would be::
_INPUT = repeat('foo', 3)
foo2bar = lambda word: word.replace('foo', 'bar')
foo2baz = lambda word: word.replace('foo', 'baz')
foo2qux = lambda word: word.replace('foo', 'quz')
_OUTPUT = broadcast(_INPUT, foo2bar, foo2baz, foo2qux)
_OUTPUT == repeat(('bar', 'baz', 'qux'), 3)
| 4.056331 | 4.529867 | 0.895464 |
# Deliver each already-split iterable to its positionally matching
# function; the map/apply strategies are overridable via kwargs.
map_func = kwargs.get('map_func', _map_func)
apply_func = kwargs.get('apply_func', _apply_func)
return map_func(partial(apply_func, funcs), splits)
|
def dispatch(splits, *funcs, **kwargs)
|
takes multiple iterables (returned by dispatch or broadcast) and delivers
the items to multiple functions
/-----> _INPUT1 --> double(_INPUT1) --> \
/ \
splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT
\ /
\--> _INPUT3 --> quadruple(_INPUT3) --> /
One way to construct such a flow in code would be::
splits = repeat(('bar', 'baz', 'qux'), 3)
double = lambda word: word * 2
triple = lambda word: word * 3
quadruple = lambda word: word * 4
_OUTPUT = dispatch(splits, double, triple, quadruple)
_OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3)
| 3.228768 | 4.499147 | 0.71764 |
# Resolve a user parameter: outer submodule/system inputs win, then the
# interactive console prompt (skipped during tests), then the default.
name = conf['name']['value']
prompt = conf['prompt']['value']
default = conf['default']['value'] or conf['debug']['value']
if context.submodule or context.inputs:
    value = context.inputs.get(name, default)
elif not context.test:
    # we skip user interaction during tests
    raw = raw_input("%s (default=%s) " % (encode(prompt), encode(default)))
    value = raw or default
else:
    value = default
return value
|
def get_input(context, conf)
|
Gets a user parameter, either from the console or from an outer
submodule/system
Assumes conf has name, default, prompt and debug
| 5.200179 | 4.720092 | 1.101711 |
# Percent-encode the url; on KeyError (raised by quote() for some
# non-ASCII input under Python 2) retry with the byte-encoded form.
try:
    return quote(url, safe=URL_SAFE)
except KeyError:
    return quote(encode(url), safe=URL_SAFE)
|
def url_quote(url)
|
Ensure url is valid
| 3.387927 | 3.499895 | 0.968008 |
def multi_substitute(word, rules):
    """Apply multiple regex rules to `word` in a single combined pass.

    Based on http://code.activestate.com/recipes/
    576710-multi-regex-single-pass-replace-of-multiple-regexe/

    Each rule is a dict with 'match', 'replace', 'count', 'offset',
    'series' and 'flags' keys. Rules flagged 'series' each get their own
    pass over the text; all non-series (parallel) rules share one final
    pass. Returns the substituted string.

    Fixes vs. the original: identity comparisons (`is`) on strings and
    ints replaced with `==`, and py2-only ifilter/.next()/.iteritems()
    replaced with portable equivalents (same behavior on py2).
    """
    flags = rules[0]['flags']

    # Combine all rules into one alternation regex; each group name
    # encodes the index of the rule it belongs to.
    tuples = ((p, r['match']) for p, r in enumerate(rules))
    regexes = ('(?P<match_%i>%s)' % (p, r) for p, r in tuples)
    pattern = '|'.join(regexes)
    regex = re.compile(pattern, flags)
    resplit = re.compile(r'\$(\d+)')

    # For each match, look-up the corresponding replace value later.
    rules_in_series = (r for r in rules if r['series'])
    rules_in_parallel = (r for r in rules if not r['series'])

    try:
        # at most one shared pass for all parallel rules
        has_parallel = [next(rules_in_parallel)]
    except StopIteration:
        has_parallel = []

    for _ in chain(rules_in_series, has_parallel):
        prev_name = None
        prev_is_series = None
        i = 0

        for match in regex.finditer(word):
            # the one group that actually matched
            item = next(g for g in match.groupdict().items() if g[1])

            if not item:
                continue

            name = item[0]
            rule = rules[int(name[6:])]  # strip the 'match_' prefix
            series = rule.get('series')
            kwargs = {'count': rule['count'], 'series': series}

            # bugfix: value comparison instead of identity (`is`), which
            # only worked by accident for interned strings / small ints
            is_previous = name == prev_name
            singlematch = kwargs['count'] == 1
            is_series = prev_is_series or kwargs['series']
            isnt_previous = bool(prev_name) and not is_previous

            if (is_previous and singlematch) or (isnt_previous and is_series):
                continue

            prev_name = name
            prev_is_series = series

            if resplit.findall(rule['replace']):
                # replacement references $N capture groups: expand them
                splits = resplit.split(rule['replace'])
                words = _gen_words(match, splits)
            else:
                splits = rule['replace']
                start = match.start() + i
                end = match.end() + i
                words = [word[:start], splits, word[end:]]
                # track the length drift caused by this substitution
                i += rule['offset']

            word = ''.join(words)

    return word
|
Apply multiple regex rules to 'word'
http://code.activestate.com/recipes/
576710-multi-regex-single-pass-replace-of-multiple-regexe/
| 3.744881 | 3.719945 | 1.006703 |
# Build the attr-parsing function, apply it to each (finitized) input
# item asynchronously, then turn the parsed params back into DotDicts.
pkwargs = cdicts(opts, kwargs)
asyncFuncs = yield asyncGetSplits(None, conf['attrs'], **pkwargs)
_input = yield _INPUT
finite = utils.finitize(_input)
inputs = imap(DotDict, finite)
pieces = yield asyncImap(asyncFuncs[0], inputs)
results = imap(utils.parse_params, pieces)
_OUTPUT = imap(DotDict, results)
returnValue(_OUTPUT)
|
def asyncPipeItembuilder(context=None, _INPUT=None, conf=None, **kwargs)
|
A source that asynchronously builds an item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'attrs': [
{'key': {'value': 'title'}, 'value': {'value': 'new title'}},
{'key': {'value': 'desc.content'}, 'value': {'value': 'new desc'}}
]
}
Returns
------
_OUTPUT : twisted.internet.defer.Deferred generator of items
| 12.827152 | 14.517302 | 0.883577 |
def pipe_itembuilder(context=None, _INPUT=None, conf=None, **kwargs):
    """Build one item per input from the configured attrs. Loopable.

    Each conf attr is a {'key': ..., 'value': ...} pair; the parsed
    pairs become the fields of the emitted DotDict items.
    """
    funcs = get_splits(None, conf['attrs'], **cdicts(opts, kwargs))
    source_items = imap(DotDict, utils.finitize(_INPUT))
    raw_pieces = imap(funcs[0], source_items)
    parsed = imap(utils.parse_params, raw_pieces)
    return imap(DotDict, parsed)
|
A source that builds an item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'attrs': [
{'key': {'value': <'title'>}, 'value': {'value': <'chair'>}},
{'key': {'value': <'color'>}, 'value': {'value': <'red'>}}
]
}
Returns
------
_OUTPUT : generator of items
| 12.478252 | 13.485258 | 0.925325 |
# Wrap the embedded submodule so it can run once per input item, then
# split, parse and flatten the per-item results.
cust_func = get_cust_func(context, conf, embed, parse_embed, **kwargs)
opts.update({'cust_func': cust_func})
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
gathered = yield asyncStarMap(asyncParseResult, splits)
_OUTPUT = utils.multiplex(gathered)
returnValue(_OUTPUT)
|
def asyncPipeLoop(context=None, _INPUT=None, conf=None, embed=None, **kwargs)
|
An operator that asynchronously loops over the input and performs the
embedded submodule. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
embed : the submodule, i.e., asyncPipe*(context, _INPUT, conf)
Most modules, with the exception of User inputs and Operators can be
sub-modules.
conf : {
'assign_part': {'value': <all or first>},
'assign_to': {'value': <assigned field name>},
'emit_part': {'value': <all or first>},
'mode': {'value': <assign or EMIT>},
'with': {'value': <looped field name or blank>},
'embed': {'value': {'conf': <module conf>}}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
| 10.588764 | 12.814588 | 0.826305 |
def pipe_loop(context=None, _INPUT=None, conf=None, embed=None, **kwargs):
    """Loop over the input, running the embedded submodule on each item.
    Not loopable.

    The submodule's output is assigned to or emitted from each item
    according to conf; the per-item results are flattened into one
    stream.
    """
    embedded = get_cust_func(context, conf, embed, parse_embed, **kwargs)
    opts.update({'cust_func': embedded})
    pairs = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
    results = starmap(parse_result, pairs)
    return utils.multiplex(results)
|
An operator that loops over the input and performs the embedded
submodule. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
embed : the submodule, i.e., pipe_*(context, _INPUT, conf)
Most modules, with the exception of User inputs and Operators can be
sub-modules.
conf : {
'assign_part': {'value': <all or first>},
'assign_to': {'value': <assigned field name>},
'emit_part': {'value': <all or first>},
'mode': {'value': <assign or EMIT>},
'with': {'value': <looped field name or blank>},
'embed': {'value': {'conf': <module conf>}}
}
Returns
-------
_OUTPUT : generator of items
| 9.072318 | 10.854362 | 0.835822 |
# Fetch each configured URL, run the content through _parse_content,
# and yield one item per `token`-split chunk (or the whole content).
conf = DotDict(conf)
split_token = conf.get('token', **kwargs)
urls = utils.listize(conf['URL'])
for item in _INPUT:
    for item_url in urls:
        # the URL may be templated from the current item
        url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
        url = utils.get_abspath(url)
        if not url:
            continue
        # NOTE(review): f is never closed — consider contextlib.closing
        f = urlopen(url)
        # TODO: it seems that Yahoo! converts relative links to
        # absolute. This needs to be done on the content but seems to
        # be a non-trivial task python?
        content = unicode(f.read(), 'utf-8')
        if context and context.verbose:
            print '............Content .................'
            print content
            print '...............EOF...................'
        parsed = _parse_content(content, conf, **kwargs)
        items = parsed.split(split_token) if split_token else [parsed]
        if context and context.verbose:
            print "FetchPage: found count items:", len(items)
        for i in items:
            if context and context.verbose:
                print "--------------item data --------------------"
                print i
                print "--------------EOF item data ----------------"
            yield {"content": i}
        if item.get('forever'):
            # _INPUT is pipeforever and not a loop,
            # so we just yield our item once
            break
|
def pipe_fetchpage(context=None, _INPUT=None, conf=None, **kwargs)
|
A source that fetches the content of a given web site as a string.
Loopable.
context : pipe2py.Context object
_INPUT : pipeforever asyncPipe or an iterable of items or fields
conf : dict
URL -- url object contain the URL to download
from -- string from where to start the input
to -- string to limit the input
token -- if present, split the input on this token to generate items
Description: http://pipes.yahoo.com/pipes/docs?doc=sources#FetchPage
TODOS:
- don't retrieve pages larger than 200k
- don't retrieve if page is not indexable.
- item delimiter removes the closing tag if using a HTML tag
(not documented but happens)
- items should be cleaned, i.e. stripped of HTML tags
Yields
------
_OUTPUT : items
| 7.938751 | 7.449667 | 1.065652 |
# todo: iCal and KML
# Build the URL/path parsing funcs, fetch and parse each source, then
# flatten the per-source item lists into a single stream.
funcs = get_splits(None, conf, **cdicts(opts, kwargs))
parsed = get_parsed(_INPUT, funcs[0])
results = starmap(parse_result, parsed)
items = imap(utils.gen_items, results)
_OUTPUT = utils.multiplex(items)
return _OUTPUT
|
def pipe_fetchdata(context=None, _INPUT=None, conf=None, **kwargs)
|
A source that fetches and parses an XML or JSON file. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'URL': {'value': <url>},
'path': {'value': <dot separated path to data list>}
}
Yields
------
_OUTPUT : items
Examples
--------
>>> from os import path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> parent = p.dirname(p.dirname(__file__))
>>> abspath = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = "file://%s" % abspath
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys()[:5]
[u'y:repeatcount', u'description', u'pubDate', u'title', u'y:published']
>>> abspath = p.abspath(p.join(parent, 'data', 'places.xml'))
>>> path = 'appointment'
>>> url = "file://%s" % abspath
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> sorted(pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys())
['alarmTime', 'begin', 'duration', 'places', 'subject', 'uid']
>>> conf = {'URL': {'value': url}, 'path': {'value': ''}}
>>> sorted(pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys())
['appointment', 'reminder']
| 16.847126 | 20.146149 | 0.836245 |
# Fetch and parse every configured feed URL asynchronously, then
# flatten the per-URL entry lists into one stream.
splits = yield asyncGetSplits(_INPUT, conf['URL'], **cdicts(opts, kwargs))
items = yield asyncStarMap(asyncParseResult, splits)
_OUTPUT = utils.multiplex(items)
returnValue(_OUTPUT)
|
def asyncPipeFetch(context=None, _INPUT=None, conf=None, **kwargs)
|
A source that asynchronously fetches and parses one or more feeds to
return the feed entries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'URL': [
{'type': 'url', 'value': <url1>},
{'type': 'url', 'value': <url2>},
{'type': 'url', 'value': <url3>},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
| 18.30233 | 19.91452 | 0.919044 |
def pipe_fetch(context=None, _INPUT=None, conf=None, **kwargs):
    """Fetch and parse one or more feeds, returning their entries.
    Loopable.

    conf['URL'] holds the feed url objects; the per-feed entry lists
    are flattened into a single generator.
    """
    pairs = get_splits(_INPUT, conf['URL'], **cdicts(opts, kwargs))
    entries = starmap(parse_result, pairs)
    return utils.multiplex(entries)
|
A source that fetches and parses one or more feeds to return the
entries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'URL': [
{'type': 'url', 'value': <url1>},
{'type': 'url', 'value': <url2>},
{'type': 'url', 'value': <url3>},
]
}
Returns
-------
_OUTPUT : generator of items
| 18.19445 | 16.804356 | 1.082722 |
def pipe_filter(context=None, _INPUT=None, conf=None, **kwargs):
    """An operator that filters for source items matching the given
    rules. Not loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : pipe2py.modules pipe like object (iterable of items)
    conf : {
        'MODE': {'value': <'permit' or 'block'>},
        'COMBINE': {'value': <'and' or 'or'>},
        'RULE': [
            {
                'field': {'value': 'search field'},
                'op': {'value': 'one of SWITCH above'},
                'value': {'value': 'search term'}
            }
        ]
    }
    kwargs : other inputs, e.g., to feed terminals for rule values

    Returns
    -------
    _OUTPUT : generator of filtered items

    Raises
    ------
    Exception : if COMBINE is neither 'and' nor 'or'
    """
    conf = DotDict(conf)
    test = kwargs.pop('pass_if', None)
    permit = conf.get('MODE', **kwargs) == 'permit'
    combine = conf.get('COMBINE', **kwargs)

    # idiom fix: `x not in y` instead of `not x in y`
    if combine not in ('and', 'or'):
        raise Exception(
            "Invalid combine: %s. (Expected 'and' or 'or')" % combine)

    rule_defs = map(DotDict, utils.listize(conf['RULE']))
    get_pass = partial(utils.get_pass, test=test)
    get_value = partial(utils.get_value, **kwargs)
    parse_conf = partial(utils.parse_conf, parse_func=get_value, **kwargs)
    get_rules = lambda i: imap(parse_conf, rule_defs, repeat(i))
    funcs = [COMBINE_BOOLEAN[combine], utils.passthrough, utils.passthrough]

    inputs = imap(DotDict, _INPUT)
    splits = utils.broadcast(inputs, get_rules, utils.passthrough, get_pass)
    outputs = starmap(partial(parse_rules, **kwargs), splits)
    parsed = utils.dispatch(outputs, *funcs)
    gathered = starmap(partial(parse_result, permit=permit), parsed)
    _OUTPUT = ifilter(None, gathered)
    return _OUTPUT
|
An operator that filters for source items matching the given rules.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'MODE': {'value': <'permit' or 'block'>},
'COMBINE': {'value': <'and' or 'or'>}
'RULE': [
{
'field': {'value': 'search field'},
'op': {'value': 'one of SWITCH above'},
'value': {'value': 'search term'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of filtered items
Examples
--------
>>> import os.path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> from pipe2py.modules.pipefetchdata import pipe_fetchdata
>>> parent = p.dirname(p.dirname(__file__))
>>> file_name = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = 'file://%s' % file_name
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> input = pipe_fetchdata(_INPUT=pipe_forever(), conf=conf)
>>> mode = {'value': 'permit'}
>>> combine = {'value': 'and'}
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'web'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> pipe_filter(_INPUT=input, conf=conf).next()['title']
u'E-Commerce Website Developer | Elance Job'
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'kjhlked'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> list(pipe_filter(_INPUT=input, conf=conf))
[]
| 5.733296 | 5.896853 | 0.972264 |
return Split(context, _INPUT, conf, splits, **kwargs)
|
def pipe_split(context, _INPUT, conf, splits, **kwargs)
|
An operator that splits a source into identical copies. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : dict
splits : number of copies
Yields
------
_OUTPUT, _OUTPUT2... : copies of all source items
| 5.024946 | 12.37373 | 0.406098 |
def pipe_datebuilder(context=None, _INPUT=None, conf=None, **kwargs):
    """A date module that converts a text string into a datetime value.
    Useful as terminal data. Loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : pipeforever pipe or an iterable of items
    conf : {'DATE': {'type': 'datetime', 'value': '12/2/2014'}}

    Yields
    ------
    _OUTPUT : date timetuples

    Raises
    ------
    Exception : if the date string cannot be interpreted
    """
    conf = DotDict(conf)

    for item in _INPUT:
        _input = DotDict(item)
        date = utils.get_value(conf['DATE'], _input, **kwargs).lower()

        # str.endswith accepts a tuple of suffixes: one call per unit
        if date.endswith((' day', ' days')):
            count = int(date.split(' ')[0])
            new_date = dt.today() + timedelta(days=count)
        elif date.endswith((' year', ' years')):
            count = int(date.split(' ')[0])
            new_date = dt.today().replace(year=dt.today().year + count)
        else:
            # fixed keywords (e.g. via SWITCH), else free-form parsing
            new_date = SWITCH.get(date)

        if not new_date:
            new_date = utils.get_date(date)

        if not new_date:
            raise Exception('Unrecognized date string: %s' % date)

        yield new_date.timetuple()
|
A date module that converts a text string into a datetime value. Useful
as terminal data. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {'DATE': {'type': 'datetime', 'value': '12/2/2014'}}
Yields
------
_OUTPUT : date timetuples
| 2.922286 | 3.117914 | 0.937257 |
# itertools.imap for deferred callables: fire asyncCallable for every
# argument tuple at once and gather the deferreds into one result list.
deferreds = imap(asyncCallable, *iterables)
return gatherResults(deferreds, consumeErrors=True)
|
def asyncImap(asyncCallable, *iterables)
|
itertools.imap for deferred callables
| 5.704492 | 5.026701 | 1.134838 |
# Cooperative starmap for deferred callables: coopStar runs
# asyncCallable over the argument tuples, appending each result.
results = []
yield coopStar(asyncCallable, results.append, iterable)
returnValue(results)
|
def asyncStarCmap(asyncCallable, iterable)
|
itertools.starmap for deferred callables using cooperative multitasking
| 19.049925 | 16.660751 | 1.143401 |
# Parallel cooperative starmap: like asyncStarCmap, but driven by
# asyncStarParallel; each result is appended as it arrives.
results = []
yield asyncStarParallel(asyncCallable, results.append, iterable)
returnValue(results)
|
def asyncStarPmap(asyncCallable, iterable)
|
itertools.starmap for deferred callables using parallel cooperative
multitasking
| 13.569991 | 16.474888 | 0.823677 |
# itertools.starmap for deferred callables: fire them all and gather
# the deferreds into a single deferred list of results.
deferreds = starmap(asyncCallable, iterable)
return gatherResults(deferreds, consumeErrors=True)
|
def asyncStarMap(asyncCallable, iterable)
|
itertools.starmap for deferred callables
| 6.666833 | 5.858942 | 1.13789 |
# Parse the conf against each input item, then remap the resulting keys
# through the RSS and YAHOO alias tables and merge both views.
get_value = partial(utils.get_value, **kwargs)
pkwargs = utils.combine_dicts({'parse_func': get_value}, kwargs)
parse_conf = partial(utils.parse_conf, DotDict(conf), **pkwargs)
get_RSS = lambda key, value: (RSS.get(key, key), value)
get_YAHOO = lambda key, value: (YAHOO.get(key), value)
make_dict = lambda func, conf: dict(starmap(func, conf.iteritems()))
# keep only entries whose key AND value are both truthy
clean_dict = lambda d: dict(i for i in d.items() if all(i))
funcs = [partial(make_dict, get_RSS), partial(make_dict, get_YAHOO)]
finite = utils.finitize(_INPUT)
inputs = imap(DotDict, finite)
confs = imap(parse_conf, inputs)
splits = utils.broadcast(confs, *funcs)
combined = starmap(utils.combine_dicts, splits)
result = imap(clean_dict, combined)
_OUTPUT = imap(DotDict, result)
return _OUTPUT
|
def pipe_rssitembuilder(context=None, _INPUT=None, conf=None, **kwargs)
|
A source that builds an rss item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever asyncPipe or an iterable of items or fields
conf : {
'mediaContentType': {'type': 'text', 'value': ''},
'mediaContentHeight': {'type': 'text', 'value': ''},
'mediaContentWidth': {'type': 'text', 'value': ''},
'mediaContentURL': {'type': 'text', 'value': 'url'},
'mediaThumbHeight': {'type': 'text', 'value': ''},
'mediaThumbWidth': {'type': 'text', 'value': ''},
'mediaThumbURL': {'type': 'text', 'value': 'url'},
'description': {'type': 'text', 'value': 'description'},
'pubdate': {'type': 'text', 'value': 'pubdate'},
'author': {'type': 'text', 'value': 'author'},
'title': {'type': 'text', 'value': 'title'},
'link': {'type': 'text', 'value': 'url'},
'guid': {'type': 'text', 'value': 'guid'},
}
Yields
------
_OUTPUT : items
| 4.842864 | 5.310192 | 0.911994 |
# Split the deferred input on the 'part' conf, then join the parts for
# each item via parse_result, returning an iterator over the strings.
splits = yield asyncGetSplits(_INPUT, conf['part'], **cdicts(opts, kwargs))
_OUTPUT = yield asyncStarMap(partial(maybeDeferred, parse_result), splits)
returnValue(iter(_OUTPUT))
|
def asyncPipeStrconcat(context=None, _INPUT=None, conf=None, **kwargs)
|
A string module that asynchronously builds a string. Loopable. No direct
input.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'part': [
{'value': <'<img src="'>},
{'subkey': <'img.src'>},
{'value': <'">'>}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of joined strings
| 24.255583 | 28.286255 | 0.857504 |
def pipe_strconcat(context=None, _INPUT=None, conf=None, **kwargs):
    """Join the configured 'part' values into one string per input item.
    Loopable. Returns a generator of joined strings.
    """
    pairs = get_splits(_INPUT, conf['part'], **cdicts(opts, kwargs))
    return starmap(parse_result, pairs)
|
A string module that builds a string. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'part': [
{'value': '<img src="'},
{'subkey': 'img.src'},
{'value': '">'}
]
}
Returns
-------
_OUTPUT : generator of joined strings
| 23.076048 | 18.777336 | 1.228931 |
_input = yield _INPUT
asyncFuncs = yield asyncGetSplits(None, conf, **cdicts(opts, kwargs))
pieces = yield asyncFuncs[0]()
_pass = yield asyncFuncs[2]()
_OUTPUT = _input if _pass else unique_items(_input, pieces.field)
returnValue(_OUTPUT)
|
def asyncPipeUniq(context=None, _INPUT=None, conf=None, **kwargs)
|
An operator that asynchronously filters out non-unique items according
to the specified field. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items
conf : {'field': {'type': 'text', 'value': <field to be unique>}}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of unique items
| 19.806488 | 19.451721 | 1.018238 |
funcs = get_splits(None, conf, **cdicts(opts, kwargs))
pieces, _pass = funcs[0](), funcs[2]()
_OUTPUT = _INPUT if _pass else unique_items(_INPUT, pieces.field)
return _OUTPUT
|
def pipe_uniq(context=None, _INPUT=None, conf=None, **kwargs)
|
An operator that filters out non-unique items according to the specified
field. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- other inputs, e.g. to feed terminals for rule values
conf : {'field': {'type': 'text', 'value': <field to be unique>}}
Returns
-------
_OUTPUT : generator of unique items
| 19.378986 | 20.162096 | 0.961159 |
_input = yield _INPUT
_OUTPUT = get_output(_input, **kwargs)
returnValue(_OUTPUT)
|
def asyncPipeUnion(context=None, _INPUT=None, conf=None, **kwargs)
|
An operator that asynchronously merges multiple sources together.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : unused
Keyword arguments
-----------------
_OTHER1 : asyncPipe like object
_OTHER2 : etc.
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
| 12.108624 | 15.705399 | 0.770985 |
_OUTPUT = get_output(_INPUT, **kwargs)
return _OUTPUT
|
def pipe_union(context=None, _INPUT=None, conf=None, **kwargs)
|
An operator that merges multiple sources together. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Keyword arguments
-----------------
_OTHER1 : pipe2py.modules pipe like object
_OTHER2 : etc.
Returns
-------
_OUTPUT : generator of items
| 7.689914 | 14.03522 | 0.547901 |
test = kwargs.pop('pass_if', None)
_pass = utils.get_pass(test=test)
key_defs = imap(DotDict, utils.listize(conf['KEY']))
get_value = partial(utils.get_value, **kwargs)
parse_conf = partial(utils.parse_conf, parse_func=get_value, **kwargs)
keys = imap(parse_conf, key_defs)
order = ('%s%s' % ('-' if k.dir == 'DESC' else '', k.field) for k in keys)
comparers = map(get_comparer, order)
cmp_func = partial(multikeysort, comparers=comparers)
_OUTPUT = _INPUT if _pass else iter(sorted(_INPUT, cmp=cmp_func))
return _OUTPUT
|
def pipe_sort(context=None, _INPUT=None, conf=None, **kwargs)
|
An operator that sorts the input source according to the specified key.
Not loopable. Not lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- other inputs, e.g. to feed terminals for rule values
conf : {
'KEY': [
{
'field': {'type': 'text', 'value': 'title'},
'dir': {'type': 'text', 'value': 'DESC'}
}
]
}
Returns
-------
_OUTPUT : generator of sorted items
| 5.176033 | 4.830243 | 1.071588 |
conf = DotDict(conf)
for item in _INPUT:
item = DotDict(item)
yield {
value: item.get(conf.get(key, **kwargs))
for key, value in RSS_FIELDS.items()}
|
def pipe_createrss(context=None, _INPUT=None, conf=None, **kwargs)
|
An operator that converts a source into an RSS stream. Not loopable.
| 7.411638 | 7.133359 | 1.039011 |
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
if context and context.verbose:
print "pipe_fetchsitefeed loading:", url
for link in autorss.getRSSLink(url.encode('utf-8')):
parsed = speedparser.parse(urlopen(link).read())
for entry in utils.gen_entries(parsed):
yield entry
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break
|
def pipe_fetchsitefeed(context=None, _INPUT=None, conf=None, **kwargs)
|
A source that fetches and parses the first feed found on one or more
sites. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
Yields
------
_OUTPUT : items
| 9.712928 | 9.708862 | 1.000419 |
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
asyncConvert = partial(maybeDeferred, convert_func)
asyncFuncs = get_async_dispatch_funcs('pass', asyncConvert)
parsed = yield asyncDispatch(splits, *asyncFuncs)
_OUTPUT = yield maybeDeferred(parse_results, parsed)
returnValue(iter(_OUTPUT))
|
def asyncPipeRegex(context=None, _INPUT=None, conf=None, **kwargs)
|
An operator that asynchronously replaces text in items using regexes.
Each has the general format: "In [field] replace [match] with [replace]".
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'RULE': [
{
'field': {'value': <'search field'>},
'match': {'value': <'regex'>},
'replace': {'value': <'replacement'>},
'globalmatch': {'value': '1'},
'singlelinematch': {'value': '2'},
'multilinematch': {'value': '4'},
'casematch': {'value': '8'}
}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
| 16.606449 | 17.323242 | 0.958622 |
splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs('pass', convert_func))
_OUTPUT = parse_results(parsed)
return _OUTPUT
|
def pipe_regex(context=None, _INPUT=None, conf=None, **kwargs)
|
An operator that replaces text in items using regexes. Each has the
general format: "In [field] replace [match] with [replace]". Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'field': {'value': <'search field'>},
'match': {'value': <'regex'>},
'replace': {'value': <'replacement'>},
'globalmatch': {'value': '1'},
'singlelinematch': {'value': '2'},
'multilinematch': {'value': '4'},
'casematch': {'value': '8'}
}
]
}
Returns
-------
_OUTPUT : generator of items
| 20.777119 | 19.820429 | 1.048268 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.