code | signature | docstring | loss_without_docstring | loss_with_docstring | factor |
string | string | string | float64 | float64 | float64 |
---|---|---|---|---|---|
return [Socket(self._device, i) for i in range(len(self.raw))] | def sockets(self) | Return socket objects of the socket control. | 10.465815 | 7.632806 | 1.371162 |
res_payload = res.payload.decode('utf-8')
output = res_payload.strip()
_LOGGER.debug('Status: %s, Received: %s', res.code, output)
if not output:
return None
if not res.code.is_successful():
if 128 <= res.code < 160:
raise ClientError(output)
elif 160 <= res.code < 192:
raise ServerError(output)
if not parse_json:
return output
return json.loads(output) | def _process_output(res, parse_json=True) | Process output. | 3.388819 | 3.289483 | 1.030198 |
if self._protocol is None:
self._protocol = asyncio.Task(Context.create_client_context(
loop=self._loop))
return (await self._protocol) | async def _get_protocol(self) | Get the protocol for the request. | 6.853505 | 5.613828 | 1.220826 |
# Be responsible and clean up.
protocol = await self._get_protocol()
await protocol.shutdown()
self._protocol = None
# Let any observers know the protocol has been shutdown.
for ob_error in self._observations_err_callbacks:
ob_error(exc)
self._observations_err_callbacks.clear() | async def _reset_protocol(self, exc=None) | Reset the protocol if an error occurs. | 7.493239 | 6.505764 | 1.151785 |
try:
protocol = await self._get_protocol()
pr = protocol.request(msg)
r = await pr.response
return pr, r
except ConstructionRenderableError as e:
raise ClientError("There was an error with the request.", e)
except RequestTimedOut as e:
await self._reset_protocol(e)
raise RequestTimeout('Request timed out.', e)
except (OSError, socket.gaierror, Error) as e:
# aiocoap sometimes raises an OSError/socket.gaierror too.
# aiocoap issue #124
await self._reset_protocol(e)
raise ServerError("There was an error with the request.", e)
except asyncio.CancelledError as e:
await self._reset_protocol(e)
raise e | async def _get_response(self, msg) | Perform the request, get the response. | 4.46406 | 4.236474 | 1.053721 |
if api_command.observe:
await self._observe(api_command)
return
method = api_command.method
path = api_command.path
data = api_command.data
parse_json = api_command.parse_json
url = api_command.url(self._host)
kwargs = {}
if data is not None:
kwargs['payload'] = json.dumps(data).encode('utf-8')
_LOGGER.debug('Executing %s %s %s: %s', self._host, method, path,
data)
else:
_LOGGER.debug('Executing %s %s %s', self._host, method, path)
api_method = Code.GET
if method == 'put':
api_method = Code.PUT
elif method == 'post':
api_method = Code.POST
elif method == 'delete':
api_method = Code.DELETE
elif method == 'fetch':
api_method = Code.FETCH
elif method == 'patch':
api_method = Code.PATCH
msg = Message(code=api_method, uri=url, **kwargs)
_, res = await self._get_response(msg)
api_command.result = _process_output(res, parse_json)
return api_command.result | async def _execute(self, api_command) | Execute the command. | 2.635517 | 2.569653 | 1.025632 |
if not isinstance(api_commands, list):
result = await self._execute(api_commands)
return result
commands = (self._execute(api_command) for api_command in api_commands)
command_results = await asyncio.gather(*commands, loop=self._loop)
return command_results | async def request(self, api_commands) | Make a request. | 3.063554 | 2.848501 | 1.075497 |
duration = api_command.observe_duration
url = api_command.url(self._host)
err_callback = api_command.err_callback
msg = Message(code=Code.GET, uri=url, observe=duration)
# Note that this is necessary to start observing
pr, r = await self._get_response(msg)
api_command.result = _process_output(r)
def success_callback(res):
api_command.result = _process_output(res)
def error_callback(ex):
err_callback(ex)
ob = pr.observation
ob.register_callback(success_callback)
ob.register_errback(error_callback)
self._observations_err_callbacks.append(ob.error) | async def _observe(self, api_command) | Observe an endpoint. | 6.122834 | 5.83895 | 1.048619 |
if not self._psk:
PatchedDTLSSecurityStore.IDENTITY = 'Client_identity'.encode(
'utf-8')
PatchedDTLSSecurityStore.KEY = security_key.encode('utf-8')
command = Gateway().generate_psk(self._psk_id)
self._psk = await self.request(command)
PatchedDTLSSecurityStore.IDENTITY = self._psk_id.encode('utf-8')
PatchedDTLSSecurityStore.KEY = self._psk.encode('utf-8')
# aiocoap has now cached our psk, so it must be reset.
# We also no longer need the protocol, so this will clean that up.
await self._reset_protocol()
return self._psk | async def generate_psk(self, security_key) | Generate and set a psk from the security key. | 5.081549 | 4.902538 | 1.036514 |
info = self.raw.get(ATTR_MEMBERS, {})
if not info or ROOT_DEVICES2 not in info:
return []
return info[ROOT_DEVICES2].get(ATTR_ID, []) | def member_ids(self) | Members of this group. | 8.565978 | 7.263086 | 1.179385 |
values = {
ATTR_LIGHT_DIMMER: dimmer,
}
if transition_time is not None:
values[ATTR_TRANSITION_TIME] = transition_time
return self.set_values(values) | def set_dimmer(self, dimmer, transition_time=None) | Set dimmer value of a group.
dimmer: Integer between 0..255
transition_time: Integer representing tenth of a second (default None) | 2.604921 | 3.72335 | 0.699618 |
print("Printing information about the Gateway")
data = api(gateway.get_gateway_info()).raw
print(jsonify(data)) | def print_gateway() | Print gateway info as JSON | 11.942122 | 9.537248 | 1.252156 |
print("Printing information about all devices paired to the Gateway")
if len(devices) == 0:
exit(bold("No devices paired"))
container = []
for dev in devices:
container.append(dev.raw)
print(jsonify(container)) | def print_all_devices() | Print all devices as JSON | 7.62807 | 6.679534 | 1.142006 |
print("Printing information about all lamps paired to the Gateway")
lights = [dev for dev in devices if dev.has_light_control]
if len(lights) == 0:
exit(bold("No lamps paired"))
container = []
for l in lights:
container.append(l.raw)
print(jsonify(container)) | def print_lamps() | Print all lamp devices as JSON | 6.803607 | 5.729262 | 1.187519 |
print("Printing information about smart tasks")
tasks = api(gateway.get_smart_tasks())
if len(tasks) == 0:
exit(bold("No smart tasks defined"))
container = []
for task in tasks:
container.append(api(task).task_control.raw)
print(jsonify(container)) | def print_smart_tasks() | Print smart tasks as JSON | 8.044561 | 7.155881 | 1.124189 |
print("Printing information about all groups defined in the Gateway")
groups = api(gateway.get_groups())
if len(groups) == 0:
exit(bold("No groups defined"))
container = []
for group in groups:
container.append(api(group).raw)
print(jsonify(container)) | def print_groups() | Print all groups as JSON | 7.664134 | 6.626082 | 1.156661 |
if not GDAL_AVAILABLE:
raise Exception("richdem.LoadGDAL() requires GDAL.")
allowed_types = {gdal.GDT_Byte,gdal.GDT_Int16,gdal.GDT_Int32,gdal.GDT_UInt16,gdal.GDT_UInt32,gdal.GDT_Float32,gdal.GDT_Float64}
#Read in data
src_ds = gdal.Open(filename)
srcband = src_ds.GetRasterBand(1)
if no_data is None:
no_data = srcband.GetNoDataValue()
if no_data is None:
raise Exception("The source data did not have a NoData value. Please use the no_data argument to specify one. If should not be equal to any of the actual data values. If you are using all possible data values, then the situation is pretty hopeless - sorry.")
srcdata = rdarray(srcband.ReadAsArray(), no_data=no_data)
# raster_srs = osr.SpatialReference()
# raster_srs.ImportFromWkt(raster.GetProjectionRef())
if not srcband.DataType in allowed_types:
raise Exception("This datatype is not supported. Please file a bug report on RichDEM.")
srcdata.projection = src_ds.GetProjectionRef()
srcdata.geotransform = src_ds.GetGeoTransform()
srcdata.metadata = dict()
for k,v in src_ds.GetMetadata().items():
srcdata.metadata[k] = v
_AddAnalysis(srcdata, "LoadGDAL(filename={0}, no_data={1})".format(filename, no_data))
return srcdata | def LoadGDAL(filename, no_data=None) | Read a GDAL file.
Opens any file GDAL can read, selects the first raster band, and loads it
and its metadata into a RichDEM array of the appropriate data type.
If you need to do something more complicated, look at the source of this
function.
Args:
filename (str): Name of the raster file to open
no_data (float): Optionally, set the no_data value to this.
Returns:
A RichDEM array | 3.412883 | 3.448624 | 0.989636 |
if type(rda) is not rdarray:
raise Exception("A richdem.rdarray or numpy.ndarray is required!")
if not GDAL_AVAILABLE:
raise Exception("richdem.SaveGDAL() requires GDAL.")
driver = gdal.GetDriverByName('GTiff')
data_type = gdal.GDT_Float32 #TODO
data_set = driver.Create(filename, xsize=rda.shape[1], ysize=rda.shape[0], bands=1, eType=data_type)
data_set.SetGeoTransform(rda.geotransform)
data_set.SetProjection(rda.projection)
band = data_set.GetRasterBand(1)
band.SetNoDataValue(rda.no_data)
band.WriteArray(np.array(rda))
for k,v in rda.metadata.items():
data_set.SetMetadataItem(str(k),str(v)) | def SaveGDAL(filename, rda) | Save a GDAL file.
Saves a RichDEM array to a data file in GeoTIFF format.
If you need to do something more complicated, look at the source of this
function.
Args:
filename (str): Name of the raster file to be created
rda (rdarray): Data to save.
Returns:
No Return | 2.539926 | 2.388869 | 1.063233 |
if type(dem) is not rdarray:
raise Exception("A richdem.rdarray or numpy.ndarray is required!")
if topology not in ['D8','D4']:
raise Exception("Unknown topology!")
if not in_place:
dem = dem.copy()
_AddAnalysis(dem, "FillDepressions(dem, epsilon={0})".format(epsilon))
demw = dem.wrap()
if epsilon:
if topology=='D8':
_richdem.rdPFepsilonD8(demw)
elif topology=='D4':
_richdem.rdPFepsilonD4(demw)
else:
if topology=='D8':
_richdem.rdFillDepressionsD8(demw)
elif topology=='D4':
_richdem.rdFillDepressionsD4(demw)
dem.copyFromWrapped(demw)
if not in_place:
return dem | def FillDepressions(
dem,
epsilon = False,
in_place = False,
topology = 'D8'
) | Fills all depressions in a DEM.
Args:
dem (rdarray): An elevation model
epsilon (float): If True, an epsilon gradient is imposed to all flat regions.
This ensures that there is always a local gradient.
in_place (bool): If True, the DEM is modified in place and there is
no return; otherwise, a new, altered DEM is returned.
topology (string): A topology indicator
Returns:
DEM without depressions. | 3.370652 | 3.411494 | 0.988028 |
if type(dem) is not rdarray:
raise Exception("A richdem.rdarray or numpy.ndarray is required!")
if topology not in ['D8','D4']:
raise Exception("Unknown topology!")
if not in_place:
dem = dem.copy()
_AddAnalysis(dem, "BreachDepressions(dem)")
demw = dem.wrap()
if topology=='D8':
_richdem.rdBreachDepressionsD8(demw)
elif topology=='D4':
_richdem.rdBreachDepressionsD4(demw)
dem.copyFromWrapped(demw)
if not in_place:
return dem | def BreachDepressions(
dem,
in_place = False,
topology = 'D8'
) | Breaches all depressions in a DEM.
Args:
dem (rdarray): An elevation model
in_place (bool): If True, the DEM is modified in place and there is
no return; otherwise, a new, altered DEM is returned.
topology (string): A topology indicator
Returns:
DEM without depressions. | 4.185639 | 4.127036 | 1.0142 |
if type(dem) is not rdarray:
raise Exception("A richdem.rdarray or numpy.ndarray is required!")
if not in_place:
dem = dem.copy()
_AddAnalysis(dem, "ResolveFlats(dem, in_place={in_place})".format(in_place=in_place))
demw = dem.wrap()
_richdem.rdResolveFlatsEpsilon(demw)
dem.copyFromWrapped(demw)
if not in_place:
return dem | def ResolveFlats(
dem,
in_place = False
) | Attempts to resolve flats by imposing a local gradient
Args:
dem (rdarray): An elevation model
in_place (bool): If True, the DEM is modified in place and there is
no return; otherwise, a new, altered DEM is returned.
Returns:
DEM modified such that all flats drain. | 6.772937 | 6.805242 | 0.995253 |
if type(props) is not rd3array:
raise Exception("A richdem.rd3array or numpy.ndarray is required!")
if weights is not None and in_place:
accum = rdarray(weights, no_data=-1)
elif weights is not None and not in_place:
accum = rdarray(weights, copy=True, meta_obj=props, no_data=-1)
elif weights is None:
accum = rdarray(np.ones(shape=props.shape[0:2], dtype='float64'), meta_obj=props, no_data=-1)
else:
raise Exception("Execution should never reach this point!")
if accum.dtype!='float64':
raise Exception("Accumulation array must be of type 'float64'!")
accumw = accum.wrap()
_AddAnalysis(accum, "FlowAccumFromProps(dem, weights={weights}, in_place={in_place})".format(
weights = 'None' if weights is None else 'weights',
in_place = in_place
))
_richdem.FlowAccumulation(props.wrap(),accumw)
accum.copyFromWrapped(accumw)
return accum | def FlowAccumFromProps(
props,
weights = None,
in_place = False
) | Calculates flow accumulation from flow proportions.
Args:
props (rdarray): An elevation model
weights (rdarray): Flow accumulation weights to use. This is the
amount of flow generated by each cell. If this is
not provided, each cell will generate 1 unit of
flow.
in_place (bool): If True, then `weights` is modified in place. An
accumulation matrix is always returned, but it will
just be a view of the modified data if `in_place`
is True.
Returns:
A flow accumulation array. If `weights` was provided and `in_place` was
True, then this matrix is a view of the modified data. | 4.655315 | 4.575643 | 1.017412 |
return delimiter.join(filter(lambda s: s != '', map(lambda s: s.lstrip(delimiter), args))) | def _join(*args) | Join S3 bucket args together.
Remove empty entries and strip left-leading ``/`` | 6.395042 | 5.964279 | 1.072224 |
self.bookstore_settings = BookstoreSettings(config=self.config)
self.session = aiobotocore.get_session() | def initialize(self) | Initialize a helper to get bookstore settings and session information quickly | 8.154492 | 3.864939 | 2.109863 |
self.log.info("Attempt publishing to %s", path)
if path == '' or path == '/':
raise web.HTTPError(400, "Must provide a path for publishing")
model = self.get_json_body()
if model:
await self._publish(model, path.lstrip('/'))
else:
raise web.HTTPError(400, "Cannot publish an empty model") | async def put(self, path='') | Publish a notebook on a given path.
The payload directly matches the contents API for PUT. | 4.032966 | 3.845405 | 1.048775 |
if model['type'] != 'notebook':
raise web.HTTPError(400, "bookstore only publishes notebooks")
content = model['content']
full_s3_path = s3_path(
self.bookstore_settings.s3_bucket, self.bookstore_settings.published_prefix, path
)
file_key = s3_key(self.bookstore_settings.published_prefix, path)
self.log.info(
"Publishing to %s",
s3_display_path(
self.bookstore_settings.s3_bucket, self.bookstore_settings.published_prefix, path
),
)
async with self.session.create_client(
's3',
aws_secret_access_key=self.bookstore_settings.s3_secret_access_key,
aws_access_key_id=self.bookstore_settings.s3_access_key_id,
endpoint_url=self.bookstore_settings.s3_endpoint_url,
region_name=self.bookstore_settings.s3_region_name,
) as client:
self.log.info("Processing published write of %s", path)
obj = await client.put_object(
Bucket=self.bookstore_settings.s3_bucket, Key=file_key, Body=json.dumps(content)
)
self.log.info("Done with published write of %s", path)
self.set_status(201)
resp_content = {"s3path": full_s3_path}
if 'VersionId' in obj:
resp_content["versionID"] = obj['VersionId']
resp_str = json.dumps(resp_content)
self.finish(resp_str) | async def _publish(self, model, path) | Publish notebook model to the path | 2.327378 | 2.272197 | 1.024285 |
general_settings = [settings.s3_bucket != "", settings.s3_endpoint_url != ""]
archive_settings = [settings.workspace_prefix != ""]
published_settings = [settings.published_prefix != ""]
validation_checks = {
"bookstore_valid": all(general_settings),
"archive_valid": all(archive_settings),
"publish_valid": all(published_settings),
}
return validation_checks | def validate_bookstore(settings: BookstoreSettings) | Validate bookstore configuration settings.
Parameters
----------
settings: bookstore.bookstore_config.BookstoreSettings
The instantiated `Settings` object to be validated. | 4.253714 | 4.486779 | 0.948055 |
self.token = self.nb_record.token
first = requests.get(f"{self.url}/login")
self.xsrf_token = first.cookies.get("_xsrf", "") | def setup_auth(self) | Sets up token access for authorizing requests to notebook server.
This sets the notebook token as self.token and the xsrf_token as self.xsrf_token. | 9.505254 | 6.820478 | 1.393635 |
self.req_session = requests.Session()
self.req_session.headers.update(self.headers) | def setup_request_sessions(self) | Sets up a requests.Session object for sharing headers across API requests. | 3.240341 | 2.351883 | 1.377765 |
async with self.path_lock_ready:
lock = self.path_locks.get(record.filepath)
if lock is None:
lock = Lock()
self.path_locks[record.filepath] = lock
# Skip writes when a given path is already locked
if lock.locked():
self.log.info("Skipping archive of %s", record.filepath)
return
async with lock:
try:
async with self.session.create_client(
's3',
aws_secret_access_key=self.settings.s3_secret_access_key,
aws_access_key_id=self.settings.s3_access_key_id,
endpoint_url=self.settings.s3_endpoint_url,
region_name=self.settings.s3_region_name,
) as client:
self.log.info("Processing storage write of %s", record.filepath)
file_key = s3_key(self.settings.workspace_prefix, record.filepath)
await client.put_object(
Bucket=self.settings.s3_bucket, Key=file_key, Body=record.content
)
self.log.info("Done with storage write of %s", record.filepath)
except Exception as e:
self.log.error(
'Error while archiving file: %s %s', record.filepath, e, exc_info=True
) | async def archive(self, record: ArchiveRecord) | Process a record to write to storage.
Acquire a path lock before archive. Writing to storage will only be
allowed to a path if a valid `path_lock` is held and the path is not
locked by another process.
Parameters
----------
record : ArchiveRecord
A notebook and where it should be written to storage | 2.440537 | 2.300235 | 1.060995 |
if model["type"] != "notebook":
return
content = json.dumps(model["content"])
loop = ioloop.IOLoop.current()
# Offload archival and schedule write to storage with the current event loop
loop.spawn_callback(
self.archive,
ArchiveRecord(
content=content, filepath=path, queued_time=ioloop.IOLoop.current().time()
),
) | def run_pre_save_hook(self, model, path, **kwargs) | Send request to store notebook to S3.
This hook offloads the storage request to the event loop.
When the event loop is available for execution of the request, the
storage of the notebook will be done and the write to storage occurs.
Parameters
----------
model : str
The type of file
path : str
The storage location | 9.357324 | 7.859406 | 1.190589 |
app = Flask(__name__)
app.config.from_object(CONFIG[config_name])
BOOTSTRAP.init_app(app)
# call controllers
from flask_seguro.controllers.main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app | def create_app(config_name) | Factory Function | 3.937856 | 3.833851 | 1.027128 |
params = kwargs or {}
params['reference'] = self.reference
params['preApprovalCode'] = self.code
for i, item in enumerate(self.items, 1):
params['itemId%s' % i] = item.get('id')
params['itemDescription%s' % i] = item.get('description')
params['itemAmount%s' % i] = item.get('amount')
params['itemQuantity%s' % i] = item.get('quantity')
params['itemWeight%s' % i] = item.get('weight')
params['itemShippingCost%s' % i] = item.get('shipping_cost')
self.data.update(params)
self.clean_none_params() | def build_pre_approval_payment_params(self, **kwargs) | build a dict with params | 2.314905 | 2.279108 | 1.015706 |
return requests.get(url, params=self.data, headers=self.config.HEADERS) | def get(self, url) | do a get transaction | 5.901121 | 6.431768 | 0.917496 |
return requests.post(url, data=self.data, headers=self.config.HEADERS) | def post(self, url) | do a post request | 5.245187 | 5.878664 | 0.892241 |
self.data['currency'] = self.config.CURRENCY
self.build_checkout_params(**kwargs)
if transparent:
response = self.post(url=self.config.TRANSPARENT_CHECKOUT_URL)
else:
response = self.post(url=self.config.CHECKOUT_URL)
return PagSeguroCheckoutResponse(response.content, config=self.config) | def checkout(self, transparent=False, **kwargs) | create a pagseguro checkout | 3.290725 | 2.941825 | 1.1186 |
response = self.get(url=self.config.NOTIFICATION_URL % code)
return PagSeguroNotificationResponse(response.content, self.config) | def check_notification(self, code) | check a notification by its code | 7.494095 | 6.937065 | 1.080298 |
response = self.get(
url=self.config.PRE_APPROVAL_NOTIFICATION_URL % code)
return PagSeguroPreApprovalNotificationResponse(
response.content, self.config) | def check_pre_approval_notification(self, code) | check a notification by its code | 5.656956 | 5.128829 | 1.102972 |
self.build_pre_approval_payment_params(**kwargs)
response = self.post(url=self.config.PRE_APPROVAL_PAYMENT_URL)
return PagSeguroPreApprovalPayment(response.content, self.config) | def pre_approval_ask_payment(self, **kwargs) | ask form a subscribe payment | 4.806258 | 4.521984 | 1.062865 |
response = self.get(url=self.config.PRE_APPROVAL_CANCEL_URL % code)
return PagSeguroPreApprovalCancel(response.content, self.config) | def pre_approval_cancel(self, code) | cancel a subscribe | 5.0931 | 5.329726 | 0.955603 |
response = self.get(url=self.config.TRANSACTION_URL % code)
return PagSeguroNotificationResponse(response.content, self.config) | def check_transaction(self, code) | check a transaction by its code | 8.704833 | 7.641752 | 1.139115 |
last_page = False
results = []
while last_page is False:
search_result = self._consume_query_transactions(
initial_date, final_date, page, max_results)
results.extend(search_result.transactions)
if search_result.current_page is None or \
search_result.total_pages is None or \
search_result.current_page == search_result.total_pages:
last_page = True
else:
page = search_result.current_page + 1
return results | def query_transactions(self, initial_date, final_date,
page=None,
max_results=None) | query transaction by date range | 2.192976 | 2.248615 | 0.975257 |
last_page = False
results = []
while last_page is False:
search_result = self._consume_query_pre_approvals(
initial_date, final_date, page, max_results)
results.extend(search_result.pre_approvals)
if search_result.current_page is None or \
search_result.total_pages is None or \
search_result.current_page == search_result.total_pages:
last_page = True
else:
page = search_result.current_page + 1
return results | def query_pre_approvals(self, initial_date, final_date, page=None,
max_results=None) | query pre-approvals by date range | 2.213883 | 2.262547 | 0.978491 |
cart = Cart(session['cart'])
if cart.change_item(item_id, 'add'):
session['cart'] = cart.to_dict()
return list_products() | def add_to_cart(item_id) | Cart with Product | 4.540049 | 5.105798 | 0.889195 |
return {
"total": self.total,
"subtotal": self.subtotal,
"items": self.items,
"extra_amount": self.extra_amount
} | def to_dict(self) | Attribute values to dict | 4.710343 | 3.934988 | 1.197041 |
product = Products().get_one(item_id)
if product:
if operation == 'add':
self.items.append(product)
elif operation == 'remove':
cart_p = [x for x in self.items if x['id'] == product['id']]
self.items.remove(cart_p[0])
self.update()
return True
else:
return False | def change_item(self, item_id, operation) | Remove items in cart | 2.908866 | 2.609492 | 1.114725 |
subtotal = float(0)
total = float(0)
for product in self.items:
subtotal += float(product["price"])
if subtotal > 0:
total = subtotal + self.extra_amount
self.subtotal = subtotal
self.total = total | def update(self) | Remove items in cart | 3.940155 | 3.22341 | 1.222356 |
u = urlparse(url)
if u.netloc.find('@') > -1 and (u.scheme == 'bolt' or u.scheme == 'bolt+routing'):
credentials, hostname = u.netloc.rsplit('@', 1)
username, password, = credentials.split(':')
else:
raise ValueError("Expecting url format: bolt://user:password@localhost:7687"
" got {0}".format(url))
self.driver = GraphDatabase.driver(u.scheme + '://' + hostname,
auth=basic_auth(username, password),
encrypted=config.ENCRYPTED_CONNECTION,
max_pool_size=config.MAX_POOL_SIZE)
self.url = url
self._pid = os.getpid()
self._active_transaction = None | def set_connection(self, url) | Sets the connection URL to the address a Neo4j server is set up at | 3.225122 | 3.111593 | 1.036486 |
if self._active_transaction:
raise SystemError("Transaction in progress")
self._active_transaction = self.driver.session(access_mode=access_mode).begin_transaction() | def begin(self, access_mode=None) | Begins a new transaction, raises SystemError exception if a transaction is in progress | 4.923153 | 3.532456 | 1.393691 |
# Object resolution occurs in-place
for a_result_item in enumerate(result_list):
for a_result_attribute in enumerate(a_result_item[1]):
try:
# Primitive types should remain primitive types,
# Nodes to be resolved to native objects
resolved_object = a_result_attribute[1]
if type(a_result_attribute[1]) is Node:
resolved_object = self._NODE_CLASS_REGISTRY[frozenset(a_result_attribute[1].labels)].inflate(
a_result_attribute[1])
if type(a_result_attribute[1]) is list:
resolved_object = self._object_resolution([a_result_attribute[1]])
result_list[a_result_item[0]][a_result_attribute[0]] = resolved_object
except KeyError:
# Not being able to match the label set of a node with a known object results
# in a KeyError in the internal dictionary used for resolution. If it is impossible
# to match, then raise an exception with more details about the error.
raise ModelDefinitionMismatch(a_result_attribute[1], self._NODE_CLASS_REGISTRY)
return result_list | def _object_resolution(self, result_list) | Performs in place automatic object resolution on a set of results
returned by cypher_query.
The function operates recursively in order to be able to resolve Nodes
within nested list structures. Not meant to be called directly,
used primarily by cypher_query.
:param result_list: A list of results as returned by cypher_query.
:type list:
:return: A list of instantiated objects. | 5.030807 | 4.618991 | 1.089157 |
if self._pid != os.getpid():
self.set_connection(self.url)
if self._active_transaction:
session = self._active_transaction
else:
session = self.driver.session()
try:
# Retrieve the data
start = time.time()
response = session.run(query, params)
results, meta = [list(r.values()) for r in response], response.keys()
end = time.time()
if resolve_objects:
# Do any automatic resolution required
results = self._object_resolution(results)
except CypherError as ce:
if ce.code == u'Neo.ClientError.Schema.ConstraintValidationFailed':
if 'already exists with label' in ce.message and handle_unique:
raise UniqueProperty(ce.message)
raise ConstraintValidationFailed(ce.message)
else:
exc_info = sys.exc_info()
if sys.version_info >= (3, 0):
raise exc_info[1].with_traceback(exc_info[2])
else:
raise exc_info[1]
except SessionError:
if retry_on_session_expire:
self.set_connection(self.url)
return self.cypher_query(query=query,
params=params,
handle_unique=handle_unique,
retry_on_session_expire=False)
raise
if os.environ.get('NEOMODEL_CYPHER_DEBUG', False):
logger.debug("query: " + query + "\nparams: " + repr(params) + "\ntook: {:.2g}s\n".format(end - start))
return results, meta | def cypher_query(self, query, params=None, handle_unique=True, retry_on_session_expire=False, resolve_objects=False) | Runs a query on the database and returns a list of results and their headers.
:param query: A CYPHER query
:type: str
:param params: Dictionary of parameters
:type: dict
:param handle_unique: Whether or not to raise UniqueProperty exception on Cypher's ConstraintValidation errors
:type: bool
:param retry_on_session_expire: Whether or not to attempt the same query again if the transaction has expired
:type: bool
:param resolve_objects: Whether to attempt to resolve the returned nodes to data model objects automatically
:type: bool | 2.904351 | 2.838932 | 1.023044 |
if direction == OUTGOING:
stmt = '-{0}->'
elif direction == INCOMING:
stmt = '<-{0}-'
else:
stmt = '-{0}-'
rel_props = ''
if relation_properties:
rel_props = ' {{{0}}}'.format(', '.join(
['{0}: {1}'.format(key, value) for key, value in relation_properties.items()]))
# direct, relation_type=None is unspecified, relation_type
if relation_type is None:
stmt = stmt.format('')
# all("*" wildcard) relation_type
elif relation_type == '*':
stmt = stmt.format('[*]')
else:
# explicit relation_type
stmt = stmt.format('[{0}:`{1}`{2}]'.format(ident if ident else '', relation_type, rel_props))
return "({0}){1}({2})".format(lhs, stmt, rhs) | def _rel_helper(lhs, rhs, ident=None, relation_type=None, direction=None, relation_properties=None, **kwargs) | Generate a relationship matching string, with specified parameters.
Examples:
relation_direction = OUTGOING: (lhs)-[relation_ident:relation_type]->(rhs)
relation_direction = INCOMING: (lhs)<-[relation_ident:relation_type]-(rhs)
relation_direction = EITHER: (lhs)-[relation_ident:relation_type]-(rhs)
:param lhs: The left hand statement.
:type lhs: str
:param rhs: The right hand statement.
:type rhs: str
:param ident: A specific identity to name the relationship, or None.
:type ident: str
:param relation_type: None for all direct rels, * for all of any length, or a name of an explicit rel.
:type relation_type: str
:param direction: None or EITHER for all OUTGOING,INCOMING,EITHER. Otherwise OUTGOING or INCOMING.
:param relation_properties: dictionary of relationship properties to match
:returns: string | 3.511023 | 3.252255 | 1.079566 |
rels = cls.defined_properties(rels=True, aliases=False, properties=False)
for key, value in rels.items():
if hasattr(node_set, key):
raise ValueError("Can't install traversal '{0}' exists on NodeSet".format(key))
rel = getattr(cls, key)
rel._lookup_node_class()
traversal = Traversal(source=node_set, name=key, definition=rel.definition)
setattr(node_set, key, traversal) | def install_traversals(cls, node_set) | For a StructuredNode class install Traversal objects for each
relationship definition on a NodeSet instance | 5.157004 | 4.758096 | 1.083838 |
output = {}
for key, value in kwargs.items():
if '__' in key:
prop, operator = key.rsplit('__')
operator = OPERATOR_TABLE[operator]
else:
prop = key
operator = '='
if prop not in cls.defined_properties(rels=False):
raise ValueError("No such property {0} on {1}".format(prop, cls.__name__))
property_obj = getattr(cls, prop)
if isinstance(property_obj, AliasProperty):
prop = property_obj.aliased_to()
deflated_value = getattr(cls, prop).deflate(value)
else:
# handle special operators
if operator == _SPECIAL_OPERATOR_IN:
if not isinstance(value, tuple) and not isinstance(value, list):
raise ValueError('Value must be a tuple or list for IN operation {0}={1}'.format(key, value))
deflated_value = [property_obj.deflate(v) for v in value]
elif operator == _SPECIAL_OPERATOR_ISNULL:
if not isinstance(value, bool):
raise ValueError('Value must be a bool for isnull operation on {0}'.format(key))
operator = 'IS NULL' if value else 'IS NOT NULL'
deflated_value = None
elif operator in _REGEX_OPERATOR_TABLE.values():
deflated_value = property_obj.deflate(value)
if not isinstance(deflated_value, basestring):
raise ValueError('Must be a string value for {0}'.format(key))
if operator in _STRING_REGEX_OPERATOR_TABLE.values():
deflated_value = re.escape(deflated_value)
deflated_value = operator.format(deflated_value)
operator = _SPECIAL_OPERATOR_REGEX
else:
deflated_value = property_obj.deflate(value)
# map property to correct property name in the database
db_property = cls.defined_properties(rels=False)[prop].db_property or prop
output[db_property] = (operator, deflated_value)
return output | def process_filter_args(cls, kwargs) | loop through properties in filter parameters check they match class definition
deflate them and convert into something easy to generate cypher from | 2.718337 | 2.600133 | 1.04546 |
rel_definitions = cls.defined_properties(properties=False, rels=True, aliases=False)
match, dont_match = {}, {}
for key, value in kwargs.items():
if key not in rel_definitions:
raise ValueError("No such relation {0} defined on a {1}".format(key, cls.__name__))
rhs_ident = key
rel_definitions[key]._lookup_node_class()
if value is True:
match[rhs_ident] = rel_definitions[key].definition
elif value is False:
dont_match[rhs_ident] = rel_definitions[key].definition
elif isinstance(value, NodeSet):
raise NotImplementedError("Not implemented yet")
else:
raise ValueError("Expecting True / False / NodeSet got: " + repr(value))
return match, dont_match | def process_has_args(cls, kwargs) | loop through has parameters check they correspond to class rels defined | 4.587238 | 4.267045 | 1.075039 |
# build source
rhs_label = ':' + traversal.target_class.__label__
# build source
lhs_ident = self.build_source(traversal.source)
rhs_ident = traversal.name + rhs_label
self._ast['return'] = traversal.name
self._ast['result_class'] = traversal.target_class
rel_ident = self.create_ident()
stmt = _rel_helper(lhs=lhs_ident, rhs=rhs_ident, ident=rel_ident, **traversal.definition)
self._ast['match'].append(stmt)
if traversal.filters:
self.build_where_stmt(rel_ident, traversal.filters)
return traversal.name | def build_traversal(self, traversal) | traverse a relationship from a node to a set of nodes | 6.117619 | 5.988289 | 1.021597 |
ident_w_label = ident + ':' + cls.__label__
self._ast['match'].append('({0})'.format(ident_w_label))
self._ast['return'] = ident
self._ast['result_class'] = cls
return ident | def build_label(self, ident, cls) | match nodes by a label | 7.125247 | 6.215385 | 1.146389 |
source_ident = ident
for key, value in node_set.must_match.items():
if isinstance(value, dict):
label = ':' + value['node_class'].__label__
stmt = _rel_helper(lhs=source_ident, rhs=label, ident='', **value)
self._ast['where'].append(stmt)
else:
raise ValueError("Expecting dict got: " + repr(value))
for key, val in node_set.dont_match.items():
if isinstance(val, dict):
label = ':' + val['node_class'].__label__
stmt = _rel_helper(lhs=source_ident, rhs=label, ident='', **val)
self._ast['where'].append('NOT ' + stmt)
else:
raise ValueError("Expecting dict got: " + repr(val)) | def build_additional_match(self, ident, node_set) | handle additional matches supplied by 'has()' calls | 3.601422 | 3.461776 | 1.040339 |
if q_filters is not None:
stmts = self._parse_q_filters(ident, q_filters, source_class)
if stmts:
self._ast['where'].append(stmts)
else:
stmts = []
for row in filters:
negate = False
# pre-process NOT cases as they are nested dicts
if '__NOT__' in row and len(row) == 1:
negate = True
row = row['__NOT__']
for prop, op_and_val in row.items():
op, val = op_and_val
if op in _UNARY_OPERATORS:
# unary operators do not have a parameter
statement = '{0} {1}.{2} {3}'.format('NOT' if negate else '', ident, prop, op)
else:
place_holder = self._register_place_holder(ident + '_' + prop)
statement = '{0} {1}.{2} {3} {{{4}}}'.format('NOT' if negate else '', ident, prop, op, place_holder)
self._query_params[place_holder] = val
stmts.append(statement)
self._ast['where'].append(' AND '.join(stmts)) | def build_where_stmt(self, ident, filters, q_filters=None, source_class=None) | construct a where statement from some filters | 3.060202 | 3.006049 | 1.018015 |
return self.query_cls(self).build_ast()._execute(lazy) | def all(self, lazy=False) | Return all nodes belonging to the set
:param lazy: False by default, specify True to get nodes with id only without the parameters.
:return: list of nodes
:rtype: list | 28.96841 | 42.823608 | 0.676459 |
result = self._get(limit=2, lazy=lazy, **kwargs)
if len(result) > 1:
raise MultipleNodesReturned(repr(kwargs))
elif not result:
raise self.source_class.DoesNotExist(repr(kwargs))
else:
return result[0] | def get(self, lazy=False, **kwargs) | Retrieve one node from the set matching supplied parameters
:param lazy: False by default, specify True to get nodes with id only without the parameters.
:param kwargs: same syntax as `filter()`
:return: node | 4.247032 | 4.522357 | 0.939119 |
result = self._get(limit=1, **kwargs)
if result:
return result[0]
else:
raise self.source_class.DoesNotExist(repr(kwargs)) | def first(self, **kwargs) | Retrieve the first node from the set matching supplied parameters
:param kwargs: same syntax as `filter()`
:return: node | 6.113148 | 7.581825 | 0.80629 |
if args or kwargs:
self.q_filters = Q(self.q_filters & Q(*args, **kwargs))
return self | def filter(self, *args, **kwargs) | Apply filters to the existing nodes in the set.
:param kwargs: filter parameters
Filters mimic Django's syntax with the double '__' to separate field and operators.
e.g `.filter(salary__gt=20000)` results in `salary > 20000`.
The following operators are available:
* 'lt': less than
* 'gt': greater than
* 'lte': less than or equal to
* 'gte': greater than or equal to
* 'ne': not equal to
* 'in': matches one of list (or tuple)
* 'isnull': is null
* 'regex': matches supplied regex (neo4j regex format)
* 'exact': exactly match string (just '=')
* 'iexact': case insensitive match string
* 'contains': contains string
* 'icontains': case insensitive contains
* 'startswith': string starts with
* 'istartswith': case insensitive string starts with
* 'endswith': string ends with
* 'iendswith': case insensitive string ends with
:return: self | 5.534263 | 9.564728 | 0.578612 |
if args or kwargs:
self.q_filters = Q(self.q_filters & ~Q(*args, **kwargs))
return self | def exclude(self, *args, **kwargs) | Exclude nodes from the NodeSet via filters.
:param kwargs: filter parameters see syntax for the filter method
:return: self | 5.914576 | 9.257101 | 0.638923 |
should_remove = len(props) == 1 and props[0] is None
if not hasattr(self, '_order_by') or should_remove:
self._order_by = []
if should_remove:
return self
if '?' in props:
self._order_by.append('?')
else:
for prop in props:
prop = prop.strip()
if prop.startswith('-'):
prop = prop[1:]
desc = True
else:
desc = False
if prop not in self.source_class.defined_properties(rels=False):
raise ValueError("No such property {0} on {1}".format(
prop, self.source_class.__name__))
property_obj = getattr(self.source_class, prop)
if isinstance(property_obj, AliasProperty):
prop = property_obj.aliased_to()
self._order_by.append(prop + (' DESC' if desc else ''))
return self | def order_by(self, *props) | Order by properties. Prepend with minus to do descending. Pass None to
remove ordering. | 2.699689 | 2.587935 | 1.043183 |
if kwargs:
if self.definition.get('model') is None:
raise ValueError("match() with filter only available on relationships with a model")
output = process_filter_args(self.definition['model'], kwargs)
if output:
self.filters.append(output)
return self | def match(self, **kwargs) | Traverse relationships with properties matching the given parameters.
e.g: `.match(price__lt=10)`
:param kwargs: see `NodeSet.filter()` for syntax
:return: self | 7.350298 | 7.208261 | 1.019705 |
if not isinstance(value,neo4j.types.spatial.Point):
raise TypeError('Invalid datatype to inflate. Expected POINT datatype, received {}'.format(type(value)))
try:
value_point_crs = SRID_TO_CRS[value.srid]
except KeyError:
raise ValueError('Invalid SRID to inflate. '
'Expected one of {}, received {}'.format(SRID_TO_CRS.keys(), value.srid))
if self._crs != value_point_crs:
raise ValueError('Invalid CRS. '
'Expected POINT defined over {}, received {}'.format(self._crs, value_point_crs))
# cartesian
if value.srid == 7203:
return NeomodelPoint(x=value.x, y=value.y)
# cartesian-3d
elif value.srid == 9157:
return NeomodelPoint(x=value.x, y=value.y, z=value.z)
# wgs-84
elif value.srid == 4326:
return NeomodelPoint(longitude=value.longitude, latitude=value.latitude)
# wgs-83-3d
elif value.srid == 4979:
return NeomodelPoint(longitude=value.longitude, latitude=value.latitude, height=value.height) | def inflate(self, value) | Handles the marshalling from Neo4J POINT to NeomodelPoint
:param value: Value returned from the database
:type value: Neo4J POINT
:return: NeomodelPoint | 2.708978 | 2.555041 | 1.060248 |
if not isinstance(value, NeomodelPoint):
raise TypeError('Invalid datatype to deflate. Expected NeomodelPoint, received {}'.format(type(value)))
if not value.crs == self._crs:
raise ValueError('Invalid CRS. '
'Expected NeomodelPoint defined over {}, '
'received NeomodelPoint defined over {}'.format(self._crs, value.crs))
if value.crs == 'cartesian-3d':
return neo4j.types.spatial.CartesianPoint((value.x, value.y, value.z))
elif value.crs == 'cartesian':
return neo4j.types.spatial.CartesianPoint((value.x,value.y))
elif value.crs == 'wgs-84':
return neo4j.types.spatial.WGS84Point((value.longitude, value.latitude))
elif value.crs == 'wgs-84-3d':
return neo4j.types.spatial.WGS84Point((value.longitude, value.latitude, value.height)) | def deflate(self, value) | Handles the marshalling from NeomodelPoint to Neo4J POINT
:param value: The point that was assigned as value to a property in the model
:type value: NeomodelPoint
:return: Neo4J POINT | 2.62896 | 2.431615 | 1.081158 |
obj = QBase(children, connector, negated)
obj.__class__ = cls
return obj | def _new_instance(cls, children=None, connector=None, negated=False) | Create a new instance of this class when new Nodes (or subclasses) are
needed in the internal code in this class. Normally, it just shadows
__init__(). However, subclasses with an __init__ signature that aren't
an extension of Node.__init__ might need to implement this method to
allow a Node to create a new instance of them (if they have any extra
setting up to do). | 6.503788 | 8.148236 | 0.798184 |
if data in self.children:
return data
if not squash:
self.children.append(data)
return data
if self.connector == conn_type:
# We can reuse self.children to append or squash the node other.
if (isinstance(data, QBase) and not data.negated and
(data.connector == conn_type or len(data) == 1)):
# We can squash the other node's children directly into this
# node. We are just doing (AB)(CD) == (ABCD) here, with the
# addition that if the length of the other node is 1 the
# connector doesn't matter. However, for the len(self) == 1
# case we don't want to do the squashing, as it would alter
# self.connector.
self.children.extend(data.children)
return self
else:
# We could use perhaps additional logic here to see if some
# children could be used for pushdown here.
self.children.append(data)
return data
else:
obj = self._new_instance(self.children, self.connector,
self.negated)
self.connector = conn_type
self.children = [obj, data]
return data | def add(self, data, conn_type, squash=True) | Combine this tree and the data represented by data using the
connector conn_type. The combine is done by squashing the node other
away if possible.
This tree (self) will never be pushed to a child node of the
combined tree, nor will the connector or negated properties change.
Return a node which can be used in place of data regardless if the
node other got squashed or not.
If `squash` is False the data is prepared and added as a child to
this tree without further logic.
Args:
conn_type (str, optional ["AND", "OR"]): connection method | 5.855376 | 5.348523 | 1.094765 |
if not issubclass(type(obj), self.definition['node_class']):
raise ValueError("Expected node of class " + self.definition['node_class'].__name__)
if not hasattr(obj, 'id'):
raise ValueError("Can't perform operation on unsaved node " + repr(obj)) | def _check_node(self, obj) | check for valid node i.e correct class and is saved | 4.218899 | 3.825861 | 1.102732 |
self._check_node(node)
if not self.definition['model'] and properties:
raise NotImplementedError(
"Relationship properties without using a relationship model "
"is no longer supported."
)
params = {}
rel_model = self.definition['model']
rp = None # rel_properties
if rel_model:
rp = {}
# need to generate defaults etc to create fake instance
tmp = rel_model(**properties) if properties else rel_model()
# build params and place holders to pass to rel_helper
for p, v in rel_model.deflate(tmp.__properties__).items():
rp[p] = '{' + p + '}'
params[p] = v
if hasattr(tmp, 'pre_save'):
tmp.pre_save()
new_rel = _rel_helper(lhs='us', rhs='them', ident='r', relation_properties=rp, **self.definition)
q = "MATCH (them), (us) WHERE id(them)={them} and id(us)={self} " \
"CREATE UNIQUE" + new_rel
params['them'] = node.id
if not rel_model:
self.source.cypher(q, params)
return True
rel_ = self.source.cypher(q + " RETURN r", params)[0][0][0]
rel_instance = self._set_start_end_cls(rel_model.inflate(rel_), node)
if hasattr(rel_instance, 'post_save'):
rel_instance.post_save()
return rel_instance | def connect(self, node, properties=None) | Connect a node
:param node:
:param properties: for the new relationship
:type: dict
:return: | 6.104908 | 6.01889 | 1.014291 |
self.disconnect_all()
self.connect(node, properties) | def replace(self, node, properties=None) | Disconnect all existing nodes and connect the supplied node
:param node:
:param properties: for the new relationship
:type: dict
:return: | 9.705636 | 6.286666 | 1.543845 |
self._check_node(node)
my_rel = _rel_helper(lhs='us', rhs='them', ident='r', **self.definition)
q = "MATCH " + my_rel + " WHERE id(them)={them} and id(us)={self} RETURN r LIMIT 1"
rels = self.source.cypher(q, {'them': node.id})[0]
if not rels:
return
rel_model = self.definition.get('model') or StructuredRel
return self._set_start_end_cls(rel_model.inflate(rels[0][0]), node) | def relationship(self, node) | Retrieve the relationship object for this first relationship between self and node.
:param node:
:return: StructuredRel | 7.922826 | 7.44643 | 1.063976 |
self._check_node(old_node)
self._check_node(new_node)
if old_node.id == new_node.id:
return
old_rel = _rel_helper(lhs='us', rhs='old', ident='r', **self.definition)
# get list of properties on the existing rel
result, meta = self.source.cypher(
"MATCH (us), (old) WHERE id(us)={self} and id(old)={old} "
"MATCH " + old_rel + " RETURN r", {'old': old_node.id})
if result:
node_properties = _get_node_properties(result[0][0])
existing_properties = node_properties.keys()
else:
raise NotConnected('reconnect', self.source, old_node)
# remove old relationship and create new one
new_rel = _rel_helper(lhs='us', rhs='new', ident='r2', **self.definition)
q = "MATCH (us), (old), (new) " \
"WHERE id(us)={self} and id(old)={old} and id(new)={new} " \
"MATCH " + old_rel
q += " CREATE UNIQUE" + new_rel
# copy over properties if we have
for p in existing_properties:
q += " SET r2.{0} = r.{1}".format(p, p)
q += " WITH r DELETE r"
self.source.cypher(q, {'old': old_node.id, 'new': new_node.id}) | def reconnect(self, old_node, new_node) | Disconnect old_node and connect new_node copying over any properties on the original relationship.
Useful for preventing cardinality violations
:param old_node:
:param new_node:
:return: None | 3.601073 | 3.478624 | 1.0352 |
rel = _rel_helper(lhs='a', rhs='b', ident='r', **self.definition)
q = "MATCH (a), (b) WHERE id(a)={self} and id(b)={them} " \
"MATCH " + rel + " DELETE r"
self.source.cypher(q, {'them': node.id}) | def disconnect(self, node) | Disconnect a node
:param node:
:return: | 9.31967 | 9.863824 | 0.944833 |
rhs = 'b:' + self.definition['node_class'].__label__
rel = _rel_helper(lhs='a', rhs=rhs, ident='r', **self.definition)
q = 'MATCH (a) WHERE id(a)={self} MATCH ' + rel + ' DELETE r'
self.source.cypher(q) | def disconnect_all(self) | Disconnect all nodes
:return: | 13.735942 | 14.892576 | 0.922335 |
nodes = super(ZeroOrOne, self).all()
if len(nodes) == 1:
return nodes[0]
if len(nodes) > 1:
raise CardinalityViolation(self, len(nodes)) | def single(self) | Return the associated node.
:return: node | 4.177344 | 4.588619 | 0.910371 |
if len(self):
raise AttemptedCardinalityViolation(
"Node already has {0} can't connect more".format(self))
else:
return super(ZeroOrOne, self).connect(node, properties) | def connect(self, node, properties=None) | Connect to a node.
:param node:
:type: StructuredNode
:param properties: relationship properties
:type: dict
:return: True / rel instance | 10.015276 | 11.289262 | 0.887151 |
nodes = super(OneOrMore, self).all()
if nodes:
return nodes[0]
raise CardinalityViolation(self, 'none') | def single(self) | Fetch one of the related nodes
:return: Node | 10.530188 | 11.125192 | 0.946517 |
if super(OneOrMore, self).__len__() < 2:
raise AttemptedCardinalityViolation("One or more expected")
return super(OneOrMore, self).disconnect(node) | def disconnect(self, node) | Disconnect node
:param node:
:return: | 8.723251 | 9.291857 | 0.938806 |
nodes = super(One, self).all()
if nodes:
if len(nodes) == 1:
return nodes[0]
else:
raise CardinalityViolation(self, len(nodes))
else:
raise CardinalityViolation(self, 'none') | def single(self) | Return the associated node.
:return: node | 4.148165 | 4.482632 | 0.925386 |
if not hasattr(self.source, 'id'):
raise ValueError("Node has not been saved cannot connect!")
if len(self):
raise AttemptedCardinalityViolation(
"Node already has one relationship"
)
else:
return super(One, self).connect(node, properties) | def connect(self, node, properties=None) | Connect a node
:param node:
:param properties: relationship properties
:return: True / rel instance | 10.710651 | 9.83777 | 1.088728 |
results, meta = db.cypher_query("CALL db.constraints()")
pattern = re.compile(':(.*) \).*\.(\w*)')
for constraint in results:
db.cypher_query('DROP ' + constraint[0])
match = pattern.search(constraint[0])
stdout.write(''' - Dropping unique constraint and index on label {0} with property {1}.\n'''.format(
match.group(1), match.group(2)))
stdout.write("\n") | def drop_constraints(quiet=True, stdout=None) | Discover and drop all constraints.
:type: bool
:return: None | 6.30243 | 6.467038 | 0.974547 |
results, meta = db.cypher_query("CALL db.indexes()")
pattern = re.compile(':(.*)\((.*)\)')
for index in results:
db.cypher_query('DROP ' + index[0])
match = pattern.search(index[0])
stdout.write(' - Dropping index on label {0} with property {1}.\n'.format(
match.group(1), match.group(2)))
stdout.write("\n") | def drop_indexes(quiet=True, stdout=None) | Discover and drop all indexes.
:type: bool
:return: None | 4.443593 | 4.476065 | 0.992745 |
if not stdout:
stdout = sys.stdout
stdout.write("Droping constraints...\n")
drop_constraints(quiet=False, stdout=stdout)
stdout.write('Dropping indexes...\n')
drop_indexes(quiet=False, stdout=stdout) | def remove_all_labels(stdout=None) | Calls functions for dropping constraints and indexes.
:param stdout: output stream
:return: None | 4.093357 | 2.917747 | 1.402917 |
if not hasattr(cls, '__label__'):
if not quiet:
stdout.write(' ! Skipping class {0}.{1} is abstract\n'.format(cls.__module__, cls.__name__))
return
for name, property in cls.defined_properties(aliases=False, rels=False).items():
db_property = property.db_property or name
if property.index:
if not quiet:
stdout.write(' + Creating index {0} on label {1} for class {2}.{3}\n'.format(
name, cls.__label__, cls.__module__, cls.__name__))
db.cypher_query("CREATE INDEX on :{0}({1}); ".format(
cls.__label__, db_property))
elif property.unique_index:
if not quiet:
stdout.write(' + Creating unique constraint for {0} on label {1} for class {2}.{3}\n'.format(
name, cls.__label__, cls.__module__, cls.__name__))
db.cypher_query("CREATE CONSTRAINT "
"on (n:{0}) ASSERT n.{1} IS UNIQUE; ".format(
cls.__label__, db_property)) | def install_labels(cls, quiet=True, stdout=None) | Setup labels with indexes and constraints for a given class
:param cls: StructuredNode class
:type: class
:param quiet: (default true) enable standard output
:param stdout: stdout stream
:type: bool
:return: None | 2.986118 | 2.823066 | 1.057757 |
if not stdout:
stdout = sys.stdout
def subsub(kls): # recursively return all subclasses
return kls.__subclasses__() + [g for s in kls.__subclasses__() for g in subsub(s)]
stdout.write("Setting up indexes and constraints...\n\n")
i = 0
for cls in subsub(StructuredNode):
stdout.write('Found {0}.{1}\n'.format(cls.__module__, cls.__name__))
install_labels(cls, quiet=False, stdout=stdout)
i += 1
if i:
stdout.write('\n')
stdout.write('Finished {0} classes.\n'.format(i)) | def install_all_labels(stdout=None) | Discover all subclasses of StructuredNode in your application and execute install_labels on each.
Note: code most be loaded (imported) in order for a class to be discovered.
:param stdout: output stream
:return: None | 3.957535 | 3.43084 | 1.153518 |
query_params = dict(merge_params=merge_params)
n_merge = "n:{0} {{{1}}}".format(
":".join(cls.inherited_labels()),
", ".join("{0}: params.create.{0}".format(getattr(cls, p).db_property or p) for p in cls.__required_properties__))
if relationship is None:
# create "simple" unwind query
query = "UNWIND {{merge_params}} as params\n MERGE ({0})\n ".format(n_merge)
else:
# validate relationship
if not isinstance(relationship.source, StructuredNode):
raise ValueError("relationship source [{0}] is not a StructuredNode".format(repr(relationship.source)))
relation_type = relationship.definition.get('relation_type')
if not relation_type:
raise ValueError('No relation_type is specified on provided relationship')
from .match import _rel_helper
query_params["source_id"] = relationship.source.id
query = "MATCH (source:{0}) WHERE ID(source) = {{source_id}}\n ".format(relationship.source.__label__)
query += "WITH source\n UNWIND {merge_params} as params \n "
query += "MERGE "
query += _rel_helper(lhs='source', rhs=n_merge, ident=None,
relation_type=relation_type, direction=relationship.definition['direction'])
query += "ON CREATE SET n = params.create\n "
# if update_existing, write properties on match as well
if update_existing is True:
query += "ON MATCH SET n += params.update\n"
# close query
if lazy:
query += "RETURN id(n)"
else:
query += "RETURN n"
return query, query_params | def _build_merge_query(cls, merge_params, update_existing=False, lazy=False, relationship=None) | Get a tuple of a CYPHER query and a params dict for the specified MERGE query.
:param merge_params: The target node match parameters, each node must have a "create" key and optional "update".
:type merge_params: list of dict
:param update_existing: True to update properties of existing nodes, default False to keep existing values.
:type update_existing: bool
:rtype: tuple | 4.683854 | 4.495694 | 1.041853 |
if 'streaming' in kwargs:
warnings.warn('streaming is not supported by bolt, please remove the kwarg',
category=DeprecationWarning, stacklevel=1)
lazy = kwargs.get('lazy', False)
# create mapped query
query = "CREATE (n:{0} {{create_params}})".format(':'.join(cls.inherited_labels()))
# close query
if lazy:
query += " RETURN id(n)"
else:
query += " RETURN n"
results = []
for item in [cls.deflate(p, obj=_UnsavedNode(), skip_empty=True) for p in props]:
node, _ = db.cypher_query(query, {'create_params': item})
results.extend(node[0])
nodes = [cls.inflate(node) for node in results]
if not lazy and hasattr(cls, 'post_create'):
for node in nodes:
node.post_create()
return nodes | def create(cls, *props, **kwargs) | Call to CREATE with parameters map. A new instance will be created and saved.
:param props: dict of properties to create the nodes.
:type props: tuple
:param lazy: False by default, specify True to get nodes with id only without the parameters.
:type: bool
:rtype: list | 5.562369 | 5.21945 | 1.0657 |
lazy = kwargs.get('lazy', False)
relationship = kwargs.get('relationship')
# build merge query, make sure to update only explicitly specified properties
create_or_update_params = []
for specified, deflated in [(p, cls.deflate(p, skip_empty=True)) for p in props]:
create_or_update_params.append({"create": deflated,
"update": dict((k, v) for k, v in deflated.items() if k in specified)})
query, params = cls._build_merge_query(create_or_update_params, update_existing=True, relationship=relationship,
lazy=lazy)
if 'streaming' in kwargs:
warnings.warn('streaming is not supported by bolt, please remove the kwarg',
category=DeprecationWarning, stacklevel=1)
# fetch and build instance for each result
results = db.cypher_query(query, params)
return [cls.inflate(r[0]) for r in results[0]] | def create_or_update(cls, *props, **kwargs) | Call to MERGE with parameters map. A new instance will be created and saved if does not already exists,
this is an atomic operation. If an instance already exists all optional properties specified will be updated.
Note that the post_create hook isn't called after create_or_update
:param props: List of dict arguments to get or create the entities with.
:type props: tuple
:param relationship: Optional, relationship to get/create on when new entity is created.
:param lazy: False by default, specify True to get nodes with id only without the parameters.
:rtype: list | 5.258245 | 4.953494 | 1.061522 |
self._pre_action_check('cypher')
params = params or {}
params.update({'self': self.id})
return db.cypher_query(query, params) | def cypher(self, query, params=None) | Execute a cypher query with the param 'self' pre-populated with the nodes neo4j id.
:param query: cypher query string
:type: string
:param params: query parameters
:type: dict
:return: list containing query results
:rtype: list | 6.020092 | 5.284304 | 1.13924 |
self._pre_action_check('delete')
self.cypher("MATCH (self) WHERE id(self)={self} "
"OPTIONAL MATCH (self)-[r]-()"
" DELETE r, self")
delattr(self, 'id')
self.deleted = True
return True | def delete(self) | Delete a node and it's relationships
:return: True | 5.793626 | 5.521559 | 1.049274 |
lazy = kwargs.get('lazy', False)
relationship = kwargs.get('relationship')
# build merge query
get_or_create_params = [{"create": cls.deflate(p, skip_empty=True)} for p in props]
query, params = cls._build_merge_query(get_or_create_params, relationship=relationship, lazy=lazy)
if 'streaming' in kwargs:
warnings.warn('streaming is not supported by bolt, please remove the kwarg',
category=DeprecationWarning, stacklevel=1)
# fetch and build instance for each result
results = db.cypher_query(query, params)
return [cls.inflate(r[0]) for r in results[0]] | def get_or_create(cls, *props, **kwargs) | Call to MERGE with parameters map. A new instance will be created and saved if does not already exists,
this is an atomic operation.
Parameters must contain all required properties, any non required properties with defaults will be generated.
Note that the post_create hook isn't called after get_or_create
:param props: dict of properties to get or create the entities with.
:type props: tuple
:param relationship: Optional, relationship to get/create on when new entity is created.
:param lazy: False by default, specify True to get nodes with id only without the parameters.
:rtype: list | 5.751872 | 5.386629 | 1.067805 |
# support lazy loading
if isinstance(node, int):
snode = cls()
snode.id = node
else:
node_properties = _get_node_properties(node)
props = {}
for key, prop in cls.__all_properties__:
# map property name from database to object property
db_property = prop.db_property or key
if db_property in node_properties:
props[key] = prop.inflate(node_properties[db_property], node)
elif prop.has_default:
props[key] = prop.default_value()
else:
props[key] = None
snode = cls(**props)
snode.id = node.id
return snode | def inflate(cls, node) | Inflate a raw neo4j_driver node to a neomodel node
:param node:
:return: node object | 3.423647 | 3.362659 | 1.018137 |
return [scls.__label__ for scls in cls.mro()
if hasattr(scls, '__label__') and not hasattr(
scls, '__abstract_node__')] | def inherited_labels(cls) | Return list of labels from nodes class hierarchy.
:return: list | 5.55596 | 6.55151 | 0.848043 |
self._pre_action_check('refresh')
if hasattr(self, 'id'):
request = self.cypher("MATCH (n) WHERE id(n)={self}"
" RETURN n")[0]
if not request or not request[0]:
raise self.__class__.DoesNotExist("Can't refresh non existent node")
node = self.inflate(request[0][0])
for key, val in node.__properties__.items():
setattr(self, key, val)
else:
raise ValueError("Can't refresh unsaved node") | def refresh(self) | Reload the node from neo4j | 5.197277 | 4.619745 | 1.125014 |
# create or update instance node
if hasattr(self, 'id'):
# update
params = self.deflate(self.__properties__, self)
query = "MATCH (n) WHERE id(n)={self} \n"
query += "\n".join(["SET n.{0} = {{{1}}}".format(key, key) + "\n"
for key in params.keys()])
for label in self.inherited_labels():
query += "SET n:`{0}`\n".format(label)
self.cypher(query, params)
elif hasattr(self, 'deleted') and self.deleted:
raise ValueError("{0}.save() attempted on deleted node".format(
self.__class__.__name__))
else: # create
self.id = self.create(self.__properties__)[0].id
return self | def save(self) | Save the node to neo4j or raise an exception
:return: the node instance | 5.381872 | 4.973657 | 1.082076 |
if self.has_default:
if hasattr(self.default, '__call__'):
return self.default()
else:
return self.default
else:
raise Exception("No default value specified") | def default_value(self) | Generate a default value
:return: the value | 3.019315 | 3.262582 | 0.925437 |
props = self.deflate(self.__properties__)
query = "MATCH ()-[r]->() WHERE id(r)={self} "
for key in props:
query += " SET r.{0} = {{{1}}}".format(key, key)
props['self'] = self.id
db.cypher_query(query, props)
return self | def save(self) | Save the relationship
:return: self | 6.509632 | 6.076499 | 1.07128 |
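Judging from the rows above, the factor column is simply loss_without_docstring divided by loss_with_docstring. Below is a minimal sketch of that check, assuming those column semantics; the row values are copied from the first two rows of the table, and the tolerance is an assumption.

```python
# Hypothetical sanity check: factor ~= loss_without_docstring / loss_with_docstring.
# The two rows below are copied from the table above.
rows = [
    {"loss_without_docstring": 10.465815, "loss_with_docstring": 7.632806, "factor": 1.371162},
    {"loss_without_docstring": 3.388819, "loss_with_docstring": 3.289483, "factor": 1.030198},
]

for row in rows:
    ratio = row["loss_without_docstring"] / row["loss_with_docstring"]
    assert abs(ratio - row["factor"]) < 1e-4, (ratio, row["factor"])

print("factor == loss_without_docstring / loss_with_docstring (within rounding)")
```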