code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
self._request_params = {'input': input}
if lat_lng is not None or location is not None:
lat_lng_str = self._generate_lat_lng_string(lat_lng, location)
self._request_params['location'] = lat_lng_str
self._request_params['radius'] = radius
if types:
self._request_params['types'] = types
if len(components) > 0:
self._request_params['components'] = '|'.join(['{}:{}'.format(
c[0],c[1]) for c in components])
if language is not None:
self._request_params['language'] = language
self._add_required_param_keys()
url, places_response = _fetch_remote_json(
GooglePlaces.AUTOCOMPLETE_API_URL, self._request_params)
_validate_response(url, places_response)
return GoogleAutocompleteSearchResult(self, places_response) | def autocomplete(self, input, lat_lng=None, location=None, radius=3200,
language=lang.ENGLISH, types=None, components=[]) | Perform an autocomplete search using the Google Places API.
Only the input kwarg is required, the rest of the keyword arguments
are optional.
keyword arguments:
input -- The text string on which to search, for example:
"Hattie B's".
lat_lng -- A dict containing the following keys: lat, lng
(default None)
location -- A human readable location, e.g 'London, England'
(default None)
radius -- The radius (in meters) around the location to which the
search is to be restricted. The maximum is 50000 meters.
(default 3200)
language -- The language code, indicating in which language the
results should be returned, if possible. (default lang.ENGLISH)
types -- A type to search against. See `types.py` "autocomplete types"
for complete list
https://developers.google.com/places/documentation/autocomplete#place_types.
components -- An optional grouping of places to which you would
like to restrict your results. An array containing one or
more tuples of:
* country: matches a country name or a two letter ISO 3166-1 country code.
eg: [('country','US')] | 2.756356 | 2.898343 | 0.951011 |
if keyword is None and name is None and len(types) is 0:
raise ValueError('One of keyword, name or types must be supplied.')
if location is None and lat_lng is None:
raise ValueError('One of location or lat_lng must be passed in.')
try:
radius = int(radius)
except:
raise ValueError('radius must be passed supplied as an integer.')
if sensor not in [True, False]:
raise ValueError('sensor must be passed in as a boolean value.')
self._request_params = {'radius': radius}
self._sensor = sensor
self._request_params['location'] = self._generate_lat_lng_string(
lat_lng, location)
if keyword is not None:
self._request_params['keyword'] = keyword
if name is not None:
self._request_params['name'] = name
if type:
self._request_params['type'] = type
elif types:
if len(types) == 1:
self._request_params['type'] = types[0]
elif len(types) > 1:
self._request_params['types'] = '|'.join(types)
if language is not None:
self._request_params['language'] = language
if opennow is True:
self._request_params['opennow'] = 'true'
self._add_required_param_keys()
url, places_response = _fetch_remote_json(
GooglePlaces.RADAR_SEARCH_API_URL, self._request_params)
_validate_response(url, places_response)
return GooglePlacesSearchResult(self, places_response) | def radar_search(self, sensor=False, keyword=None, name=None,
language=lang.ENGLISH, lat_lng=None, opennow=False,
radius=3200, type=None, types=[], location=None) | Perform a radar search using the Google Places API.
One of lat_lng or location are required, the rest of the keyword
arguments are optional.
keyword arguments:
keyword -- A term to be matched against all available fields, including
but not limited to name, type, and address (default None)
name -- A term to be matched against the names of Places. Results will
be restricted to those containing the passed name value.
language -- The language code, indicating in which language the
results should be returned, if possible. (default lang.ENGLISH)
lat_lng -- A dict containing the following keys: lat, lng
(default None)
location -- A human readable location, e.g 'London, England'
(default None)
radius -- The radius (in meters) around the location/lat_lng to
restrict the search to. The maximum is 50000 meters.
(default 3200)
opennow -- Returns only those Places that are open for business at the time
the query is sent. (default False)
sensor -- Indicates whether or not the Place request came from a
device using a location sensor (default False).
type -- Optional type param used to indicate place category
types -- An optional list of types, restricting the results to
Places (default []). If there is only one item the request
will be send as type param | 2.451072 | 2.54631 | 0.962597 |
data = {'placeid': place_id}
url, checkin_response = _fetch_remote_json(
GooglePlaces.CHECKIN_API_URL % (str(sensor).lower(),
self.api_key), json.dumps(data), use_http_post=True)
_validate_response(url, checkin_response) | def checkin(self, place_id, sensor=False) | Checks in a user to a place.
keyword arguments:
place_id -- The unique Google identifier for the relevant place.
sensor -- Boolean flag denoting if the location came from a
device using its location sensor (default False). | 7.208839 | 7.63393 | 0.944316 |
place_details = _get_place_details(place_id,
self.api_key, sensor, language=language)
return Place(self, place_details) | def get_place(self, place_id, sensor=False, language=lang.ENGLISH) | Gets a detailed place object.
keyword arguments:
place_id -- The unique Google identifier for the required place.
sensor -- Boolean flag denoting if the location came from a
device using its' location sensor (default False).
language -- The language code, indicating in which language the
results should be returned, if possible. (default lang.ENGLISH) | 4.231815 | 7.254784 | 0.583314 |
required_kwargs = {'name': [str], 'lat_lng': [dict],
'accuracy': [int], 'types': [str, list]}
request_params = {}
for key in required_kwargs:
if key not in kwargs or kwargs[key] is None:
raise ValueError('The %s argument is required.' % key)
expected_types = required_kwargs[key]
type_is_valid = False
for expected_type in expected_types:
if isinstance(kwargs[key], expected_type):
type_is_valid = True
break
if not type_is_valid:
raise ValueError('Invalid value for %s' % key)
if key is not 'lat_lng':
request_params[key] = kwargs[key]
if len(kwargs['name']) > 255:
raise ValueError('The place name must not exceed 255 characters ' +
'in length.')
try:
kwargs['lat_lng']['lat']
kwargs['lat_lng']['lng']
request_params['location'] = kwargs['lat_lng']
except KeyError:
raise ValueError('Invalid keys for lat_lng.')
request_params['language'] = (kwargs.get('language')
if kwargs.get('language') is not None else
lang.ENGLISH)
sensor = (kwargs.get('sensor')
if kwargs.get('sensor') is not None else
False)
# At some point Google might support multiple types, so this supports
# strings and lists.
if isinstance(kwargs['types'], str):
request_params['types'] = [kwargs['types']]
else:
request_params['types'] = kwargs['types']
url, add_response = _fetch_remote_json(
GooglePlaces.ADD_API_URL % (str(sensor).lower(),
self.api_key), json.dumps(request_params), use_http_post=True)
_validate_response(url, add_response)
return {'place_id': add_response['place_id'],
'id': add_response['id']} | def add_place(self, **kwargs) | Adds a place to the Google Places database.
On a successful request, this method will return a dict containing
the the new Place's place_id and id in keys 'place_id' and 'id'
respectively.
keyword arguments:
name -- The full text name of the Place. Limited to 255
characters.
lat_lng -- A dict containing the following keys: lat, lng.
accuracy -- The accuracy of the location signal on which this request
is based, expressed in meters.
types -- The category in which this Place belongs. Only one type
can currently be specified for a Place. A string or
single element list may be passed in.
language -- The language in which the Place's name is being reported.
(defaults 'en').
sensor -- Boolean flag denoting if the location came from a device
using its location sensor (default False). | 3.029236 | 2.72911 | 1.109972 |
request_params = {'place_id': place_id}
url, delete_response = _fetch_remote_json(
GooglePlaces.DELETE_API_URL % (str(sensor).lower(),
self.api_key), json.dumps(request_params), use_http_post=True)
_validate_response(url, delete_response) | def delete_place(self, place_id, sensor=False) | Deletes a place from the Google Places database.
keyword arguments:
place_id -- The textual identifier that uniquely identifies this
Place, returned from a Place Search request.
sensor -- Boolean flag denoting if the location came from a device
using its location sensor (default False). | 6.995019 | 6.551627 | 1.067677 |
if self._types == '' and self.details != None and 'types' in self.details:
self._icon = self.details['types']
return self._types | def types(self) | Returns a list of feature types describing the given result. | 7.277732 | 6.805604 | 1.069374 |
if self._place is None:
if language is None:
try:
language = self._query_instance._request_params['language']
except KeyError:
language = lang.ENGLISH
place = _get_place_details(
self.place_id, self._query_instance.api_key,
self._query_instance.sensor, language=language)
self._place = Place(self._query_instance, place) | def get_details(self, language=None) | Retrieves full information on the place matching the place_id.
Stores the response in the `place` property. | 4.00203 | 3.498663 | 1.143874 |
if self._icon == '' and self.details != None and 'icon' in self.details:
self._icon = self.details['icon']
return self._icon | def icon(self) | Returns the URL of a recommended icon for display. | 3.766361 | 3.118995 | 1.207556 |
if self._name == '' and self.details != None and 'name' in self.details:
self._name = self.details['name']
return self._name | def name(self) | Returns the human-readable name of the place. | 3.548166 | 2.900116 | 1.223457 |
if self._vicinity == '' and self.details != None and 'vicinity' in self.details:
self._vicinity = self.details['vicinity']
return self._vicinity | def vicinity(self) | Returns a feature name of a nearby location.
Often this feature refers to a street or neighborhood within the given
results. | 3.304964 | 3.445233 | 0.959286 |
if self._rating == '' and self.details != None and 'rating' in self.details:
self._rating = self.details['rating']
return self._rating | def rating(self) | Returns the Place's rating, from 0.0 to 5.0, based on user reviews.
This method will return None for places that have no rating. | 3.792635 | 3.602756 | 1.052704 |
self._query_instance.checkin(self.place_id,
self._query_instance.sensor) | def checkin(self) | Checks in an anonymous user in. | 17.501377 | 16.547939 | 1.057617 |
if self._details is None:
if language is None:
try:
language = self._query_instance._request_params['language']
except KeyError:
language = lang.ENGLISH
self._details = _get_place_details(
self.place_id, self._query_instance.api_key,
self._query_instance.sensor, language=language) | def get_details(self, language=None) | Retrieves full information on the place matching the place_id.
Further attributes will be made available on the instance once this
method has been invoked.
keyword arguments:
language -- The language code, indicating in which language the
results should be returned, if possible. This value defaults
to the language that was used to generate the
GooglePlacesSearchResult instance. | 4.023024 | 3.770149 | 1.067073 |
if not maxheight and not maxwidth:
raise GooglePlacesError('You must specify maxheight or maxwidth!')
result = _get_place_photo(self.photo_reference,
self._query_instance.api_key,
maxheight=maxheight, maxwidth=maxwidth,
sensor=sensor)
self.mimetype, self.filename, self.data, self.url = result | def get(self, maxheight=None, maxwidth=None, sensor=False) | Fetch photo from API. | 5.220582 | 4.75826 | 1.097162 |
# type: (bytes, Tuple[int, int], int, Optional[str]) -> Optional[bytes]
width, height = size
line = width * 3
png_filter = struct.pack(">B", 0)
scanlines = b"".join(
[png_filter + data[y * line : y * line + line] for y in range(height)]
)
magic = struct.pack(">8B", 137, 80, 78, 71, 13, 10, 26, 10)
# Header: size, marker, data, CRC32
ihdr = [b"", b"IHDR", b"", b""]
ihdr[2] = struct.pack(">2I5B", width, height, 8, 2, 0, 0, 0)
ihdr[3] = struct.pack(">I", zlib.crc32(b"".join(ihdr[1:3])) & 0xFFFFFFFF)
ihdr[0] = struct.pack(">I", len(ihdr[2]))
# Data: size, marker, data, CRC32
idat = [b"", b"IDAT", zlib.compress(scanlines, level), b""]
idat[3] = struct.pack(">I", zlib.crc32(b"".join(idat[1:3])) & 0xFFFFFFFF)
idat[0] = struct.pack(">I", len(idat[2]))
# Footer: size, marker, None, CRC32
iend = [b"", b"IEND", b"", b""]
iend[3] = struct.pack(">I", zlib.crc32(iend[1]) & 0xFFFFFFFF)
iend[0] = struct.pack(">I", len(iend[2]))
if not output:
# Returns raw bytes of the whole PNG data
return magic + b"".join(ihdr + idat + iend)
with open(output, "wb") as fileh:
fileh.write(magic)
fileh.write(b"".join(ihdr))
fileh.write(b"".join(idat))
fileh.write(b"".join(iend))
return None | def to_png(data, size, level=6, output=None) | Dump data to a PNG file. If `output` is `None`, create no file but return
the whole PNG data.
:param bytes data: RGBRGB...RGB data.
:param tuple size: The (width, height) pair.
:param int level: PNG compression level.
:param str output: Output file name. | 1.924654 | 1.971253 | 0.976361 |
# type: () -> None
def cfactory(func, argtypes, restype):
# type: (str, List[Any], Any) -> None
self._cfactory(
attr=self.core, func=func, argtypes=argtypes, restype=restype
)
uint32 = ctypes.c_uint32
void = ctypes.c_void_p
size_t = ctypes.c_size_t
pointer = ctypes.POINTER
cfactory(
func="CGGetActiveDisplayList",
argtypes=[uint32, pointer(uint32), pointer(uint32)],
restype=ctypes.c_int32,
)
cfactory(func="CGDisplayBounds", argtypes=[uint32], restype=CGRect)
cfactory(func="CGRectStandardize", argtypes=[CGRect], restype=CGRect)
cfactory(func="CGRectUnion", argtypes=[CGRect, CGRect], restype=CGRect)
cfactory(func="CGDisplayRotation", argtypes=[uint32], restype=ctypes.c_float)
cfactory(
func="CGWindowListCreateImage",
argtypes=[CGRect, uint32, uint32, uint32],
restype=void,
)
cfactory(func="CGImageGetWidth", argtypes=[void], restype=size_t)
cfactory(func="CGImageGetHeight", argtypes=[void], restype=size_t)
cfactory(func="CGImageGetDataProvider", argtypes=[void], restype=void)
cfactory(func="CGDataProviderCopyData", argtypes=[void], restype=void)
cfactory(func="CFDataGetBytePtr", argtypes=[void], restype=void)
cfactory(func="CFDataGetLength", argtypes=[void], restype=ctypes.c_uint64)
cfactory(func="CGImageGetBytesPerRow", argtypes=[void], restype=size_t)
cfactory(func="CGImageGetBitsPerPixel", argtypes=[void], restype=size_t)
cfactory(func="CGDataProviderRelease", argtypes=[void], restype=void)
cfactory(func="CFRelease", argtypes=[void], restype=void) | def _set_cfunctions(self) | Set all ctypes functions and attach them to attributes. | 2.021688 | 2.037845 | 0.992072 |
# type: () -> Monitors
if not self._monitors:
int_ = int
core = self.core
# All monitors
# We need to update the value with every single monitor found
# using CGRectUnion. Else we will end with infinite values.
all_monitors = CGRect()
self._monitors.append({})
# Each monitors
display_count = ctypes.c_uint32(0)
active_displays = (ctypes.c_uint32 * self.max_displays)()
core.CGGetActiveDisplayList(
self.max_displays, active_displays, ctypes.byref(display_count)
)
rotations = {0.0: "normal", 90.0: "right", -90.0: "left"}
for idx in range(display_count.value):
display = active_displays[idx]
rect = core.CGDisplayBounds(display)
rect = core.CGRectStandardize(rect)
width, height = rect.size.width, rect.size.height
rot = core.CGDisplayRotation(display)
if rotations[rot] in ["left", "right"]:
width, height = height, width
self._monitors.append(
{
"left": int_(rect.origin.x),
"top": int_(rect.origin.y),
"width": int_(width),
"height": int_(height),
}
)
# Update AiO monitor's values
all_monitors = core.CGRectUnion(all_monitors, rect)
# Set the AiO monitor's values
self._monitors[0] = {
"left": int_(all_monitors.origin.x),
"top": int_(all_monitors.origin.y),
"width": int_(all_monitors.size.width),
"height": int_(all_monitors.size.height),
}
return self._monitors | def monitors(self) | Get positions of monitors (see parent class). | 3.10276 | 3.133069 | 0.990326 |
# type: (Monitor) -> ScreenShot
# pylint: disable=too-many-locals
# Convert PIL bbox style
if isinstance(monitor, tuple):
monitor = {
"left": monitor[0],
"top": monitor[1],
"width": monitor[2] - monitor[0],
"height": monitor[3] - monitor[1],
}
core = self.core
rect = CGRect(
(monitor["left"], monitor["top"]), (monitor["width"], monitor["height"])
)
image_ref = core.CGWindowListCreateImage(rect, 1, 0, 0)
if not image_ref:
raise ScreenShotError("CoreGraphics.CGWindowListCreateImage() failed.")
width = int(core.CGImageGetWidth(image_ref))
height = int(core.CGImageGetHeight(image_ref))
prov = copy_data = None
try:
prov = core.CGImageGetDataProvider(image_ref)
copy_data = core.CGDataProviderCopyData(prov)
data_ref = core.CFDataGetBytePtr(copy_data)
buf_len = core.CFDataGetLength(copy_data)
raw = ctypes.cast(data_ref, ctypes.POINTER(ctypes.c_ubyte * buf_len))
data = bytearray(raw.contents)
# Remove padding per row
bytes_per_row = int(core.CGImageGetBytesPerRow(image_ref))
bytes_per_pixel = int(core.CGImageGetBitsPerPixel(image_ref))
bytes_per_pixel = (bytes_per_pixel + 7) // 8
if bytes_per_pixel * width != bytes_per_row:
cropped = bytearray()
for row in range(height):
start = row * bytes_per_row
end = start + width * bytes_per_pixel
cropped.extend(data[start:end])
data = cropped
finally:
if prov:
core.CGDataProviderRelease(prov)
if copy_data:
core.CFRelease(copy_data)
return self.cls_image(data, monitor, size=Size(width, height)) | def grab(self, monitor) | See :meth:`MSSMixin.grab <mss.base.MSSMixin.grab>` for full details. | 2.639365 | 2.673209 | 0.98734 |
# type: (bytearray, int, int) -> ScreenShot
monitor = {"left": 0, "top": 0, "width": width, "height": height}
return cls(data, monitor) | def from_size(cls, data, width, height) | Instantiate a new class given only screen shot's data and size. | 4.489896 | 4.179542 | 1.074255 |
# type: () -> Pixels
if not self.__pixels:
rgb_tuples = zip(
self.raw[2::4], self.raw[1::4], self.raw[0::4]
) # type: Iterator[Pixel]
self.__pixels = list(zip(*[iter(rgb_tuples)] * self.width)) # type: ignore
return self.__pixels | def pixels(self) | :return list: RGB tuples. | 3.809611 | 3.778973 | 1.008107 |
# type: () -> bytes
if not self.__rgb:
rgb = bytearray(self.height * self.width * 3)
raw = self.raw
rgb[0::3] = raw[2::4]
rgb[1::3] = raw[1::4]
rgb[2::3] = raw[0::4]
self.__rgb = bytes(rgb)
return self.__rgb | def rgb(self) | Compute RGB values from the BGRA raw pixels.
:return bytes: RGB pixels. | 2.499783 | 2.71295 | 0.921426 |
# type: (int, int) -> Pixel
try:
return self.pixels[coord_y][coord_x] # type: ignore
except IndexError:
raise ScreenShotError(
"Pixel location ({}, {}) is out of range.".format(coord_x, coord_y)
) | def pixel(self, coord_x, coord_y) | Returns the pixel value at a given position.
:param int coord_x: The x coordinate.
:param int coord_y: The y coordinate.
:return tuple: The pixel value as (R, G, B). | 3.679286 | 4.442433 | 0.828214 |
# type: (Optional[List[str]]) -> int
cli_args = ArgumentParser()
cli_args.add_argument(
"-c",
"--coordinates",
default="",
type=str,
help="the part of the screen to capture: top, left, width, height",
)
cli_args.add_argument(
"-l",
"--level",
default=6,
type=int,
choices=list(range(10)),
help="the PNG compression level",
)
cli_args.add_argument(
"-m", "--monitor", default=0, type=int, help="the monitor to screen shot"
)
cli_args.add_argument(
"-o", "--output", default="monitor-{mon}.png", help="the output file name"
)
cli_args.add_argument(
"-q",
"--quiet",
default=False,
action="store_true",
help="do not print created files",
)
cli_args.add_argument("-v", "--version", action="version", version=__version__)
options = cli_args.parse_args(args)
kwargs = {"mon": options.monitor, "output": options.output}
if options.coordinates:
try:
top, left, width, height = options.coordinates.split(",")
except ValueError:
print("Coordinates syntax: top, left, width, height")
return 2
kwargs["mon"] = {
"top": int(top),
"left": int(left),
"width": int(width),
"height": int(height),
}
if options.output == "monitor-{mon}.png":
kwargs["output"] = "sct-{top}x{left}_{width}x{height}.png"
try:
with mss() as sct:
if options.coordinates:
output = kwargs["output"].format(**kwargs["mon"])
sct_img = sct.grab(kwargs["mon"])
to_png(sct_img.rgb, sct_img.size, level=options.level, output=output)
if not options.quiet:
print(os.path.realpath(output))
else:
for file_name in sct.save(**kwargs):
if not options.quiet:
print(os.path.realpath(file_name))
return 0
except ScreenShotError:
return 1 | def main(args=None) | Main logic. | 2.281898 | 2.278322 | 1.00157 |
# type: (Any, Any) -> int
evt = event.contents
ERROR.details = {
"type": evt.type,
"serial": evt.serial,
"error_code": evt.error_code,
"request_code": evt.request_code,
"minor_code": evt.minor_code,
}
return 0 | def error_handler(_, event) | Specifies the program's supplied error handler. | 5.152556 | 5.028472 | 1.024676 |
# type: (int, Any, Tuple[Any, Any]) -> Optional[Tuple[Any, Any]]
if retval != 0 and not ERROR.details:
return args
err = "{}() failed".format(func.__name__)
details = {"retval": retval, "args": args}
raise ScreenShotError(err, details=details) | def validate(retval, func, args) | Validate the returned value of a Xlib or XRANDR function. | 6.31192 | 6.478781 | 0.974245 |
def cfactory(attr=self.xlib, func=None, argtypes=None, restype=None):
# type: (Any, str, List[Any], Any) -> None
self._cfactory(
attr=attr,
errcheck=validate,
func=func,
argtypes=argtypes,
restype=restype,
)
void = ctypes.c_void_p
c_int = ctypes.c_int
uint = ctypes.c_uint
ulong = ctypes.c_ulong
c_long = ctypes.c_long
char_p = ctypes.c_char_p
pointer = ctypes.POINTER
cfactory(func="XSetErrorHandler", argtypes=[void], restype=c_int)
cfactory(
func="XGetErrorText",
argtypes=[pointer(Display), c_int, char_p, c_int],
restype=void,
)
cfactory(func="XOpenDisplay", argtypes=[char_p], restype=pointer(Display))
cfactory(
func="XDefaultRootWindow",
argtypes=[pointer(Display)],
restype=pointer(XWindowAttributes),
)
cfactory(
func="XGetWindowAttributes",
argtypes=[
pointer(Display),
pointer(XWindowAttributes),
pointer(XWindowAttributes),
],
restype=c_int,
)
cfactory(
func="XGetImage",
argtypes=[
pointer(Display),
pointer(Display),
c_int,
c_int,
uint,
uint,
ulong,
c_int,
],
restype=pointer(XImage),
)
cfactory(func="XDestroyImage", argtypes=[pointer(XImage)], restype=void)
# A simple benchmark calling 10 times those 2 functions:
# XRRGetScreenResources(): 0.1755971429956844 s
# XRRGetScreenResourcesCurrent(): 0.0039125580078689 s
# The second is faster by a factor of 44! So try to use it first.
try:
cfactory(
attr=self.xrandr,
func="XRRGetScreenResourcesCurrent",
argtypes=[pointer(Display), pointer(Display)],
restype=pointer(XRRScreenResources),
)
except AttributeError:
cfactory(
attr=self.xrandr,
func="XRRGetScreenResources",
argtypes=[pointer(Display), pointer(Display)],
restype=pointer(XRRScreenResources),
)
self.xrandr.XRRGetScreenResourcesCurrent = self.xrandr.XRRGetScreenResources
cfactory(
attr=self.xrandr,
func="XRRGetCrtcInfo",
argtypes=[pointer(Display), pointer(XRRScreenResources), c_long],
restype=pointer(XRRCrtcInfo),
)
cfactory(
attr=self.xrandr,
func="XRRFreeScreenResources",
argtypes=[pointer(XRRScreenResources)],
restype=void,
)
cfactory(
attr=self.xrandr,
func="XRRFreeCrtcInfo",
argtypes=[pointer(XRRCrtcInfo)],
restype=void,
) | def _set_cfunctions(self) | Set all ctypes functions and attach them to attributes.
See https://tronche.com/gui/x/xlib/function-index.html for details. | 2.581787 | 2.532644 | 1.019404 |
# type: () -> Optional[Dict[str, Any]]
details = {} # type: Dict[str, Any]
if ERROR.details:
details = {"xerror_details": ERROR.details}
ERROR.details = None
xserver_error = ctypes.create_string_buffer(1024)
self.xlib.XGetErrorText(
MSS.display,
details.get("xerror_details", {}).get("error_code", 0),
xserver_error,
len(xserver_error),
)
xerror = xserver_error.value.decode("utf-8")
if xerror != "0":
details["xerror"] = xerror
return details | def get_error_details(self) | Get more information about the latest X server error. | 3.915702 | 3.802524 | 1.029764 |
# type: () -> Monitors
if not self._monitors:
display = MSS.display
int_ = int
xrandr = self.xrandr
# All monitors
gwa = XWindowAttributes()
self.xlib.XGetWindowAttributes(display, self.root, ctypes.byref(gwa))
self._monitors.append(
{
"left": int_(gwa.x),
"top": int_(gwa.y),
"width": int_(gwa.width),
"height": int_(gwa.height),
}
)
# Each monitors
mon = xrandr.XRRGetScreenResourcesCurrent(display, self.drawable).contents
crtcs = mon.crtcs
for idx in range(mon.ncrtc):
crtc = xrandr.XRRGetCrtcInfo(display, mon, crtcs[idx]).contents
if crtc.noutput == 0:
xrandr.XRRFreeCrtcInfo(crtc)
continue
self._monitors.append(
{
"left": int_(crtc.x),
"top": int_(crtc.y),
"width": int_(crtc.width),
"height": int_(crtc.height),
}
)
xrandr.XRRFreeCrtcInfo(crtc)
xrandr.XRRFreeScreenResources(mon)
return self._monitors | def monitors(self) | Get positions of monitors (see parent class property). | 3.145526 | 3.133867 | 1.00372 |
# type: (Monitor) -> ScreenShot
# Convert PIL bbox style
if isinstance(monitor, tuple):
monitor = {
"left": monitor[0],
"top": monitor[1],
"width": monitor[2] - monitor[0],
"height": monitor[3] - monitor[1],
}
ximage = self.xlib.XGetImage(
MSS.display,
self.drawable,
monitor["left"],
monitor["top"],
monitor["width"],
monitor["height"],
PLAINMASK,
ZPIXMAP,
)
try:
bits_per_pixel = ximage.contents.bits_per_pixel
if bits_per_pixel != 32:
raise ScreenShotError(
"[XImage] bits per pixel value not (yet?) implemented: {}.".format(
bits_per_pixel
)
)
raw_data = ctypes.cast(
ximage.contents.data,
ctypes.POINTER(
ctypes.c_ubyte * monitor["height"] * monitor["width"] * 4
),
)
data = bytearray(raw_data.contents)
finally:
# Free
self.xlib.XDestroyImage(ximage)
return self.cls_image(data, monitor) | def grab(self, monitor) | Retrieve all pixels from a monitor. Pixels have to be RGB. | 3.551561 | 3.548736 | 1.000796 |
# type: (str) -> None
if os.path.isfile(fname):
newfile = fname + ".old"
print("{} -> {}".format(fname, newfile))
os.rename(fname, newfile) | def on_exists(fname) | Callback example when we try to overwrite an existing screenshot. | 3.856989 | 3.905735 | 0.987519 |
# type: (int, str, Callable[[str], None]) -> Iterator[str]
monitors = self.monitors
if not monitors:
raise ScreenShotError("No monitor found.")
if mon == 0:
# One screen shot by monitor
for idx, monitor in enumerate(monitors[1:], 1):
fname = output.format(mon=idx, date=datetime.now(), **monitor)
if callable(callback):
callback(fname)
sct = self.grab(monitor)
to_png(sct.rgb, sct.size, level=self.compression_level, output=fname)
yield fname
else:
# A screen shot of all monitors together or
# a screen shot of the monitor N.
mon = 0 if mon == -1 else mon
try:
monitor = monitors[mon]
except IndexError:
raise ScreenShotError("Monitor {!r} does not exist.".format(mon))
output = output.format(mon=mon, date=datetime.now(), **monitor)
if callable(callback):
callback(output)
sct = self.grab(monitor)
to_png(sct.rgb, sct.size, level=self.compression_level, output=output)
yield output | def save(self, mon=0, output="monitor-{mon}.png", callback=None) | Grab a screen shot and save it to a file.
:param int mon: The monitor to screen shot (default=0).
-1: grab one screen shot of all monitors
0: grab one screen shot by monitor
N: grab the screen shot of the monitor N
:param str output: The output filename.
It can take several keywords to customize the filename:
- `{mon}`: the monitor number
- `{top}`: the screen shot y-coordinate of the upper-left corner
- `{left}`: the screen shot x-coordinate of the upper-left corner
- `{width}`: the screen shot's width
- `{height}`: the screen shot's height
- `{date}`: the current date using the default formatter
As it is using the `format()` function, you can specify
formatting options like `{date:%Y-%m-%s}`.
:param callable callback: Callback called before saving the
screen shot to a file. Take the `output` argument as parameter.
:return generator: Created file(s). | 3.064772 | 2.800916 | 1.094204 |
# type: (Any) -> str
kwargs["mon"] = kwargs.get("mon", 1)
return next(self.save(**kwargs)) | def shot(self, **kwargs) | Helper to save the screen shot of the 1st monitor, by default.
You can pass the same arguments as for ``save``. | 13.738683 | 10.68077 | 1.286301 |
# type: (Any, str, List[Any], Any, Optional[Callable]) -> None
meth = getattr(attr, func)
meth.argtypes = argtypes
meth.restype = restype
if errcheck:
meth.errcheck = errcheck | def _cfactory(attr, func, argtypes, restype, errcheck=None) | Factory to create a ctypes function and automatically manage errors. | 3.166888 | 3.655634 | 0.866303 |
void = ctypes.c_void_p
pointer = ctypes.POINTER
self._cfactory(
attr=self.user32, func="GetSystemMetrics", argtypes=[INT], restype=INT
)
self._cfactory(
attr=self.user32,
func="EnumDisplayMonitors",
argtypes=[HDC, void, self.monitorenumproc, LPARAM],
restype=BOOL,
)
self._cfactory(
attr=self.user32, func="GetWindowDC", argtypes=[HWND], restype=HDC
)
self._cfactory(
attr=self.gdi32, func="GetDeviceCaps", argtypes=[HWND, INT], restype=INT
)
self._cfactory(
attr=self.gdi32, func="CreateCompatibleDC", argtypes=[HDC], restype=HDC
)
self._cfactory(
attr=self.gdi32,
func="CreateCompatibleBitmap",
argtypes=[HDC, INT, INT],
restype=HBITMAP,
)
self._cfactory(
attr=self.gdi32,
func="SelectObject",
argtypes=[HDC, HGDIOBJ],
restype=HGDIOBJ,
)
self._cfactory(
attr=self.gdi32,
func="BitBlt",
argtypes=[HDC, INT, INT, INT, INT, HDC, INT, INT, DWORD],
restype=BOOL,
)
self._cfactory(
attr=self.gdi32, func="DeleteObject", argtypes=[HGDIOBJ], restype=INT
)
self._cfactory(
attr=self.gdi32,
func="GetDIBits",
argtypes=[HDC, HBITMAP, UINT, UINT, void, pointer(BITMAPINFO), UINT],
restype=BOOL,
) | def _set_cfunctions(self) | Set all ctypes functions and attach them to attributes. | 1.937356 | 1.883621 | 1.028527 |
version = sys.getwindowsversion()[:2] # pylint: disable=no-member
if version >= (6, 3):
# Windows 8.1+
# Here 2 = PROCESS_PER_MONITOR_DPI_AWARE, which means:
# per monitor DPI aware. This app checks for the DPI when it is
# created and adjusts the scale factor whenever the DPI changes.
# These applications are not automatically scaled by the system.
ctypes.windll.shcore.SetProcessDpiAwareness(2)
elif (6, 0) <= version < (6, 3):
# Windows Vista, 7, 8 and Server 2012
self.user32.SetProcessDPIAware() | def _set_dpi_awareness(self) | Set DPI aware to capture full screen on Hi-DPI monitors. | 6.138725 | 5.757597 | 1.066196 |
# type: () -> Monitors
if not self._monitors:
int_ = int
user32 = self.user32
get_system_metrics = user32.GetSystemMetrics
# All monitors
self._monitors.append(
{
"left": int_(get_system_metrics(76)), # SM_XVIRTUALSCREEN
"top": int_(get_system_metrics(77)), # SM_YVIRTUALSCREEN
"width": int_(get_system_metrics(78)), # SM_CXVIRTUALSCREEN
"height": int_(get_system_metrics(79)), # SM_CYVIRTUALSCREEN
}
)
# Each monitors
def _callback(monitor, data, rect, dc_):
# types: (int, HDC, LPRECT, LPARAM) -> int
# pylint: disable=unused-argument
rct = rect.contents
self._monitors.append(
{
"left": int_(rct.left),
"top": int_(rct.top),
"width": int_(rct.right - rct.left),
"height": int_(rct.bottom - rct.top),
}
)
return 1
callback = self.monitorenumproc(_callback)
user32.EnumDisplayMonitors(0, 0, callback, 0)
return self._monitors | def monitors(self) | Get positions of monitors (see parent class). | 2.953493 | 2.916119 | 1.012816 |
# type: (Monitor) -> ScreenShot
# Convert PIL bbox style
if isinstance(monitor, tuple):
monitor = {
"left": monitor[0],
"top": monitor[1],
"width": monitor[2] - monitor[0],
"height": monitor[3] - monitor[1],
}
srcdc, memdc = MSS.srcdc, MSS.memdc
width, height = monitor["width"], monitor["height"]
if (self._bbox["height"], self._bbox["width"]) != (height, width):
self._bbox = monitor
self._bmi.bmiHeader.biWidth = width
self._bmi.bmiHeader.biHeight = -height # Why minus? [1]
self._data = ctypes.create_string_buffer(width * height * 4) # [2]
if MSS.bmp:
self.gdi32.DeleteObject(MSS.bmp)
MSS.bmp = self.gdi32.CreateCompatibleBitmap(srcdc, width, height)
self.gdi32.SelectObject(memdc, MSS.bmp)
self.gdi32.BitBlt(
memdc,
0,
0,
width,
height,
srcdc,
monitor["left"],
monitor["top"],
SRCCOPY | CAPTUREBLT,
)
bits = self.gdi32.GetDIBits(
memdc, MSS.bmp, 0, height, self._data, self._bmi, DIB_RGB_COLORS
)
if bits != height:
raise ScreenShotError("gdi32.GetDIBits() failed.")
return self.cls_image(bytearray(self._data), monitor) | def grab(self, monitor) | Retrieve all pixels from a monitor. Pixels have to be RGB.
In the code, there are few interesting things:
[1] bmi.bmiHeader.biHeight = -height
A bottom-up DIB is specified by setting the height to a
positive number, while a top-down DIB is specified by
setting the height to a negative number.
https://msdn.microsoft.com/en-us/library/ms787796.aspx
https://msdn.microsoft.com/en-us/library/dd144879%28v=vs.85%29.aspx
[2] bmi.bmiHeader.biBitCount = 32
image_data = create_string_buffer(height * width * 4)
We grab the image in RGBX mode, so that each word is 32bit
and we have no striding, then we transform to RGB.
Inspired by https://github.com/zoofIO/flexx
[3] bmi.bmiHeader.biClrUsed = 0
bmi.bmiHeader.biClrImportant = 0
When biClrUsed and biClrImportant are set to zero, there
is "no" color table, so we can read the pixels of the bitmap
retrieved by gdi32.GetDIBits() as a sequence of RGB values.
Thanks to http://stackoverflow.com/a/3688682 | 3.278704 | 3.133801 | 1.046238 |
# type: (Any) -> MSSMixin
os_ = platform.system().lower()
if os_ == "darwin":
from . import darwin
return darwin.MSS(**kwargs)
if os_ == "linux":
from . import linux
return linux.MSS(**kwargs)
if os_ == "windows":
from . import windows
return windows.MSS(**kwargs)
raise ScreenShotError("System {!r} not (yet?) implemented.".format(os_)) | def mss(**kwargs) | Factory returning a proper MSS class instance.
It detects the plateform we are running on
and choose the most adapted mss_class to take
screenshots.
It then proxies its arguments to the class for
instantiation. | 3.500383 | 3.648004 | 0.959534 |
while vH < 0: vH += 1
while vH > 1: vH -= 1
if 6 * vH < 1: return v1 + (v2 - v1) * 6 * vH
if 2 * vH < 1: return v2
if 3 * vH < 2: return v1 + (v2 - v1) * ((2.0 / 3) - vH) * 6
return v1 | def _hue2rgb(v1, v2, vH) | Private helper function (Do not call directly)
:param vH: rotation around the chromatic circle (between 0..1) | 1.51194 | 1.618844 | 0.933962 |
hx = ''.join(["%02x" % int(c * 255 + 0.5 - FLOAT_ERROR)
for c in rgb])
if not force_long and hx[0::2] == hx[1::2]:
hx = ''.join(hx[0::2])
return "#%s" % hx | def rgb2hex(rgb, force_long=False) | Transform RGB tuple to hex RGB representation
:param rgb: RGB 3-uple of float between 0 and 1
:rtype: 3 hex char or 6 hex char string representation
Usage
-----
>>> from colour import rgb2hex
>>> rgb2hex((0.0,1.0,0.0))
'#0f0'
Rounding try to be as natural as possible:
>>> rgb2hex((0.0,0.999999,1.0))
'#0ff'
And if not possible, the 6 hex char representation is used:
>>> rgb2hex((0.23,1.0,1.0))
'#3bffff'
>>> rgb2hex((0.0,0.999999,1.0), force_long=True)
'#00ffff' | 4.153358 | 5.289968 | 0.785138 |
try:
rgb = str_rgb[1:]
if len(rgb) == 6:
r, g, b = rgb[0:2], rgb[2:4], rgb[4:6]
elif len(rgb) == 3:
r, g, b = rgb[0] * 2, rgb[1] * 2, rgb[2] * 2
else:
raise ValueError()
except:
raise ValueError("Invalid value %r provided for rgb color."
% str_rgb)
return tuple([float(int(v, 16)) / 255 for v in (r, g, b)]) | def hex2rgb(str_rgb) | Transform hex RGB representation to RGB tuple
:param str_rgb: 3 hex char or 6 hex char string representation
:rtype: RGB 3-uple of float between 0 and 1
>>> from colour import hex2rgb
>>> hex2rgb('#00ff00')
(0.0, 1.0, 0.0)
>>> hex2rgb('#0f0')
(0.0, 1.0, 0.0)
>>> hex2rgb('#aaa') # doctest: +ELLIPSIS
(0.66..., 0.66..., 0.66...)
>>> hex2rgb('#aa') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Invalid value '#aa' provided for rgb color. | 2.050815 | 1.894263 | 1.082646 |
dec_rgb = tuple(int(v * 255) for v in hex2rgb(hex))
if dec_rgb in RGB_TO_COLOR_NAMES:
## take the first one
color_name = RGB_TO_COLOR_NAMES[dec_rgb][0]
## Enforce full lowercase for single worded color name.
return color_name if len(re.sub(r"[^A-Z]", "", color_name)) > 1 \
else color_name.lower()
# Hex format is verified by hex2rgb function. And should be 3 or 6 digit
if len(hex) == 7:
if hex[1] == hex[2] and \
hex[3] == hex[4] and \
hex[5] == hex[6]:
return '#' + hex[1] + hex[3] + hex[5]
return hex | def hex2web(hex) | Converts HEX representation to WEB
:param rgb: 3 hex char or 6 hex char string representation
:rtype: web string representation (human readable if possible)
WEB representation uses X11 rgb.txt to define conversion
between RGB and english color names.
Usage
=====
>>> from colour import hex2web
>>> hex2web('#ff0000')
'red'
>>> hex2web('#aaaaaa')
'#aaa'
>>> hex2web('#abc')
'#abc'
>>> hex2web('#acacac')
'#acacac' | 4.268029 | 4.326445 | 0.986498 |
if web.startswith('#'):
if (LONG_HEX_COLOR.match(web) or
(not force_long and SHORT_HEX_COLOR.match(web))):
return web.lower()
elif SHORT_HEX_COLOR.match(web) and force_long:
return '#' + ''.join([("%s" % (t, )) * 2 for t in web[1:]])
raise AttributeError(
"%r is not in web format. Need 3 or 6 hex digit." % web)
web = web.lower()
if web not in COLOR_NAME_TO_RGB:
raise ValueError("%r is not a recognized color." % web)
## convert dec to hex:
return rgb2hex([float(int(v)) / 255 for v in COLOR_NAME_TO_RGB[web]],
force_long) | def web2hex(web, force_long=False) | Converts WEB representation to HEX
:param rgb: web string representation (human readable if possible)
:rtype: 3 hex char or 6 hex char string representation
WEB representation uses X11 rgb.txt to define conversion
between RGB and english color names.
Usage
=====
>>> from colour import web2hex
>>> web2hex('red')
'#f00'
>>> web2hex('#aaa')
'#aaa'
>>> web2hex('#foo') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: '#foo' is not in web format. Need 3 or 6 hex digit.
>>> web2hex('#aaa', force_long=True)
'#aaaaaa'
>>> web2hex('#aaaaaa')
'#aaaaaa'
>>> web2hex('#aaaa') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: '#aaaa' is not in web format. Need 3 or 6 hex digit.
>>> web2hex('pinky') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: 'pinky' is not a recognized color.
And color names are case insensitive:
>>> Color('RED')
<Color red> | 4.136449 | 3.882807 | 1.065324 |
if nb < 0:
raise ValueError(
"Unsupported negative number of colors (nb=%r)." % nb)
step = tuple([float(end_hsl[i] - begin_hsl[i]) / nb for i in range(0, 3)]) \
if nb > 0 else (0, 0, 0)
def mul(step, value):
return tuple([v * value for v in step])
def add_v(step, step2):
return tuple([v + step2[i] for i, v in enumerate(step)])
return [add_v(begin_hsl, mul(step, r)) for r in range(0, nb + 1)] | def color_scale(begin_hsl, end_hsl, nb) | Returns a list of nb color HSL tuples between begin_hsl and end_hsl
>>> from colour import color_scale
>>> [rgb2hex(hsl2rgb(hsl)) for hsl in color_scale((0, 1, 0.5),
... (1, 1, 0.5), 3)]
['#f00', '#0f0', '#00f', '#f00']
>>> [rgb2hex(hsl2rgb(hsl))
... for hsl in color_scale((0, 0, 0),
... (0, 0, 1),
... 15)] # doctest: +ELLIPSIS
['#000', '#111', '#222', ..., '#ccc', '#ddd', '#eee', '#fff']
Of course, asking for negative values is not supported:
>>> color_scale((0, 1, 0.5), (1, 1, 0.5), -2)
Traceback (most recent call last):
...
ValueError: Unsupported negative number of colors (nb=-2). | 3.143154 | 2.838413 | 1.107363 |
## Turn the input into a by 3-dividable string. SHA-384 is good because it
## divides into 3 components of the same size, which will be used to
## represent the RGB values of the color.
digest = hashlib.sha384(str(obj).encode('utf-8')).hexdigest()
## Split the digest into 3 sub-strings of equivalent size.
subsize = int(len(digest) / 3)
splitted_digest = [digest[i * subsize: (i + 1) * subsize]
for i in range(3)]
## Convert those hexadecimal sub-strings into integer and scale them down
## to the 0..1 range.
max_value = float(int("f" * subsize, 16))
components = (
int(d, 16) ## Make a number from a list with hex digits
/ max_value ## Scale it down to [0.0, 1.0]
for d in splitted_digest)
return Color(rgb2hex(components)) | def RGB_color_picker(obj) | Build a color representation from the string representation of an object
This allows to quickly get a color from some data, with the
additional benefit that the color will be the same as long as the
(string representation of the) data is the same::
>>> from colour import RGB_color_picker, Color
Same inputs produce the same result::
>>> RGB_color_picker("Something") == RGB_color_picker("Something")
True
... but different inputs produce different colors::
>>> RGB_color_picker("Something") != RGB_color_picker("Something else")
True
In any case, we still get a ``Color`` object::
>>> isinstance(RGB_color_picker("Something"), Color)
True | 5.750794 | 5.881786 | 0.977729 |
# type: (List[AnyStr], Optional[AnyStr], Optional[Mapping[S, S]]) -> None
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
run(
cmd,
cwd=cwd,
env=env,
block=True,
combine_stderr=True,
return_object=False,
write_to_stdout=False,
nospin=True,
) | def pep517_subprocess_runner(cmd, cwd=None, extra_environ=None) | The default method of calling the wrapper subprocess. | 3.445209 | 3.678582 | 0.936559 |
# type: (str, Optional[str]) -> Distribution
if not os.path.exists(script_path):
raise FileNotFoundError(script_path)
target_cwd = os.path.dirname(os.path.abspath(script_path))
if egg_base is None:
egg_base = os.path.join(target_cwd, "reqlib-metadata")
with temp_path(), cd(target_cwd), _suppress_distutils_logs():
# This is for you, Hynek
# see https://github.com/hynek/environ_config/blob/69b1c8a/setup.py
args = ["egg_info"]
if egg_base:
args += ["--egg-base", egg_base]
script_name = os.path.basename(script_path)
g = {"__file__": script_name, "__name__": "__main__"}
sys.path.insert(0, target_cwd)
local_dict = {}
if sys.version_info < (3, 5):
save_argv = sys.argv
else:
save_argv = sys.argv.copy()
try:
global _setup_distribution, _setup_stop_after
_setup_stop_after = "run"
sys.argv[0] = script_name
sys.argv[1:] = args
with open(script_name, "rb") as f:
contents = f.read()
if six.PY3:
contents.replace(br"\r\n", br"\n")
else:
contents.replace(r"\r\n", r"\n")
if sys.version_info < (3, 5):
exec(contents, g, local_dict)
else:
exec(contents, g)
# We couldn't import everything needed to run setup
except Exception:
python = os.environ.get("PIP_PYTHON_PATH", sys.executable)
out, _ = run(
[python, "setup.py"] + args,
cwd=target_cwd,
block=True,
combine_stderr=False,
return_object=False,
nospin=True,
)
finally:
_setup_stop_after = None
sys.argv = save_argv
_setup_distribution = get_metadata(egg_base, metadata_type="egg")
dist = _setup_distribution
return dist | def run_setup(script_path, egg_base=None) | Run a `setup.py` script with a target **egg_base** if provided.
:param S script_path: The path to the `setup.py` script to run
:param Optional[S] egg_base: The metadata directory to build in
:raises FileNotFoundError: If the provided `script_path` does not exist
:return: The metadata dictionary
:rtype: Dict[Any, Any] | 3.257938 | 3.351026 | 0.972221 |
ctx.run(f"python setup.py clean")
dist = ROOT.joinpath("dist")
build = ROOT.joinpath("build")
print(f"[clean] Removing {dist} and {build}")
if dist.exists():
shutil.rmtree(str(dist))
if build.exists():
shutil.rmtree(str(build)) | def clean(ctx) | Clean previously built package artifacts. | 2.741307 | 2.593988 | 1.056793 |
if prebump not in REL_TYPES:
raise ValueError(f"{type_} not in {REL_TYPES}")
prebump = REL_TYPES.index(prebump)
version = bump_version(ctx, type_, log=True)
# Needs to happen before Towncrier deletes fragment files.
tag_release(version, yes=yes)
ctx.run(f"python setup.py sdist bdist_wheel")
dist_pattern = f'{PACKAGE_NAME.replace("-", "[-_]")}-*'
artifacts = list(ROOT.joinpath("dist").glob(dist_pattern))
filename_display = "\n".join(f" {a}" for a in artifacts)
print(f"[release] Will upload:\n{filename_display}")
if not yes:
try:
input("[release] Release ready. ENTER to upload, CTRL-C to abort: ")
except KeyboardInterrupt:
print("\nAborted!")
return
arg_display = " ".join(f'"{n}"' for n in artifacts)
ctx.run(f'twine upload --repository="{repo}" {arg_display}')
version = _prebump(version, prebump)
_write_version(version)
ctx.run(f'git commit -am "Prebump to {version}"') | def release(ctx, type_, repo, prebump=PREBUMP, yes=False) | Make a new release. | 4.982463 | 4.912604 | 1.01422 |
filepath = pathlib.Path(filepath)
if not filepath.is_file():
log("profile", f"no such script {filepath!s}", LogLevel.ERROR)
else:
if calltree:
log("profile", f"profiling script {filepath!s} calltree")
ctx.run(
(
f"python -m cProfile -o .profile.cprof {filepath!s}"
" && pyprof2calltree -k -i .profile.cprof"
" && rm -rf .profile.cprof"
)
)
else:
log("profile", f"profiling script {filepath!s}")
ctx.run(f"vprof -c cmhp {filepath!s}") | def profile(ctx, filepath, calltree=False) | Run and profile a given Python script.
:param str filepath: The filepath of the script to profile | 3.44794 | 3.496806 | 0.986026 |
extra_indexes = []
preceding_operators = ["and"] if elem_name == "extra" else ["and", "or"]
for i, element in enumerate(elements):
if isinstance(element, list):
cancelled = _strip_marker_elem(elem_name, element)
if cancelled:
extra_indexes.append(i)
elif isinstance(element, tuple) and element[0].value == elem_name:
extra_indexes.append(i)
for i in reversed(extra_indexes):
del elements[i]
if i > 0 and elements[i - 1] in preceding_operators:
# Remove the "and" before it.
del elements[i - 1]
elif elements:
# This shouldn't ever happen, but is included for completeness.
# If there is not an "and" before this element, try to remove the
# operator after it.
del elements[0]
return not elements | def _strip_marker_elem(elem_name, elements) | Remove the supplied element from the marker.
This is not a comprehensive implementation, but relies on an important
characteristic of metadata generation: The element's operand is always
associated with an "and" operator. This means that we can simply remove the
operand and the "and" operator associated with it. | 3.695964 | 3.368212 | 1.097307 |
if not marker:
return None
marker = _ensure_marker(marker)
elements = marker._markers
strip_func(elements)
if elements:
return marker
return None | def _get_stripped_marker(marker, strip_func) | Build a new marker which is cleaned according to `strip_func` | 6.402431 | 5.856784 | 1.093165 |
if not marker:
return set()
extras = set()
marker = _ensure_marker(marker)
_markers_collect_extras(marker._markers, extras)
return extras | def get_contained_extras(marker) | Collect "extra == ..." operands from a marker.
Returns a list of str. Each str is a speficied extra in this marker. | 5.727271 | 7.070555 | 0.810017 |
collection = []
if not marker:
return set()
marker = _ensure_marker(marker)
# Collect the (Variable, Op, Value) tuples and string joiners from the marker
_markers_collect_pyversions(marker._markers, collection)
marker_str = " and ".join(sorted(collection))
if not marker_str:
return set()
# Use the distlib dictionary parser to create a dictionary 'trie' which is a bit
# easier to reason about
marker_dict = distlib.markers.parse_marker(marker_str)[0]
version_set = set()
pyversions, _ = parse_marker_dict(marker_dict)
if isinstance(pyversions, set):
version_set.update(pyversions)
elif pyversions is not None:
version_set.add(pyversions)
# Each distinct element in the set was separated by an "and" operator in the marker
# So we will need to reduce them with an intersection here rather than a union
# in order to find the boundaries
versions = set()
if version_set:
versions = reduce(lambda x, y: x & y, version_set)
return versions | def get_contained_pyversions(marker) | Collect all `python_version` operands from a marker. | 6.351284 | 5.913258 | 1.074075 |
if not marker:
return False
marker = _ensure_marker(marker)
return _markers_contains_extra(marker._markers) | def contains_extra(marker) | Check whehter a marker contains an "extra == ..." operand. | 6.44338 | 6.441805 | 1.000244 |
if not marker:
return False
marker = _ensure_marker(marker)
return _markers_contains_pyversion(marker._markers) | def contains_pyversion(marker) | Check whether a marker contains a python_version operand. | 7.11451 | 6.145157 | 1.157742 |
text = item.read_text(encoding='utf-8')
renames = LIBRARY_RENAMES
for k in LIBRARY_RENAMES.keys():
if k not in vendored_libs:
vendored_libs.append(k)
for lib in vendored_libs:
to_lib = lib
if lib in renames:
to_lib = renames[lib]
text = re.sub(
r'([\n\s]*)import %s([\n\s\.]+)' % lib,
r'\1import %s\2' % to_lib,
text,
)
text = re.sub(
r'([\n\s]*)from %s([\s\.])+' % lib,
r'\1from %s\2' % to_lib,
text,
)
text = re.sub(
r"(\n\s*)__import__\('%s([\s'\.])+" % lib,
r"\1__import__('%s\2" % to_lib,
text,
)
item.write_text(text, encoding='utf-8') | def rewrite_file_imports(item, vendored_libs, vendor_dir) | Rewrite 'import xxx' and 'from xxx import' for vendored_libs | 2.263871 | 2.208043 | 1.025284 |
R = 3963 # radius of Earth (miles)
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos(math.sin(lat1) * math.sin(lat2) +
math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R | def distance(a, b) | Calculates distance between two latitude-longitude coordinates. | 1.746937 | 1.580743 | 1.105136 |
e = 0
for i in range(len(self.state)):
e += self.distance_matrix[self.state[i-1]][self.state[i]]
return e | def energy(self) | Calculates the length of the route. | 3.414677 | 2.678255 | 1.274963 |
return round(x, int(n - math.ceil(math.log10(abs(x))))) | def round_figures(x, n) | Returns x rounded to n significant figures. | 3.336861 | 3.638318 | 0.917144 |
if not fname:
date = datetime.datetime.now().strftime("%Y-%m-%dT%Hh%Mm%Ss")
fname = date + "_energy_" + str(self.energy()) + ".state"
with open(fname, "wb") as fh:
pickle.dump(self.state, fh) | def save_state(self, fname=None) | Saves state to pickle | 3.083598 | 3.116329 | 0.989497 |
with open(fname, 'rb') as fh:
self.state = pickle.load(fh) | def load_state(self, fname=None) | Loads state from pickle | 3.230803 | 3.167757 | 1.019902 |
self.Tmax = schedule['tmax']
self.Tmin = schedule['tmin']
self.steps = int(schedule['steps'])
self.updates = int(schedule['updates']) | def set_schedule(self, schedule) | Takes the output from `auto` and sets the attributes | 3.583896 | 3.335516 | 1.074465 |
if self.copy_strategy == 'deepcopy':
return copy.deepcopy(state)
elif self.copy_strategy == 'slice':
return state[:]
elif self.copy_strategy == 'method':
return state.copy()
else:
raise RuntimeError('No implementation found for ' +
'the self.copy_strategy "%s"' %
self.copy_strategy) | def copy_state(self, state) | Returns an exact copy of the provided state
Implemented according to self.copy_strategy, one of
* deepcopy : use copy.deepcopy (slow but reliable)
* slice: use list slices (faster but only works if state is list-like)
* method: use the state's copy() method | 3.310909 | 2.29921 | 1.440021 |
elapsed = time.time() - self.start
if step == 0:
print(' Temperature Energy Accept Improve Elapsed Remaining',
file=sys.stderr)
print('\r%12.5f %12.2f %s ' %
(T, E, time_string(elapsed)), file=sys.stderr, end="\r")
sys.stderr.flush()
else:
remain = (self.steps - step) * (elapsed / step)
print('\r%12.5f %12.2f %7.2f%% %7.2f%% %s %s\r' %
(T, E, 100.0 * acceptance, 100.0 * improvement,
time_string(elapsed), time_string(remain)), file=sys.stderr, end="\r")
sys.stderr.flush() | def default_update(self, step, T, E, acceptance, improvement) | Default update, outputs to stderr.
Prints the current temperature, energy, acceptance rate,
improvement rate, elapsed time, and remaining time.
The acceptance rate indicates the percentage of moves since the last
update that were accepted by the Metropolis algorithm. It includes
moves that decreased the energy, moves that left the energy
unchanged, and moves that increased the energy yet were reached by
thermal excitation.
The improvement rate indicates the percentage of moves since the
last update that strictly decreased the energy. At high
temperatures it will include both moves that improved the overall
state and moves that simply undid previously accepted moves that
increased the energy by thermal excititation. At low temperatures
it will tend toward zero as the moves that can decrease the energy
are exhausted and moves that would increase the energy are no longer
thermally accessible. | 2.869964 | 2.605627 | 1.101448 |
step = 0
self.start = time.time()
# Precompute factor for exponential cooling from Tmax to Tmin
if self.Tmin <= 0.0:
raise Exception('Exponential cooling requires a minimum "\
"temperature greater than zero.')
Tfactor = -math.log(self.Tmax / self.Tmin)
# Note initial state
T = self.Tmax
E = self.energy()
prevState = self.copy_state(self.state)
prevEnergy = E
self.best_state = self.copy_state(self.state)
self.best_energy = E
trials, accepts, improves = 0, 0, 0
if self.updates > 0:
updateWavelength = self.steps / self.updates
self.update(step, T, E, None, None)
# Attempt moves to new states
while step < self.steps and not self.user_exit:
step += 1
T = self.Tmax * math.exp(Tfactor * step / self.steps)
self.move()
E = self.energy()
dE = E - prevEnergy
trials += 1
if dE > 0.0 and math.exp(-dE / T) < random.random():
# Restore previous state
self.state = self.copy_state(prevState)
E = prevEnergy
else:
# Accept new state and compare to best state
accepts += 1
if dE < 0.0:
improves += 1
prevState = self.copy_state(self.state)
prevEnergy = E
if E < self.best_energy:
self.best_state = self.copy_state(self.state)
self.best_energy = E
if self.updates > 1:
if (step // updateWavelength) > ((step - 1) // updateWavelength):
self.update(
step, T, E, accepts / trials, improves / trials)
trials, accepts, improves = 0, 0, 0
self.state = self.copy_state(self.best_state)
if self.save_state_on_exit:
self.save_state()
# Return best state and energy
return self.best_state, self.best_energy | def anneal(self) | Minimizes the energy of a system by simulated annealing.
Parameters
state : an initial arrangement of the system
Returns
(state, energy): the best state and energy found. | 3.048275 | 3.054043 | 0.998112 |
def run(T, steps):
E = self.energy()
prevState = self.copy_state(self.state)
prevEnergy = E
accepts, improves = 0, 0
for _ in range(steps):
self.move()
E = self.energy()
dE = E - prevEnergy
if dE > 0.0 and math.exp(-dE / T) < random.random():
self.state = self.copy_state(prevState)
E = prevEnergy
else:
accepts += 1
if dE < 0.0:
improves += 1
prevState = self.copy_state(self.state)
prevEnergy = E
return E, float(accepts) / steps, float(improves) / steps
step = 0
self.start = time.time()
# Attempting automatic simulated anneal...
# Find an initial guess for temperature
T = 0.0
E = self.energy()
self.update(step, T, E, None, None)
while T == 0.0:
step += 1
self.move()
T = abs(self.energy() - E)
# Search for Tmax - a temperature that gives 98% acceptance
E, acceptance, improvement = run(T, steps)
step += steps
while acceptance > 0.98:
T = round_figures(T / 1.5, 2)
E, acceptance, improvement = run(T, steps)
step += steps
self.update(step, T, E, acceptance, improvement)
while acceptance < 0.98:
T = round_figures(T * 1.5, 2)
E, acceptance, improvement = run(T, steps)
step += steps
self.update(step, T, E, acceptance, improvement)
Tmax = T
# Search for Tmin - a temperature that gives 0% improvement
while improvement > 0.0:
T = round_figures(T / 1.5, 2)
E, acceptance, improvement = run(T, steps)
step += steps
self.update(step, T, E, acceptance, improvement)
Tmin = T
# Calculate anneal duration
elapsed = time.time() - self.start
duration = round_figures(int(60.0 * minutes * step / elapsed), 2)
# Don't perform anneal, just return params
return {'tmax': Tmax, 'tmin': Tmin, 'steps': duration, 'updates': self.updates} | def auto(self, minutes, steps=2000) | Explores the annealing landscape and
estimates optimal temperature settings.
Returns a dictionary suitable for the `set_schedule` method. | 2.947127 | 2.966124 | 0.993595 |
def load(self, shapefile=None):
if shapefile:
(shapeName, ext) = os.path.splitext(shapefile)
self.shapeName = shapeName
try:
self.shp = open("%s.shp" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shp" % shapeName)
try:
self.shx = open("%s.shx" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shx" % shapeName)
try:
self.dbf = open("%s.dbf" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.dbf" % shapeName)
if self.shp:
self.__shpHeader()
if self.dbf:
self.__dbfHeader() | Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file object or file name as an
argument. | null | null | null |
|
def __shpHeader(self):
if not self.shp:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no shp file found")
shp = self.shp
# File length (16-bit word * 2 = bytes)
shp.seek(24)
self.shpLength = unpack(">i", shp.read(4))[0] * 2
# Shape type
shp.seek(32)
self.shapeType= unpack("<i", shp.read(4))[0]
# The shapefile's bounding box (lower left, upper right)
self.bbox = _Array('d', unpack("<4d", shp.read(32)))
# Elevation
self.elevation = _Array('d', unpack("<2d", shp.read(16)))
# Measure
self.measure = _Array('d', unpack("<2d", shp.read(16))) | Reads the header information from a .shp or .shx file. | null | null | null |
|
def __shape(self):
f = self.__getFileObj(self.shp)
record = _Shape()
nParts = nPoints = zmin = zmax = mmin = mmax = None
(recNum, recLength) = unpack(">2i", f.read(8))
shapeType = unpack("<i", f.read(4))[0]
record.shapeType = shapeType
# For Null shapes create an empty points list for consistency
if shapeType == 0:
record.points = []
# All shape types capable of having a bounding box
elif shapeType in (3,5,8,13,15,18,23,25,28,31):
record.bbox = _Array('d', unpack("<4d", f.read(32)))
# Shape types with parts
if shapeType in (3,5,13,15,23,25,31):
nParts = unpack("<i", f.read(4))[0]
# Shape types with points
if shapeType in (3,5,8,13,15,23,25,31):
nPoints = unpack("<i", f.read(4))[0]
# Read parts
if nParts:
record.parts = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read part types for Multipatch - 31
if shapeType == 31:
record.partTypes = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read points - produces a list of [x,y] values
if nPoints:
record.points = [_Array('d', unpack("<2d", f.read(16))) for p in range(nPoints)]
# Read z extremes and values
if shapeType in (13,15,18,31):
(zmin, zmax) = unpack("<2d", f.read(16))
record.z = _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8)))
# Read m extremes and values
if shapeType in (13,15,18,23,25,28,31):
(mmin, mmax) = unpack("<2d", f.read(16))
# Measure values less than -10e38 are nodata values according to the spec
record.m = []
for m in _Array('d', unpack("%sd" % nPoints, f.read(nPoints * 8))):
if m > -10e38:
record.m.append(m)
else:
record.m.append(None)
# Read a single point
if shapeType in (1,11,21):
record.points = [_Array('d', unpack("<2d", f.read(16)))]
# Read a single Z value
if shapeType == 11:
record.z = unpack("<d", f.read(8))
# Read a single M value
if shapeType in (11,21):
record.m = unpack("<d", f.read(8))
return record | Returns the header info and geometry for a single shape. | null | null | null |
|
def __shapeIndex(self, i=None):
shx = self.shx
if not shx:
return None
if not self._offsets:
# File length (16-bit word * 2 = bytes) - header length
shx.seek(24)
shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
numRecords = shxRecordLength // 8
# Jump to the first record.
shx.seek(100)
for r in range(numRecords):
# Offsets are 16-bit words just like the file length
self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
shx.seek(shx.tell() + 4)
if not i == None:
return self._offsets[i] | Returns the offset in a .shp file for a shape based on information
in the .shx index file. | null | null | null |
|
def shape(self, i=0):
shp = self.__getFileObj(self.shp)
i = self.__restrictIndex(i)
offset = self.__shapeIndex(i)
if not offset:
# Shx index not available so use the full list.
shapes = self.shapes()
return shapes[i]
shp.seek(offset)
return self.__shape() | Returns a shape object for a shape in the the geometry
record file. | null | null | null |
|
def shapes(self):
shp = self.__getFileObj(self.shp)
shp.seek(100)
shapes = []
while shp.tell() < self.shpLength:
shapes.append(self.__shape())
return shapes | Returns all shapes in a shapefile. | null | null | null |
|
def __dbfHeaderLength(self):
if not self.__dbfHdrLength:
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
(self.numRecords, self.__dbfHdrLength) = \
unpack("<xxxxLH22x", dbf.read(32))
return self.__dbfHdrLength | Retrieves the header length of a dbf file header. | null | null | null |
|
def __dbfHeader(self):
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
headerLength = self.__dbfHeaderLength()
numFields = (headerLength - 33) // 32
for field in range(numFields):
fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
name = 0
idx = 0
if b("\x00") in fieldDesc[name]:
idx = fieldDesc[name].index(b("\x00"))
else:
idx = len(fieldDesc[name]) - 1
fieldDesc[name] = fieldDesc[name][:idx]
fieldDesc[name] = u(fieldDesc[name])
fieldDesc[name] = fieldDesc[name].lstrip()
fieldDesc[1] = u(fieldDesc[1])
self.fields.append(fieldDesc)
terminator = dbf.read(1)
assert terminator == b("\r")
self.fields.insert(0, ('DeletionFlag', 'C', 1, 0)) | Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger | null | null | null |
|
def __recordFmt(self):
if not self.numRecords:
self.__dbfHeader()
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
fmtSize = calcsize(fmt)
return (fmt, fmtSize) | Calculates the size of a .shp geometry record. | null | null | null |
|
def __record(self):
f = self.__getFileObj(self.dbf)
recFmt = self.__recordFmt()
recordContents = unpack(recFmt[0], f.read(recFmt[1]))
if recordContents[0] != b(' '):
# deleted record
return None
record = []
for (name, typ, size, deci), value in zip(self.fields,
recordContents):
if name == 'DeletionFlag':
continue
elif not value.strip():
record.append(value)
continue
elif typ == "N":
value = value.replace(b('\0'), b('')).strip()
if value == b(''):
value = 0
elif deci:
try:
value = float(value)
except ValueError:
value = 0
else:
value = int(value)
elif typ == b('D'):
try:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = [y, m, d]
except:
value = value.strip()
elif typ == b('L'):
value = (value in b('YyTt') and b('T')) or \
(value in b('NnFf') and b('F')) or b('?')
else:
value = u(value)
value = value.strip()
record.append(value)
return record | Reads and returns a dbf record row as a list of values. | null | null | null |
|
def record(self, i=0):
f = self.__getFileObj(self.dbf)
if not self.numRecords:
self.__dbfHeader()
i = self.__restrictIndex(i)
recSize = self.__recordFmt()[1]
f.seek(0)
f.seek(self.__dbfHeaderLength() + (i * recSize))
return self.__record() | Returns a specific dbf record based on the supplied index. | null | null | null |
|
def records(self):
if not self.numRecords:
self.__dbfHeader()
records = []
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHeaderLength())
for i in range(self.numRecords):
r = self.__record()
if r:
records.append(r)
return records | Returns all records in a dbf file. | null | null | null |
|
def shapeRecord(self, i=0):
i = self.__restrictIndex(i)
return _ShapeRecord(shape=self.shape(i),
record=self.record(i)) | Returns a combination geometry and attribute record for the
supplied record index. | null | null | null |
|
def shapeRecords(self):
shapeRecords = []
return [_ShapeRecord(shape=rec[0], record=rec[1]) \
for rec in zip(self.shapes(), self.records())] | Returns a list of combination geometry/attribute records for
all records in a shapefile. | null | null | null |
|
def __shpFileLength(self):
# Start with header length
size = 100
# Calculate size of all shapes
for s in self._shapes:
# Add in record header and shape type fields
size += 12
# nParts and nPoints do not apply to all shapes
#if self.shapeType not in (0,1):
# nParts = len(s.parts)
# nPoints = len(s.points)
if hasattr(s,'parts'):
nParts = len(s.parts)
if hasattr(s,'points'):
nPoints = len(s.points)
# All shape types capable of having a bounding box
if self.shapeType in (3,5,8,13,15,18,23,25,28,31):
size += 32
# Shape types with parts
if self.shapeType in (3,5,13,15,23,25,31):
# Parts count
size += 4
# Parts index array
size += nParts * 4
# Shape types with points
if self.shapeType in (3,5,8,13,15,23,25,31):
# Points count
size += 4
# Points array
size += 16 * nPoints
# Calc size of part types for Multipatch (31)
if self.shapeType == 31:
size += nParts * 4
# Calc z extremes and values
if self.shapeType in (13,15,18,31):
# z extremes
size += 16
# z array
size += 8 * nPoints
# Calc m extremes and values
if self.shapeType in (23,25,31):
# m extremes
size += 16
# m array
size += 8 * nPoints
# Calc a single point
if self.shapeType in (1,11,21):
size += 16
# Calc a single Z value
if self.shapeType == 11:
size += 8
# Calc a single M value
if self.shapeType in (11,21):
size += 8
# Calculate size as 16-bit words
size //= 2
return size | Calculates the file length of the shp file. | null | null | null |
|
def __shapefileHeader(self, fileObj, headerType='shp'):
f = self.__getFileObj(fileObj)
f.seek(0)
# File code, Unused bytes
f.write(pack(">6i", 9994,0,0,0,0,0))
# File length (Bytes / 2 = 16-bit words)
if headerType == 'shp':
f.write(pack(">i", self.__shpFileLength()))
elif headerType == 'shx':
f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
# Version, Shape type
f.write(pack("<2i", 1000, self.shapeType))
# The shapefile's bounding box (lower left, upper right)
if self.shapeType != 0:
try:
f.write(pack("<4d", *self.bbox()))
except error:
raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
else:
f.write(pack("<4d", 0,0,0,0))
# Elevation
z = self.zbox()
# Measure
m = self.mbox()
try:
f.write(pack("<4d", z[0], z[1], m[0], m[1]))
except error:
raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.") | Writes the specified header type to the specified file-like object.
Several of the shapefile formats are so similar that a single generic
method to read or write them is warranted. | null | null | null |
|
def __dbfHeader(self):
f = self.__getFileObj(self.dbf)
f.seek(0)
version = 3
year, month, day = time.localtime()[:3]
year -= 1900
# Remove deletion flag placeholder from fields
for field in self.fields:
if field[0].startswith("Deletion"):
self.fields.remove(field)
numRecs = len(self.records)
numFields = len(self.fields)
headerLength = numFields * 32 + 33
recordLength = sum([int(field[2]) for field in self.fields]) + 1
header = pack('<BBBBLHH20x', version, year, month, day, numRecs,
headerLength, recordLength)
f.write(header)
# Field descriptors
for field in self.fields:
name, fieldType, size, decimal = field
name = b(name)
name = name.replace(b(' '), b('_'))
name = name.ljust(11).replace(b(' '), b('\x00'))
fieldType = b(fieldType)
size = int(size)
fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)
f.write(fld)
# Terminator
f.write(b('\r')) | Writes the dbf header and field descriptors. | null | null | null |
|
def __shxRecords(self):
f = self.__getFileObj(self.shx)
f.seek(100)
for i in range(len(self._shapes)):
f.write(pack(">i", self._offsets[i] // 2))
f.write(pack(">i", self._lengths[i])) | Writes the shx records. | null | null | null |
|
def __dbfRecords(self):
f = self.__getFileObj(self.dbf)
for record in self.records:
if not self.fields[0][0].startswith("Deletion"):
f.write(b(' ')) # deletion flag
for (fieldName, fieldType, size, dec), value in zip(self.fields, record):
fieldType = fieldType.upper()
size = int(size)
if fieldType.upper() == "N":
value = str(value).rjust(size)
elif fieldType == 'L':
value = str(value)[0].upper()
else:
value = str(value)[:size].ljust(size)
assert len(value) == size
value = b(value)
f.write(value) | Writes the dbf records. | null | null | null |
|
def point(self, x, y, z=0, m=0):
pointShape = _Shape(self.shapeType)
pointShape.points.append([x, y, z, m])
self._shapes.append(pointShape) | Creates a point shape. | null | null | null |
|
def poly(self, parts=[], shapeType=POLYGON, partTypes=[]):
polyShape = _Shape(shapeType)
polyShape.parts = []
polyShape.points = []
for part in parts:
polyShape.parts.append(len(polyShape.points))
for point in part:
# Ensure point is list
if not isinstance(point, list):
point = list(point)
# Make sure point has z and m values
while len(point) < 4:
point.append(0)
polyShape.points.append(point)
if polyShape.shapeType == 31:
if not partTypes:
for part in parts:
partTypes.append(polyShape.shapeType)
polyShape.partTypes = partTypes
self._shapes.append(polyShape) | Creates a shape that has multiple collections of points (parts)
including lines, polygons, and even multipoint shapes. If no shape type
is specified it defaults to 'polygon'. If no part types are specified
(which they normally won't be) then all parts default to the shape type. | null | null | null |
|
def field(self, name, fieldType="C", size="50", decimal=0):
self.fields.append((name, fieldType, size, decimal)) | Adds a dbf field descriptor to the shapefile. | null | null | null |
|
def record(self, *recordList, **recordDict):
record = []
fieldCount = len(self.fields)
# Compensate for deletion flag
if self.fields[0][0].startswith("Deletion"): fieldCount -= 1
if recordList:
[record.append(recordList[i]) for i in range(fieldCount)]
elif recordDict:
for field in self.fields:
if field[0] in recordDict:
val = recordDict[field[0]]
if val:
record.append(val)
else:
record.append("")
if record:
self.records.append(record) | Creates a dbf attribute record. You can submit either a sequence of
field values or keyword arguments of field names and values. Before
adding records you must add fields for the record values using the
fields() method. If the record values exceed the number of fields the
extra ones won't be added. In the case of using keyword arguments to specify
field/value pairs only fields matching the already registered fields
will be added. | null | null | null |
|
def saveShp(self, target):
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shp'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shp = self.__getFileObj(target)
self.__shapefileHeader(self.shp, headerType='shp')
self.__shpRecords() | Save an shp file. | null | null | null |
|
def saveShx(self, target):
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shx'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shx = self.__getFileObj(target)
self.__shapefileHeader(self.shx, headerType='shx')
self.__shxRecords() | Save an shx file. | null | null | null |
|
def saveDbf(self, target):
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.dbf'
self.dbf = self.__getFileObj(target)
self.__dbfHeader()
self.__dbfRecords() | Save a dbf file. | null | null | null |
|
def save(self, target=None, shp=None, shx=None, dbf=None):
# TODO: Create a unique filename for target if None.
if shp:
self.saveShp(shp)
if shx:
self.saveShx(shx)
if dbf:
self.saveDbf(dbf)
elif target:
self.saveShp(target)
self.shp.close()
self.saveShx(target)
self.shx.close()
self.saveDbf(target)
self.dbf.close() | Save the shapefile data to three files or
three file-like objects. SHP and DBF files can also
be written exclusively using saveShp, saveShx, and saveDbf respectively. | null | null | null |
|
def delete(self, shape=None, part=None, point=None):
# shape, part, point
if shape and part and point:
del self._shapes[shape][part][point]
# shape, part
elif shape and part and not point:
del self._shapes[shape][part]
# shape
elif shape and not part and not point:
del self._shapes[shape]
# point
elif not shape and not part and point:
for s in self._shapes:
if s.shapeType == 1:
del self._shapes[point]
else:
for part in s.parts:
del s[part][point]
# part, point
elif not shape and part and point:
for s in self._shapes:
del s[part][point]
# part
elif not shape and part and not point:
for s in self._shapes:
del s[part] | Deletes the specified part of any shape by specifying a shape
number, part number, or point number. | null | null | null |
|
def point(self, x=None, y=None, z=None, m=None, shape=None, part=None, point=None, addr=None):
# shape, part, point
if shape and part and point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
try: self._shapes[shape][part][point]
except IndexError: self._shapes[shape][part].append([])
p = self._shapes[shape][part][point]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][point] = p
# shape, part
elif shape and part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
points = self._shapes[shape][part]
for i in range(len(points)):
p = points[i]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][i] = p
# shape
elif shape and not part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
# point
# part
if addr:
shape, part, point = addr
self._shapes[shape][part][point] = [x, y, z, m]
else:
Writer.point(self, x, y, z, m)
if self.autoBalance:
self.balance() | Creates/updates a point shape. The arguments allows
you to update a specific point by shape, part, point of any
shape type. | null | null | null |
|
def balance(self):
if len(self.records) > len(self._shapes):
self.null()
elif len(self.records) < len(self._shapes):
self.record() | Adds a corresponding empty attribute or null geometry record depending
on which type of record was created to make sure all three files
are in synch. | null | null | null |
|
def __fieldNorm(self, fieldName):
if len(fieldName) > 11: fieldName = fieldName[:11]
fieldName = fieldName.upper()
fieldName.replace(' ', '_') | Normalizes a dbf field name to fit within the spec and the
expectations of certain ESRI software. | null | null | null |
|
# Set a deadline by which time the diff must be complete.
if deadline == None:
# Unlike in most languages, Python counts time in seconds.
if self.Diff_Timeout <= 0:
deadline = sys.maxsize
else:
deadline = time.time() + self.Diff_Timeout
# Check for null inputs.
if text1 == None or text2 == None:
raise ValueError("Null inputs. (diff_main)")
# Check for equality (speedup).
if text1 == text2:
if text1:
return [(self.DIFF_EQUAL, text1)]
return []
# Trim off common prefix (speedup).
commonlength = self.diff_commonPrefix(text1, text2)
commonprefix = text1[:commonlength]
text1 = text1[commonlength:]
text2 = text2[commonlength:]
# Trim off common suffix (speedup).
commonlength = self.diff_commonSuffix(text1, text2)
if commonlength == 0:
commonsuffix = ''
else:
commonsuffix = text1[-commonlength:]
text1 = text1[:-commonlength]
text2 = text2[:-commonlength]
# Compute the diff on the middle block.
diffs = self.diff_compute(text1, text2, checklines, deadline)
# Restore the prefix and suffix.
if commonprefix:
diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
if commonsuffix:
diffs.append((self.DIFF_EQUAL, commonsuffix))
self.diff_cleanupMerge(diffs)
return diffs | def diff_main(self, text1, text2, checklines=True, deadline=None) | Find the differences between two texts. Simplifies the problem by
stripping any common prefix or suffix off the texts before diffing.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Optional speedup flag. If present and false, then don't run
a line-level diff first to identify the changed areas.
Defaults to true, which does a faster, slightly less optimal diff.
deadline: Optional time when the diff should be complete by. Used
internally for recursive calls. Users should set DiffTimeout instead.
Returns:
Array of changes. | 2.095952 | 1.799695 | 1.164615 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.