code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
---|---|---|---|---|---
string | string | string | float64 | float64 | float64
if "flight_id" not in self.data.columns:
return None
tmp = set(self.data.flight_id)
if len(tmp) != 1:
logging.warn("Several ids for one flight, consider splitting")
return tmp.pop()
|
def flight_id(self) -> Optional[str]
|
Returns the unique flight_id value(s) of the DataFrame.
If you know how to split flights, you may want to append such a column
in the DataFrame.
| 4.47168 | 4.695765 | 0.952279 |
return set(self.data.squawk.ffill().bfill())
|
def squawk(self) -> Set[str]
|
Returns all the unique squawk values in the trajectory.
| 18.053698 | 9.7081 | 1.859653 |
if "destination" not in self.data.columns:
return None
tmp = set(self.data.destination)
if len(tmp) == 1:
return tmp.pop()
logging.warn("Several destinations for one flight, consider splitting")
return tmp
|
def destination(self) -> Optional[Union[str, Set[str]]]
|
Returns the unique destination value(s) of the DataFrame.
The destination airport is usually represented as an ICAO or an IATA code.
| 4.764907 | 4.722827 | 1.00891 |
from ..data import opensky
query_params = {
"start": self.start,
"stop": self.stop,
"callsign": self.callsign,
"icao24": self.icao24,
}
return opensky.history(**query_params)
|
def query_opensky(self) -> Optional["Flight"]
|
Return the equivalent Flight from OpenSky History.
| 3.64713 | 3.158001 | 1.154886 |
from ..data import opensky, ModeS_Decoder
if not isinstance(self.icao24, str):
raise RuntimeError("Several icao24 for this flight")
def fail_warning():
id_ = self.flight_id
if id_ is None:
id_ = self.callsign
logging.warn(f"No data on Impala for flight {id_}.")
return self
def fail_silent():
return self
failure_dict = dict(warning=fail_warning, silent=fail_silent)
failure = failure_dict[failure_mode]
if data is None:
df = opensky.extended(self.start, self.stop, icao24=self.icao24)
else:
df = data.query("icao24 == @self.icao24").sort_values("mintime")
if df is None:
return failure()
timestamped_df = df.sort_values("mintime").assign(
timestamp=lambda df: df.mintime.dt.round("s")
)
referenced_df = (
timestamped_df.merge(self.data, on="timestamp", how="outer")
.sort_values("timestamp")
.rename(
columns=dict(
altitude="alt",
altitude_y="alt",
groundspeed="spd",
track="trk",
)
)[["timestamp", "alt", "spd", "trk"]]
.ffill()
.drop_duplicates() # bugfix! NEVER ERASE THAT LINE!
.merge(
timestamped_df[["timestamp", "icao24", "rawmsg"]],
on="timestamp",
how="right",
)
)
identifier = (
self.flight_id if self.flight_id is not None else self.callsign
)
# who cares about default lat0, lon0 with EHS
decoder = ModeS_Decoder((0, 0))
if progressbar is None:
progressbar = lambda x: tqdm( # noqa: E731
x,
total=referenced_df.shape[0],
desc=f"{identifier}:",
leave=False,
)
for _, line in progressbar(referenced_df.iterrows()):
decoder.process(
line.timestamp,
line.rawmsg,
spd=line.spd,
trk=line.trk,
alt=line.alt,
)
if decoder.traffic is None:
return failure()
extended = decoder.traffic[self.icao24]
if extended is None:
return failure()
# fix for https://stackoverflow.com/q/53657210/1595335
if "last_position" in self.data.columns:
extended = extended.assign(last_position=pd.NaT)
if "start" in self.data.columns:
extended = extended.assign(start=pd.NaT)
if "stop" in self.data.columns:
extended = extended.assign(stop=pd.NaT)
t = extended + self
if "flight_id" in self.data.columns:
t.data.flight_id = self.flight_id
# sometimes weird callsigns are decoded and should be discarded
# so it seems better to filter on callsign rather than on icao24
flight = t[self.callsign]
if flight is None:
return failure()
return flight.sort_values("timestamp")
|
def query_ehs(
self,
data: Optional[pd.DataFrame] = None,
failure_mode: str = "warning",
progressbar: Optional[Callable[[Iterable], Iterable]] = None,
) -> "Flight"
|
Extend data with extra columns from EHS messages.
By default, raw messages are requested from the OpenSky Impala server.
Making a lot of small requests can be very inefficient and may look
like a denial of service. If you get the raw messages using a different
channel, you can provide the resulting dataframe as a parameter.
The data parameter expects three columns: icao24, rawmsg and mintime, in
conformance with the OpenSky API.
| 3.829707 | 3.747866 | 1.021837 |
data = self.data[self.data.longitude.notnull()]
yield from zip(data["longitude"], data["latitude"], data["altitude"])
|
def coords(self) -> Iterator[Tuple[float, float, float]]
|
Iterates on longitudes, latitudes and altitudes.
| 5.041615 | 4.064302 | 1.240463 |
iterator = iter(zip(self.coords, self.timestamp))
while True:
next_ = next(iterator, None)
if next_ is None:
return
coords, time = next_
yield (coords[0], coords[1], time.to_pydatetime().timestamp())
|
def xy_time(self) -> Iterator[Tuple[float, float, float]]
|
Iterates on longitudes, latitudes and timestamps.
| 3.484452 | 3.113926 | 1.11899 |
# default is 10 minutes ("m" is the pandas offset alias for minutes)
unit = "m"
for data in _split(self.data, value, unit):
yield self.__class__(data)
|
def split(self, value=10, unit=None): # noqa: F811
if type(value) == int and unit is None
|
Splits a Flight into several legs.
By default, when no value is given, a Flight is split wherever no data is
recorded for more than 10 minutes.
| 7.533007 | 8.431903 | 0.893393 |
if isinstance(rule, str):
data = (
self._handle_last_position()
.data.assign(start=self.start, stop=self.stop)
.set_index("timestamp")
.resample(rule)
.first() # better performance than min() for duplicate index
.interpolate()
.reset_index()
.fillna(method="pad")
)
elif isinstance(rule, int):
data = (
self._handle_last_position()
.data.set_index("timestamp")
.asfreq(
(self.stop - self.start) / (rule - 1), # type: ignore
method="nearest",
)
.reset_index()
)
else:
raise TypeError("rule must be a str or an int")
return self.__class__(data)
|
def resample(self, rule: Union[str, int] = "1s") -> "Flight"
|
Resamples a Flight at a rate of one point per second.
| 3.366786 | 3.31105 | 1.016833 |
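A hedged usage sketch for the resample entry above, assuming `flight` is a Flight instance with a timestamp column (the variable name is illustrative):
# assuming `flight` is a Flight built elsewhere
smooth = flight.resample("5s")   # a string rule resamples on a time basis
fixed = flight.resample(30)      # an integer rule yields that many evenly spaced points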
# returns a mask
mask = douglas_peucker(
df=self.data,
tolerance=tolerance,
lat="latitude",
lon="longitude",
z=altitude,
z_factor=z_factor,
)
if return_type == Type["Flight"]:
return self.__class__(self.data.loc[mask])
else:
return mask
|
def simplify(
self,
tolerance: float,
altitude: Optional[str] = None,
z_factor: float = 3.048,
return_type: Type[Mask] = Type["Flight"],
) -> Mask
|
Simplifies a trajectory with Douglas-Peucker algorithm.
The method uses latitude and longitude, projects the trajectory to a
conformal projection and applies the algorithm. By default, a 2D version
is called, unless you pass a column name for altitude (z parameter). You
may scale the z-axis for more relevance (z_factor); the default value
works well in most situations.
The method returns a Flight unless you specify a np.ndarray[bool] as
return_type for getting a mask.
| 3.536094 | 3.218571 | 1.098654 |
def safe_exec(self, *args, **kwargs):
try:
return fn(self, *args, **kwargs)
except Exception as e:
logging.exception(e)
QMessageBox.information(
self, type(e).__name__, " ".join(str(x) for x in e.args)
)
return safe_exec
|
def dont_crash(fn)
|
Wraps callbacks: an information dialog is shown instead of letting the program crash.
| 3.212049 | 3.127741 | 1.026955 |
return self.shape.bounds
|
def bounds(self) -> Tuple[float, float, float, float]
|
Returns the bounds of the shape.
Bounds are given in the following order in the origin crs:
west, south, east, north
| 48.594799 | 31.994637 | 1.518842 |
west, south, east, north = self.bounds
return west, east, south, north
|
def extent(self) -> Tuple[float, float, float, float]
|
Returns the extent of the shape.
Extent is given in the following order in the origin crs:
west, east, south, north
This method is convenient for the ax.set_extent method
| 4.928088 | 6.289154 | 0.783585 |
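A minimal standalone sketch of the bounds-to-extent reordering above; the bounds tuple is illustrative:
bounds = (1.0, 43.5, 2.0, 44.0)      # west, south, east, north (shapely order)
west, south, east, north = bounds
extent = (west, east, south, north)  # the order expected by ax.set_extent
print(extent)                        # (1.0, 2.0, 43.5, 44.0)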
if self.shape is None:
return None
if isinstance(projection, crs.Projection):
projection = pyproj.Proj(projection.proj4_init)
if projection is None:
bounds = self.bounds
projection = pyproj.Proj(
proj="aea", # equivalent projection
lat_1=bounds[1],
lat_2=bounds[3],
lat_0=(bounds[1] + bounds[3]) / 2,
lon_0=(bounds[0] + bounds[2]) / 2,
)
projected_shape = transform(
partial(
pyproj.transform, pyproj.Proj(init="EPSG:4326"), projection
),
self.shape,
)
if not projected_shape.is_valid:
warnings.warn("The chosen projection is invalid for current shape")
return projected_shape
|
def project_shape(
self, projection: Union[pyproj.Proj, crs.Projection, None] = None
) -> base.BaseGeometry
|
Projection for a decent representation of the structure.
By default, an equivalent projection is applied. Equivalent projections
locally respect areas, which is convenient for the area attribute.
| 2.248177 | 2.150158 | 1.045587 |
if isinstance(projection, crs.Projection):
projection = pyproj.Proj(projection.proj4_init)
if projection is None:
projection = pyproj.Proj(
proj="lcc",
lat_1=self.data.latitude.min(),
lat_2=self.data.latitude.max(),
lat_0=self.data.latitude.mean(),
lon_0=self.data.longitude.mean(),
)
x, y = pyproj.transform(
pyproj.Proj(init="EPSG:4326"),
projection,
self.data.longitude.values,
self.data.latitude.values,
)
return self.__class__(self.data.assign(x=x, y=y))
|
def compute_xy(
self, projection: Union[pyproj.Proj, crs.Projection, None] = None
)
|
Computes x and y columns from latitudes and longitudes.
The source projection is WGS84 (EPSG 4326).
The default destination projection is a Lambert Conformal Conical
projection centered on the data inside the dataframe.
For consistency reasons with pandas DataFrame, a new Traffic structure
is returned.
| 1.710306 | 1.616495 | 1.058034 |
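A minimal sketch of the same lon/lat-to-x/y idea using the modern pyproj Transformer API; the target projection here (Web Mercator) is only an illustration, not the Lambert Conformal projection the method builds:
import numpy as np
import pyproj

lon = np.array([1.36, 1.40, 1.45])
lat = np.array([43.60, 43.70, 43.80])
# always_xy=True keeps the (lon, lat) -> (x, y) axis order
transformer = pyproj.Transformer.from_crs("EPSG:4326", "EPSG:3857", always_xy=True)
x, y = transformer.transform(lon, lat)
print(x[0], y[0])  # projected coordinates in metres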
sub = self.data.query("callsign == callsign")
return set(cs for cs in sub.callsign if len(cs) > 3 and " " not in cs)
|
def callsigns(self) -> Set[str]
|
Return only the most relevant callsigns
| 6.409524 | 5.411791 | 1.184363 |
key = ["icao24", "callsign"] if self.flight_ids is None else "flight_id"
return (
self.data.groupby(key)[["timestamp"]]
.count()
.sort_values("timestamp", ascending=False)
.rename(columns={"timestamp": "count"})
)
|
def stats(self) -> pd.DataFrame
|
Statistics about flights contained in the structure.
Useful for a meaningful representation.
| 4.352489 | 3.657292 | 1.190085 |
with ProcessPoolExecutor(max_workers=max_workers) as executor:
cumul = []
tasks = {
executor.submit(flight.resample, rule): flight
for flight in self
}
for future in tqdm(as_completed(tasks), total=len(tasks)):
cumul.append(future.result())
return self.__class__.from_flights(cumul)
|
def resample(
self,
rule: Union[str, int] = "1s",
max_workers: int = 4,
) -> "Traffic"
|
Resamples all trajectories, flight by flight.
`rule` defines the desired sample rate (default: 1s)
| 2.626634 | 2.81494 | 0.933105 |
from ..algorithms.cpa import closest_point_of_approach
return closest_point_of_approach(
self,
lateral_separation,
vertical_separation,
projection,
round_t,
max_workers,
)
|
def closest_point_of_approach(
self,
lateral_separation: float,
vertical_separation: float,
projection: Union[pyproj.Proj, crs.Projection, None] = None,
round_t: str = "d",
max_workers: int = 4,
) -> "CPA"
|
Computes a CPA dataframe for all pairs of trajectories that may come closer
than lateral_separation (laterally) and vertical_separation (vertically).
In order to be computed efficiently, the method needs the following
parameters:
- projection: a first filtering is applied on the bounding boxes of
trajectories, expressed in meters. You need to provide a decent
projection able to approximate distances with the Euclidean formula.
By default, the EuroPP() projection is used, but not passing the argument
explicitly will raise a warning.
- round_t: an additional column will be added in the DataFrame to group
trajectories by relevant time frames. Distance computations will be
considered only between trajectories flown in the same time frame.
By default, the 'd' pandas freq parameter is considered, to group
trajectories by day, but other ways of splitting ('h') may be more
relevant and impact performance.
- max_workers: distance computations are spread over a given number of
processors.
| 2.008893 | 2.657072 | 0.756055 |
a = vvsound(h)
M = tas / a
return M
|
def vtas2mach(tas, h)
|
True airspeed (tas) to mach number conversion
| 28.845493 | 25.640848 | 1.124982 |
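A standalone sketch of the TAS/Mach relation behind vtas2mach and vmach2tas, using the standard values for gamma and R (assumed here):
import math

gamma, R = 1.4, 287.05287            # ratio of specific heats, gas constant in J/(kg*K)

def speed_of_sound(T):
    """Speed of sound in m/s for a static air temperature T in K."""
    return math.sqrt(gamma * R * T)

a0 = speed_of_sound(288.15)          # sea-level ISA, about 340.3 m/s
print(250.0 / a0)                    # Mach number for a 250 m/s TAS, ~0.73
print(0.78 * a0)                     # TAS in m/s for Mach 0.78, ~265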
a = vvsound(h)
tas = M * a
return tas
|
def vmach2tas(M, h)
|
Mach number to true airspeed (tas) conversion
| 17.968294 | 18.62159 | 0.964917 |
rho = vdensity(h)
tas = eas * np.sqrt(rho0 / rho)
return tas
|
def veas2tas(eas, h)
|
Equivalent airspeed to true airspeed
| 10.256752 | 10.250186 | 1.000641 |
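A standalone sketch of the density correction used by veas2tas; rho0 is the sea-level ISA density and the value at 10 km is the standard-atmosphere density:
import math

rho0 = 1.225          # kg/m3 at sea level
rho_10km = 0.413      # kg/m3 at 10 km (ISA)
eas = 150.0           # m/s
tas = eas * math.sqrt(rho0 / rho_10km)
print(tas)            # about 258 m/s: TAS grows as density drops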
p, rho, T = vatmos(h)
qdyn = p*((1.+rho*tas*tas/(7.*p))**3.5-1.)
cas = np.sqrt(7.*p0/rho0*((qdyn/p0+1.)**(2./7.)-1.))
# cope with negative speed
cas = np.where(tas<0, -1*cas, cas)
return cas
|
def vtas2cas(tas, h)
|
TAS to CAS conversion, both in m/s
| 10.466268 | 10.19347 | 1.026762 |
tas = vmach2tas(M, h)
cas = vtas2cas(tas, h)
return cas
|
def vmach2cas(M, h)
|
Mach to CAS conversion
| 3.606959 | 3.916879 | 0.920876 |
tas = vcas2tas(cas, h)
M = vtas2mach(tas, h)
return M
|
def vcas2mach(cas, h)
|
CAS to Mach conversion
| 4.715392 | 4.440948 | 1.061799 |
# Constants
# Base values and gradient in table from hand-out
# (but corrected to avoid small discontinuities at borders of layers)
h0 = [0.0, 11000., 20000., 32000., 47000., 51000., 71000., 86852.]
p0 = [101325., # Sea level
22631.7009099, # 11 km
5474.71768857, # 20 km
867.974468302, # 32 km
110.898214043, # 47 km
66.939, # 51 km
3.9564 ] # 71 km
T0 = [288.15, # Sea level
216.65, # 11 km
216.65, # 20 km
228.65, # 32 km
270.65, # 47 km
270.65, # 51 km
214.65] # 71 km
# a = lapse rate (temp gradient)
# integer 0 indicates isothermic layer!
a = [-0.0065, # 0-11 km
0, # 11-20 km
0.001, # 20-32 km
0.0028, # 32-47 km
0, # 47-51 km
-0.0028, # 51-71 km
-0.002] # 71- km
# Clip altitude to maximum!
h = max(0.0, min(float(h), h0[-1]))
# Find correct layer
i = 0
while h > h0[i+1] and i < len(h0) - 2:
i = i+1
# Calculate for isothermal layer
if a[i] == 0:
T = T0[i]
p = p0[i]*np.exp(-g0/(R*T)*(h-h0[i]))
rho = p/(R*T)
# Calculate for temperature gradient
else:
T = T0[i] + a[i]*(h-h0[i])
p = p0[i]*((T/T0[i])**(-g0/(a[i]*R)))
rho = p/(R*T)
return p, rho, T
|
def atmos(h)
|
atmos(altitude): International Standard Atmosphere calculator
Input:
h = altitude in meters 0.0 < h < 84852.
(will be clipped when outside range, integer input allowed)
Output:
[p,rho,T] (in SI-units: Pa, kg/m3 and K)
| 3.894429 | 3.684285 | 1.057038 |
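A standalone sketch of the troposphere branch (0-11 km) of the calculator above, with the standard g0 and R values assumed:
import math

g0, R = 9.80665, 287.05287               # m/s2, J/(kg*K)

def isa_troposphere(h):
    """Pressure (Pa), density (kg/m3), temperature (K) for 0 <= h <= 11000 m."""
    a, T0, p0 = -0.0065, 288.15, 101325.0
    T = T0 + a * h                           # linear temperature lapse
    p = p0 * (T / T0) ** (-g0 / (a * R))     # gradient-layer barometric formula
    rho = p / (R * T)                        # ideal gas law
    return p, rho, T

print(isa_troposphere(10000.0))  # roughly (26400 Pa, 0.41 kg/m3, 223.15 K)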
# Base values and gradient in table from hand-out
# (but corrected to avoid small discontinuities at borders of layers)
h0 = [0.0, 11000., 20000., 32000., 47000., 51000., 71000., 86852.]
T0 = [288.15, # Sea level
216.65, # 11 km
216.65, # 20 km
228.65, # 32 km
270.65, # 47 km
270.65, # 51 km
214.65] # 71 km
# a = lapse rate (temp gradient)
# integer 0 indicates isothermic layer!
a = [-0.0065, # 0-11 km
0 , # 11-20 km
0.001, # 20-32 km
0.0028, # 32-47 km
0 , # 47-51 km
-0.0028, # 51-71 km
-0.002] # 71- km
# Clip altitude to maximum!
h = max(0.0,min(float(h),h0[-1]))
# Find correct layer
i = 0
while h>h0[i+1] and i<len(h0)-2:
i = i+1
# Calculate for isothermal layer
if a[i]==0:
T = T0[i]
# Calculate for temperature gradient
else:
T = T0[i] + a[i]*(h-h0[i])
return T
|
def temp(h)
|
temp (altitude): Temperature only version of ISA atmos
Input:
h = altitude in meters 0.0 < h < 84852.
(will be clipped when outside range, integer input allowed)
Output:
T (in SI-unit: K)
| 4.227473 | 3.956948 | 1.068367 |
a = vsound(h)
M = tas / a
return M
|
def tas2mach(tas, h)
|
True airspeed (tas) to mach number conversion
| 15.18341 | 13.117996 | 1.157449 |
a = vsound(h)
tas = M * a
return tas
|
def mach2tas(M, h)
|
Mach number to true airspeed (tas) conversion
| 9.887187 | 10.661449 | 0.927377 |
rho = density(h)
tas = eas * np.sqrt(rho0 / rho)
return tas
|
def eas2tas(eas, h)
|
Equivalent airspeed to true airspeed
| 7.95597 | 6.868509 | 1.158326 |
p, rho, T = atmos(h)
qdyn = p0*((1.+rho0*cas*cas/(7.*p0))**3.5-1.)
tas = np.sqrt(7.*p/rho*((1.+qdyn/p)**(2./7.)-1.))
tas = -1 * tas if cas < 0 else tas
return tas
|
def cas2tas(cas, h)
|
CAS to TAS conversion, both in m/s; h in m
| 9.495226 | 9.186571 | 1.033598 |
tas = mach2tas(M, h)
cas = tas2cas(tas, h)
return cas
|
def mach2cas(M, h)
|
Mach to CAS conversion
| 3.579304 | 3.523975 | 1.015701 |
tas = cas2tas(cas, h)
M = tas2mach(tas, h)
return M
|
def cas2mach(cas, h)
|
CAS to Mach conversion
| 5.110984 | 4.85276 | 1.053212 |
if minimum_time is not None:
minimum_time = to_datetime(minimum_time)
traffic = traffic.query(f"timestamp >= '{minimum_time}'")
if isinstance(filename, str):
filename = Path(filename)
if not filename.parent.exists():
filename.parent.mkdir(parents=True)
altitude = (
"baro_altitude"
if "baro_altitude" in traffic.data.columns
else "altitude"
)
if "mdl" not in traffic.data.columns:
traffic = aircraft.merge(traffic)
if "cas" not in traffic.data.columns:
traffic = Traffic(
traffic.data.assign(
cas=vtas2cas(traffic.data.ground_speed, traffic.data[altitude])
)
)
with filename.open("w") as fh:
t_delta = traffic.data.timestamp - traffic.start_time
data = (
traffic.assign_id()
.data.groupby("flight_id")
.filter(lambda x: x.shape[0] > 3)
.assign(timedelta=t_delta.apply(fmt_timedelta))
.sort_values(by="timestamp")
)
for column in data.columns:
data[column] = data[column].astype(str)  # np.str is removed in recent NumPy
is_created: List[str] = []
is_deleted: List[str] = []
start_time = cast(pd.Timestamp, traffic.start_time).time()
fh.write(f"00:00:00> TIME {start_time}\n")
# Add some bluesky command for the visualisation
# fh.write("00:00:00>trail on\n")
# fh.write("00:00:00>ssd conflicts\n")
# We remove an object when it's its last data point
buff = data.groupby("flight_id").timestamp.max()
dd = pd.DataFrame(
columns=["timestamp"], data=buff.values, index=buff.index.values
)
map_icao24_last_point = {}
for i, v in dd.iterrows():
map_icao24_last_point[i] = v[0]
# Main loop to write lines in the scenario file
for _, v in data.iterrows():
if v.flight_id not in is_created:
# If the object is not created then create it
is_created.append(v.flight_id)
fh.write(
f"{v.timedelta}> CRE {v.callsign} {v.mdl} "
f"{v.latitude} {v.longitude} {v.track} "
f"{v[altitude]} {v.cas}\n"
)
elif v.timestamp == map_icao24_last_point[v.flight_id]:
# Remove an aircraft when no data are available
if v.flight_id not in is_deleted:
is_deleted.append(v.flight_id)
fh.write(f"{v.timedelta}> DEL {v.callsign}\n")
elif v.flight_id not in is_deleted:
# Otherwise update the object position
fh.write(
f"{v.timedelta}> MOVE {v.callsign} "
f"{v.latitude} {v.longitude} {v[altitude]} "
f"{v.track} {v.cas} {v.vertical_rate}\n"
)
logging.info(f"Scenario file {filename} written")
|
def to_bluesky(
traffic: Traffic,
filename: Union[str, Path],
minimum_time: Optional[timelike] = None,
) -> None
|
Generates a Bluesky scenario file.
| 3.354698 | 3.300907 | 1.016296 |
if isinstance(package, str):
package = importlib.import_module(package)
results = {}
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + "." + name
results[name] = importlib.import_module(full_name)
if recursive and is_pkg:
results.update(import_submodules(full_name))
return results
|
def import_submodules(package, recursive=True)
|
Import all submodules of a module, recursively, including subpackages
:param package: package (name or actual module)
:type package: str | module
:rtype: dict[str, types.ModuleType]
| 1.399748 | 1.558266 | 0.898273 |
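A hedged usage sketch for import_submodules, picking the standard-library json package only because it ships with Python:
import json

modules = import_submodules(json)
print(sorted(modules))  # e.g. ['decoder', 'encoder', 'scanner', 'tool']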
return ax.scatter(
self.data.longitude,
self.data.latitude,
s=s,
transform=PlateCarree(),
**kwargs,
)
|
def plot(self, ax: GeoAxesSubplot, s: int = 10, **kwargs) -> Artist
|
Plotting function. All arguments are passed to ax.scatter
| 2.838403 | 2.661387 | 1.066513 |
if proj not in self.interpolator:
self.interpolator[proj] = interp1d(
np.stack([t.to_pydatetime().timestamp() for t in self.timestamp]),
proj.transform_points(
PlateCarree(), *np.stack(self.coords).T
).T,
)
return PlateCarree().transform_points(
proj, *self.interpolator[proj](times)
)
|
def interpolate(self, times, proj=PlateCarree()) -> np.ndarray
|
Interpolates a trajectory in time.
| 4.039165 | 3.495576 | 1.155508 |
cumul = []
for f in self:
info = {
"flight_id": f.flight_id,
"callsign": f.callsign,
"origin": f.origin,
"destination": f.destination,
"duration": f.stop - f.start, # type: ignore
}
cumul.append(info)
return (
pd.DataFrame.from_records(cumul)
.set_index("flight_id")
.sort_values("duration", ascending=False)
)
|
def stats(self) -> pd.DataFrame
|
Statistics about flights contained in the structure.
Useful for a meaningful representation.
| 2.94039 | 2.485178 | 1.183171 |
if df is None and (isinstance(x, str) or isinstance(y, str)):
raise ValueError("Provide a dataframe if x and y are column names")
if df is None and (isinstance(lon, str) or isinstance(lat, str)):
raise ValueError("Provide a dataframe if lat and lon are column names")
if tolerance < 0:
raise ValueError("tolerance must be a positive float")
if df is not None and isinstance(lat, str) and isinstance(lon, str):
lat, lon = df[lat].values, df[lon].values
if df is not None and lat is not None and lon is not None:
lat, lon = np.array(lat), np.array(lon)
x, y = pyproj.transform(
pyproj.Proj(init='EPSG:4326'),
pyproj.Proj(proj='lcc',
lat0=lat.mean(), lon0=lon.mean(),
lat1=lat.min(), lat2=lat.max(),
), lon, lat)
else:
if df is not None:
x, y = df[x].values, df[y].values
x, y = np.array(x), np.array(y)
if z is not None:
if df is not None:
z = df[z].values
z = z_factor * np.array(z)
mask = np.ones(len(x), dtype=bool)
if z is None:
_douglas_peucker_rec(x, y, mask, tolerance)
else:
_douglas_peucker_rec_3d(x, y, z, mask, tolerance)
return mask
|
def douglas_peucker(*args, df: pd.DataFrame=None, tolerance: float,
x='x', y='y', z=None, z_factor: float = 3.048,
lat=None, lon=None) -> np.ndarray
|
Ramer-Douglas-Peucker algorithm for 2D/3D trajectories.
Simplify a trajectory by keeping the points further away from the straight
line.
Parameters:
    df          (Optional) a Pandas dataframe
    tolerance   (float) the threshold for cutting the trajectory
    z_factor    (float) for ft/m conversion (default 3.048);
                1 km lateral, 100 m vertical seems like a good ratio
    x, y, z     (str or ndarray[float]) the column names if a dataframe is
                given, otherwise a series of float
    lat, lon    (str or ndarray[float]) the column names if a dataframe is
                given, otherwise a series of float; x, y are built with a
                Lambert Conformal projection
Note that lat, lon take precedence over x, y.
Returns:
    a np.array of booleans serving as a mask on the dataframe or on the
    numpy array
See also: https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
| 1.898793 | 1.840905 | 1.031445 |
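A hedged usage sketch for douglas_peucker with a toy dataframe (assuming the function above and its recursive helpers are importable); tolerance is in metres since latitudes and longitudes are projected first:
import numpy as np
import pandas as pd

toy = pd.DataFrame(
    {"latitude": np.linspace(43.0, 44.0, 100),
     "longitude": np.linspace(1.0, 2.0, 100)}
)
mask = douglas_peucker(df=toy, tolerance=1000, lat="latitude", lon="longitude")
simplified = toy.loc[mask]   # a straight line keeps only its end points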
west, south, east, north = self.projection.boundary.bounds
self.set_extent((west, east, south, north), crs=self.projection)
|
def _set_default_extent(self)
|
Helper for a default extent limited to the projection boundaries.
| 4.603547 | 3.700673 | 1.243976 |
if "callsign" in df.columns and df.callsign.dtype == object:
df.callsign = df.callsign.str.strip()
if nautical_units:
df.altitude = df.altitude / 0.3048
if "geoaltitude" in df.columns:
df.geoaltitude = df.geoaltitude / 0.3048
if "groundspeed" in df.columns:
df.groundspeed = df.groundspeed / 1852 * 3600
if "vertical_rate" in df.columns:
df.vertical_rate = df.vertical_rate / 0.3048 * 60
df.timestamp = pd.to_datetime(df.timestamp * 1e9).dt.tz_localize("utc")
if "last_position" in df.columns:
df = df.query("last_position == last_position").assign(
last_position=pd.to_datetime(
df.last_position * 1e9
).dt.tz_localize("utc")
)
return df.sort_values("timestamp")
|
def _format_dataframe(
df: pd.DataFrame, nautical_units=True
) -> pd.DataFrame
|
This function converts types, strips spaces after callsigns and sorts
the DataFrame by timestamp.
For some reason, all data arriving from OpenSky are expressed in units of the
metric system. Optionally, you may convert the units back to nautical miles,
feet and feet/min.
| 2.070052 | 1.952331 | 1.060298 |
start = to_datetime(start)
stop = to_datetime(stop)
before_hour = round_time(start, "before")
after_hour = round_time(stop, "after")
try:
# thinking of shapely bounds attribute (in this order)
# I just don't want to add the shapely dependency here
west, south, east, north = bounds.bounds # type: ignore
except AttributeError:
west, south, east, north = bounds
other_params = "and lon>={} and lon<={} ".format(west, east)
other_params += "and lat>={} and lat<={} ".format(south, north)
query = self.basic_request.format(
columns="icao24, callsign, s.ITEM as serial, count(*) as count",
other_tables=", state_vectors_data4.serials s",
before_time=start.timestamp(),
after_time=stop.timestamp(),
before_hour=before_hour.timestamp(),
after_hour=after_hour.timestamp(),
other_params=other_params + "group by icao24, callsign, s.ITEM",
)
logging.info(f"Sending request: {query}")
df = self._impala(query, columns="icao24, callsign, serial, count")
if df is None:
return None
df = df[df["count"] != "count"]
df["count"] = df["count"].astype(int)
return df
|
def within_bounds(
self,
start: timelike,
stop: timelike,
bounds: Union[BaseGeometry, Tuple[float, float, float, float]],
) -> Optional[pd.DataFrame]
|
EXPERIMENTAL.
| 4.482916 | 4.497013 | 0.996865 |
start = to_datetime(start)
stop = to_datetime(stop)
before_hour = round_time(start, how="before")
after_hour = round_time(stop, how="after")
if isinstance(airport, str):
from traffic.data import airports
airport = cast(Airport, airports[airport])
other_params = (
"and lat<={airport_latmax} and lat>={airport_latmin} "
"and lon<={airport_lonmax} and lon>={airport_lonmin} "
"and baroaltitude<=1000 "
"group by icao24, callsign"
).format(
airport_latmax=airport.lat + 0.1,
airport_latmin=airport.lat - 0.1,
airport_lonmax=airport.lon + 0.1,
airport_lonmin=airport.lon - 0.1,
)
columns = "icao24, callsign"
other_tables = ""
if count is True:
columns = "count(*) as count, s.ITEM as serial, " + columns
other_tables += ", state_vectors_data4.serials s"
other_params += ", s.ITEM"
request = self.basic_request.format(
columns=columns,
before_time=start.timestamp(),
after_time=stop.timestamp(),
before_hour=before_hour.timestamp(),
after_hour=after_hour.timestamp(),
other_tables=other_tables,
other_params=other_params,
)
df = self._impala(request, columns="count, serial, icao24, callsign")
if (
df is not None
and "callsign" in df.columns
and df.callsign.dtype == object
):
df = df[df.callsign != "callsign"]
return df
|
def within_airport(
self,
start: timelike,
stop: timelike,
airport: Union[Airport, str],
count: bool = False,
) -> Optional[pd.DataFrame]
|
EXPERIMENTAL.
| 3.134723 | 3.140743 | 0.998083 |
# Generate a list of whitelisted packages from the configuration and
# store it into the self.whitelist_package_names attribute so this
# operation doesn't end up in the fastpath.
if not self.whitelist_package_names:
self.whitelist_package_names = self._determine_unfiltered_package_names()
logger.info(
f"Initialized project plugin {self.name}, filtering "
+ f"{self.whitelist_package_names}"
)
|
def initialize_plugin(self)
|
Initialize the plugin
| 7.510211 | 7.417284 | 1.012528 |
# This plugin only processes packages, if the line in the packages
# configuration contains a PEP440 specifier it will be processed by the
# blacklist release filter. So we need to remove any packages that
# are not applicable for this plugin.
unfiltered_packages = set()
try:
lines = self.configuration["whitelist"]["packages"]
package_lines = lines.split("\n")
except KeyError:
package_lines = []
for package_line in package_lines:
package_line = package_line.strip()
if not package_line or package_line.startswith("#"):
continue
unfiltered_packages.add(package_line)
return list(unfiltered_packages)
|
def _determine_unfiltered_package_names(self)
|
Return a list of package names that should not be filtered, based on the
configuration file.
| 4.666528 | 4.45113 | 1.048392 |
if not self.whitelist_package_names:
return False
name = kwargs.get("name", None)
if not name:
return False
if name in self.whitelist_package_names:
logger.info(f"Package {name!r} is whitelisted")
return False
return True
|
def check_match(self, **kwargs)
|
Check if the package name is absent from the projects whitelisted
in the configuration.
Parameters
==========
name: str
The normalized package name of the package/project to check against
the whitelist.
Returns
=======
bool:
True if it matches, False otherwise.
| 3.537873 | 3.311958 | 1.068212 |
if self._patterns or self._packagetypes:
logger.debug(
"Skipping initalization of Exclude Platform plugin. "
+ "Already initialized"
)
return
try:
tags = self.configuration["blacklist"]["platforms"].split()
except KeyError:
logger.error(f"Plugin {self.name}: missing platforms= setting")
return
for platform in tags:
lplatform = platform.lower()
if lplatform in ("windows", "win"):
# PEP 425
# see also setuptools/package_index.py
self._patterns.extend([".win32", "-win32", "win_amd64", "win-amd64"])
# PEP 527
self._packagetypes.extend(["bdist_msi", "bdist_wininst"])
elif lplatform in ("macos", "macosx"):
self._patterns.extend(["macosx_", "macosx-"])
self._packagetypes.extend(["bdist_dmg"])
elif lplatform in ("freebsd"):
# concerns only very few files
self._patterns.extend([".freebsd", "-freebsd"])
elif lplatform in ("linux"):
self._patterns.extend(
[
"linux-i686", # PEP 425
"linux-x86_64", # PEP 425
"linux_armv7l", # https://github.com/pypa/warehouse/pull/2010
"linux_armv6l", # https://github.com/pypa/warehouse/pull/2012
"manylinux1_", # PEP 513
"manylinux2010_", # PEP 571
]
)
self._packagetypes.extend(["bdist_rpm"])
logger.info(f"Initialized {self.name} plugin with {self._patterns!r}")
|
def initialize_plugin(self)
|
Initialize the plugin reading patterns from the config.
| 3.433569 | 3.368835 | 1.019216 |
# Make a copy of releases keys
# as we may delete packages during iteration
removed = 0
versions = list(releases.keys())
for version in versions:
new_files = []
for file_desc in releases[version]:
if self._check_match(file_desc):
removed += 1
else:
new_files.append(file_desc)
if len(new_files) == 0:
del releases[version]
else:
releases[version] = new_files
logger.debug(f"{self.name}: filenames removed: {removed}")
|
def filter(self, info, releases)
|
Remove files from `releases` that match any pattern.
| 4.079956 | 3.796419 | 1.074685 |
# source dist: never filter out
pt = file_desc.get("packagetype")
if pt == "sdist":
return False
# Windows installer
if pt in self._packagetypes:
return True
fn = file_desc["filename"]
for i in self._patterns:
if i in fn:
return True
return False
|
def _check_match(self, file_desc) -> bool
|
Check if a release file matches any of the specified patterns or package types.
Parameters
==========
name: file_desc
file description entry
Returns
=======
bool:
True if it matches, False otherwise.
| 7.811331 | 6.791408 | 1.150178 |
global loaded_filter_plugins
enabled_plugins: List[str] = []
config = BandersnatchConfig().config
try:
config_blacklist_plugins = config["blacklist"]["plugins"]
split_plugins = config_blacklist_plugins.split("\n")
if "all" in split_plugins:
enabled_plugins = ["all"]
else:
for plugin in split_plugins:
if not plugin:
continue
enabled_plugins.append(plugin)
except KeyError:
pass
# If the plugins for the entrypoint_group have been loaded return them
cached_plugins = loaded_filter_plugins.get(entrypoint_group)
if cached_plugins:
return cached_plugins
plugins = set()
for entry_point in pkg_resources.iter_entry_points(group=entrypoint_group):
plugin_class = entry_point.load()
plugin_instance = plugin_class()
if "all" in enabled_plugins or plugin_instance.name in enabled_plugins:
plugins.add(plugin_instance)
loaded_filter_plugins[entrypoint_group] = list(plugins)
return plugins
|
def load_filter_plugins(entrypoint_group: str) -> Iterable[Filter]
|
Load all blacklist plugins that are registered with pkg_resources
Parameters
==========
entrypoint_group: str
The entrypoint group name to load plugins from
Returns
=======
List of Blacklist:
A list of objects derived from the Blacklist class
| 2.547762 | 2.723293 | 0.935545 |
if not self.patterns:
self.patterns = [
re.compile(pattern_string)
for pattern_string in self.PRERELEASE_PATTERNS
]
logger.info(f"Initialized prerelease plugin with {self.patterns}")
|
def initialize_plugin(self)
|
Initialize the plugin reading patterns from the config.
| 5.29538 | 4.419956 | 1.198062 |
for version in list(releases.keys()):
if any(pattern.match(version) for pattern in self.patterns):
del releases[version]
|
def filter(self, info, releases)
|
Remove all release versions that match any of the specified patterns.
| 4.100671 | 2.722218 | 1.506371 |
# TODO: should retrieving the plugin's config be part of the base class?
try:
config = self.configuration["filter_regex"]["releases"]
except KeyError:
return
else:
if not self.patterns:
pattern_strings = [pattern for pattern in config.split("\n") if pattern]
self.patterns = [
re.compile(pattern_string) for pattern_string in pattern_strings
]
logger.info(f"Initialized regex release plugin with {self.patterns}")
|
def initialize_plugin(self)
|
Initialize the plugin reading patterns from the config.
| 5.799982 | 5.333913 | 1.087378 |
return any(pattern.match(name) for pattern in self.patterns)
|
def check_match(self, name)
|
Check if a release version matches any of the specified patterns.
Parameters
==========
name: str
Release name
Returns
=======
bool:
True if it matches, False otherwise.
| 6.357033 | 7.637868 | 0.832305 |
# Generate a list of blacklisted packages from the configuration and
# store it into self.blacklist_package_names attribute so this
# operation doesn't end up in the fastpath.
if not self.blacklist_package_names:
self.blacklist_package_names = self._determine_filtered_package_names()
logger.info(
f"Initialized project plugin {self.name}, filtering "
+ f"{self.blacklist_package_names}"
)
|
def initialize_plugin(self)
|
Initialize the plugin
| 7.121924 | 7.033882 | 1.012517 |
# This plugin only processes packages, if the line in the packages
# configuration contains a PEP440 specifier it will be processed by the
# blacklist release filter. So we need to remove any packages that
# are not applicable for this plugin.
filtered_packages = set()
try:
lines = self.configuration["blacklist"]["packages"]
package_lines = lines.split("\n")
except KeyError:
package_lines = []
for package_line in package_lines:
package_line = package_line.strip()
if not package_line or package_line.startswith("#"):
continue
package_requirement = Requirement(package_line)
if package_requirement.specifier:
continue
if package_requirement.name != package_line:
logger.debug(
"Package line %r does not requirement name %r",
package_line,
package_requirement.name,
)
continue
filtered_packages.add(package_line)
logger.debug("Project blacklist is %r", list(filtered_packages))
return list(filtered_packages)
|
def _determine_filtered_package_names(self)
|
Return a list of package names to be filtered, based on the configuration
file.
| 3.878985 | 3.744281 | 1.035976 |
name = kwargs.get("name", None)
if not name:
return False
if name in self.blacklist_package_names:
logger.info(f"Package {name!r} is blacklisted")
return True
return False
|
def check_match(self, **kwargs)
|
Check if the package name matches against a project that is blacklisted
in the configuration.
Parameters
==========
name: str
The normalized package name of the package/project to check against
the blacklist.
Returns
=======
bool:
True if it matches, False otherwise.
| 4.381137 | 4.159318 | 1.053331 |
# Generate a list of blacklisted release requirements from the configuration
# and store it into the self.blacklist_release_requirements attribute so this
# operation doesn't end up in the fastpath.
if not self.blacklist_release_requirements:
self.blacklist_release_requirements = (
self._determine_filtered_package_requirements()
)
logger.info(
f"Initialized release plugin {self.name}, filtering "
+ f"{self.blacklist_release_requirements}"
)
|
def initialize_plugin(self)
|
Initialize the plugin
| 8.474978 | 8.345387 | 1.015529 |
filtered_requirements = set()
try:
lines = self.configuration["blacklist"]["packages"]
package_lines = lines.split("\n")
except KeyError:
package_lines = []
for package_line in package_lines:
package_line = package_line.strip()
if not package_line or package_line.startswith("#"):
continue
filtered_requirements.add(Requirement(package_line))
return list(filtered_requirements)
|
def _determine_filtered_package_requirements(self)
|
Parse the configuration file for [blacklist]packages
Returns
-------
list of packaging.requirements.Requirement
For all PEP440 package specifiers
| 2.619919 | 2.430369 | 1.077992 |
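A standalone sketch of how a packages line with a PEP440 specifier parses with packaging.requirements.Requirement, the same class used above (the package name is hypothetical):
from packaging.requirements import Requirement

req = Requirement("example-package<=1.5")
print(req.name)                  # 'example-package'
print("1.4" in req.specifier)    # True  -> that release would be filtered
print("2.0" in req.specifier)    # False -> that release is kept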
if not name or not version_string:
return False
try:
version = Version(version_string)
except InvalidVersion:
logger.debug(f"Package {name}=={version_string} has an invalid version")
return False
for requirement in self.blacklist_release_requirements:
if name != requirement.name:
continue
if version in requirement.specifier:
logger.debug(
f"MATCH: Release {name}=={version} matches specifier "
f"{requirement.specifier}"
)
return True
return False
|
def _check_match(self, name, version_string) -> bool
|
Check if the package name and version matches against a blacklisted
package version specifier.
Parameters
==========
name: str
Package name
version: str
Package version
Returns
=======
bool:
True if it matches, False otherwise.
| 3.905338 | 3.776792 | 1.034036 |
if isinstance(root, str):
root = Path(root)
results: List[Path] = []
for dirpath, dirnames, filenames in os.walk(root):
names = filenames
if dirs:
names += dirnames
for name in names:
results.append(Path(dirpath) / name)
results.sort()
return "\n".join(str(result.relative_to(root)) for result in results)
|
def find(root: Union[Path, str], dirs: bool = True) -> str
|
A test helper simulating 'find'.
Iterates over directories and filenames and returns them as paths relative
to the root.
| 2.248203 | 2.359627 | 0.952779 |
if isinstance(filepath, str):
base_dir = os.path.dirname(filepath)
filename = os.path.basename(filepath)
else:
base_dir = str(filepath.parent)
filename = filepath.name
# Change naming format to be more friendly with distributed POSIX
# filesystems like GlusterFS that hash based on filename
# GlusterFS ignore '.' at the start of filenames and this avoid rehashing
with tempfile.NamedTemporaryFile(
mode=mode, prefix=f".{filename}.", delete=False, dir=base_dir, **kw
) as f:
filepath_tmp = f.name
yield f
if not os.path.exists(filepath_tmp):
# Allow our clients to remove the file in case they don't want it to be
# put in place, but also don't want to error out.
return
os.chmod(filepath_tmp, 0o100644)
os.rename(filepath_tmp, filepath)
|
def rewrite(
filepath: Union[str, Path], mode: str = "w", **kw: Any
) -> Generator[IO, None, None]
|
Rewrite an existing file atomically so that programs running in parallel
do not hit race conditions while reading.
| 5.305805 | 5.169889 | 1.02629 |
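A hedged usage sketch, assuming rewrite is wrapped as a context manager upstream (its Generator return type and the save_json_metadata entry further below suggest a @contextlib.contextmanager decorator); the path is illustrative:
with rewrite("web/simple/index.html") as fh:   # hypothetical target path
    fh.write("<html>...</html>")               # written to a dot-prefixed temp file,
                                               # then renamed over the original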
logger.info(f"unlink {str(path)}")
path.unlink()
parent_path = path.parent
try:
parent_path.rmdir()
logger.info(f"rmdir {str(parent_path)}")
except OSError as oe:
logger.debug(f"Did not remove {str(parent_path)}: {str(oe)}")
|
def unlink_parent_dir(path: Path) -> None
|
Remove a file and if the dir is empty remove it
| 2.755413 | 2.459166 | 1.120466 |
with tempfile.NamedTemporaryFile(
dir=os.path.dirname(filename),
delete=False,
prefix=f"{os.path.basename(filename)}.",
**kw,
) as tf:
if os.path.exists(filename):
os.chmod(tf.name, os.stat(filename).st_mode & 0o7777)
tf.has_changed = False # type: ignore
yield tf
if not os.path.exists(tf.name):
return
filename_tmp = tf.name
if os.path.exists(filename) and filecmp.cmp(filename, filename_tmp, shallow=False):
os.unlink(filename_tmp)
else:
os.rename(filename_tmp, filename)
tf.has_changed = True
|
def update_safe(filename: str, **kw: Any) -> Generator[IO, None, None]
|
Rewrite a file atomically.
Clients are allowed to delete the tmpfile to signal that they don't
want to have it updated.
| 2.185838 | 2.24535 | 0.973495 |
try:
with utils.rewrite(self.json_file) as jf:
dump(package_info, jf, indent=4, sort_keys=True)
except Exception as e:
logger.error(
"Unable to write json to {}: {}".format(self.json_file, str(e))
)
return False
symlink_dir = self.json_pypi_symlink.parent
if not symlink_dir.exists():
symlink_dir.mkdir()
try:
# If symlink already exists throw a FileExistsError
self.json_pypi_symlink.symlink_to(self.json_file)
except FileExistsError:
pass
return True
|
def save_json_metadata(self, package_info: Dict) -> bool
|
Take the JSON metadata we just fetched and save to disk
| 3.350513 | 3.126181 | 1.071759 |
global display_filter_log
filter_plugins = filter_release_plugins()
if not filter_plugins:
if display_filter_log:
logger.info("No release filters are enabled. Skipping filtering")
display_filter_log = False
else:
for plugin in filter_plugins:
plugin.filter(self.info, self.releases)
|
def _filter_releases(self)
|
Run the release filtering plugins
| 5.577287 | 4.485087 | 1.243518 |
release_files = []
for release in self.releases.values():
release_files.extend(release)
downloaded_files = set()
deferred_exception = None
for release_file in release_files:
try:
downloaded_file = self.download_file(
release_file["url"], release_file["digests"]["sha256"]
)
if downloaded_file:
downloaded_files.add(
str(downloaded_file.relative_to(self.mirror.homedir))
)
except Exception as e:
logger.exception(
f"Continuing to next file after error downloading: "
f"{release_file['url']}"
)
if not deferred_exception: # keep first exception
deferred_exception = e
if deferred_exception:
raise deferred_exception # raise the exception after trying all files
self.mirror.altered_packages[self.name] = downloaded_files
|
def sync_release_files(self)
|
Purge + download files returning files removed + added
| 3.369532 | 3.274845 | 1.028913 |
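A standalone sketch of the "remember the first error, keep going, raise at the end" pattern used above, with plain values instead of downloads:
deferred_exception = None
for item in ["ok", "boom", "ok", "boom"]:
    try:
        if item == "boom":
            raise RuntimeError(item)
    except RuntimeError as exc:
        if deferred_exception is None:   # keep only the first failure
            deferred_exception = exc
if deferred_exception is not None:
    raise deferred_exception             # surfaces after every item was tried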
if self.todolist.exists():
try:
saved_todo = iter(open(self.todolist, encoding="utf-8"))
int(next(saved_todo).strip())
for line in saved_todo:
_, serial = line.strip().split()
int(serial)
except (StopIteration, ValueError):
# The todo list was inconsistent. This may happen if we get
# killed e.g. by the timeout wrapper. Just remove it - we'll
# just have to do whatever happened since the last successful
# sync.
logger.info("Removing inconsistent todo list.")
self.todolist.unlink()
|
def _cleanup(self)
|
Does a couple of cleanup tasks to ensure consistent data for later
processing.
| 6.953408 | 6.7396 | 1.031724 |
global LOG_PLUGINS
filter_plugins = filter_project_plugins()
if not filter_plugins:
if LOG_PLUGINS:
logger.info("No project filters are enabled. Skipping filtering")
LOG_PLUGINS = False
return
# Make a copy of self.packages_to_sync keys
# as we may delete packages during iteration
packages = list(self.packages_to_sync.keys())
for package_name in packages:
for plugin in filter_plugins:
if plugin.check_match(name=package_name):
if package_name not in self.packages_to_sync:
logger.debug(
f"{package_name} not found in packages to sync - "
+ f"{plugin.name} has no effect here ..."
)
else:
del self.packages_to_sync[package_name]
|
def _filter_packages(self)
|
Run the package filtering plugins and remove any packages from the
packages_to_sync that match any filters.
- Logging of action will be done within the check_match methods
| 4.252724 | 3.695913 | 1.150656 |
# In case we don't find any changes we will stay on the currently
# synced serial.
self.target_serial = self.synced_serial
self.packages_to_sync = {}
logger.info(f"Current mirror serial: {self.synced_serial}")
if self.todolist.exists():
# We started a sync previously and left a todo list as well as the
# targetted serial. We'll try to keep going through the todo list
# and then mark the targetted serial as done.
logger.info("Resuming interrupted sync from local todo list.")
saved_todo = iter(open(self.todolist, encoding="utf-8"))
self.target_serial = int(next(saved_todo).strip())
for line in saved_todo:
package, serial = line.strip().split()
self.packages_to_sync[package] = int(serial)
elif not self.synced_serial:
logger.info("Syncing all packages.")
# First get the current serial, then start to sync. This makes us
# more defensive in case something changes on the server between
# those two calls.
self.packages_to_sync.update(self.master.all_packages())
self.target_serial = max(
[self.synced_serial] + list(self.packages_to_sync.values())
)
else:
logger.info("Syncing based on changelog.")
self.packages_to_sync.update(
self.master.changed_packages(self.synced_serial)
)
self.target_serial = max(
[self.synced_serial] + list(self.packages_to_sync.values())
)
# We can avoid downloading the main index page if we don't have
# anything todo at all during a changelog-based sync.
self.need_index_sync = bool(self.packages_to_sync)
self._filter_packages()
logger.info(f"Trying to reach serial: {self.target_serial}")
pkg_count = len(self.packages_to_sync)
logger.info(f"{pkg_count} packages to sync.")
|
def determine_packages_to_sync(self)
|
Update the self.packages_to_sync to contain packages that need to be
synced.
| 3.898547 | 3.86512 | 1.008649 |
if self.hash_index:
# We are using index page directory hashing, so the directory
# format is /simple/f/foo/. We want to return a list of dirs
# like "simple/f".
subdirs = [simple_dir / x for x in simple_dir.iterdir() if x.is_dir()]
else:
# This is the traditional layout of /simple/foo/. We should
# return a single directory, "simple".
subdirs = [simple_dir]
return subdirs
|
def get_simple_dirs(self, simple_dir: Path) -> List[Path]
|
Return a list of simple index directories that should be searched
for package indexes when compiling the main index page.
| 6.521365 | 5.815109 | 1.121452 |
packages = sorted(
{
# Filter out all of the "non" normalized names here
canonicalize_name(x)
for x in os.listdir(simple_dir)
}
)
# Package indexes must be in directories, so ignore anything else.
packages = [x for x in packages if os.path.isdir(os.path.join(simple_dir, x))]
return packages
|
def find_package_indexes_in_dir(self, simple_dir)
|
Given a directory that contains simple packages indexes, return
a sorted list of normalized package names. This presumes every
directory within is a simple package index directory.
| 5.672504 | 4.84974 | 1.169651 |
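A standalone sketch of the PEP 503 normalisation applied by canonicalize_name above:
from packaging.utils import canonicalize_name

print(canonicalize_name("Pillow"))          # 'pillow'
print(canonicalize_name("zope.interface"))  # 'zope-interface'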
config_file = self.default_config_file
if self.config_file:
config_file = self.config_file
self.config = ConfigParser()
self.config.read(config_file)
|
def load_configuration(self) -> None
|
Read the configuration from a configuration file
| 2.606834 | 2.269943 | 1.148414 |
all_package_files = [] # type: List[Path]
loop = asyncio.get_event_loop()
mirror_base = config.get("mirror", "directory")
json_base = Path(mirror_base) / "web" / "json"
workers = args.workers or config.getint("mirror", "workers")
executor = concurrent.futures.ThreadPoolExecutor(max_workers=workers)
logger.info(f"Starting verify for {mirror_base} with {workers} workers")
try:
json_files = await loop.run_in_executor(executor, os.listdir, json_base)
except FileExistsError as fee:
logger.error(f"Metadata base dir {json_base} does not exist: {fee}")
return 2
if not json_files:
logger.error("No JSON metadata files found. Can not verify")
return 3
logger.debug(f"Found {len(json_files)} objects in {json_base}")
logger.debug(f"Using a {workers} thread ThreadPoolExecutor")
await async_verify(
config, all_package_files, mirror_base, json_files, args, executor
)
if not args.delete:
return 0
return await delete_files(mirror_base, executor, all_package_files, args.dry_run)
|
async def metadata_verify(config, args) -> int
|
Crawl all saved JSON metadata (or fetch it online) to check we have all
packages; if delete is set, generate a diff of unowned files.
| 3.457656 | 3.353661 | 1.031009 |
pem_key_chunks = [('-----BEGIN %s-----' % marker).encode('utf-8')]
# Limit base64 output lines to 64 characters by limiting input lines to 48 characters.
for chunk_start in range(0, len(der_key), 48):
pem_key_chunks.append(b64encode(der_key[chunk_start:chunk_start + 48]))
pem_key_chunks.append(('-----END %s-----' % marker).encode('utf-8'))
return b'\n'.join(pem_key_chunks)
|
def _der_to_pem(der_key, marker)
|
Perform a simple DER to PEM conversion.
| 2.580524 | 2.572819 | 1.002995 |
decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())
try:
decoded_key = decoded_values[0]
except IndexError:
raise ValueError("Invalid private key encoding")
return decoded_key["privateKey"]
|
def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key)
|
Convert a PKCS8-encoded RSA private key to PKCS1.
| 4.285907 | 4.443919 | 0.964443 |
algorithm = RsaAlgorithmIdentifier()
algorithm["rsaEncryption"] = RSA_ENCRYPTION_ASN1_OID
pkcs8_key = PKCS8PrivateKey()
pkcs8_key["version"] = 0
pkcs8_key["privateKeyAlgorithm"] = algorithm
pkcs8_key["privateKey"] = pkcs1_key
return encoder.encode(pkcs8_key)
|
def rsa_private_key_pkcs1_to_pkcs8(pkcs1_key)
|
Convert a PKCS1-encoded RSA private key to PKCS8.
| 2.945636 | 2.867038 | 1.027414 |
algorithm = RsaAlgorithmIdentifier()
algorithm["rsaEncryption"] = RSA_ENCRYPTION_ASN1_OID
pkcs8_key = PublicKeyInfo()
pkcs8_key["algorithm"] = algorithm
pkcs8_key["publicKey"] = univ.BitString.fromOctetString(pkcs1_key)
return encoder.encode(pkcs8_key)
|
def rsa_public_key_pkcs1_to_pkcs8(pkcs1_key)
|
Convert a PKCS1-encoded RSA public key to PKCS8.
| 3.364646 | 3.289526 | 1.022836 |
decoded_values = decoder.decode(pkcs8_key, asn1Spec=PublicKeyInfo())
try:
decoded_key = decoded_values[0]
except IndexError:
raise ValueError("Invalid public key encoding.")
return decoded_key["publicKey"].asOctets()
|
def rsa_public_key_pkcs8_to_pkcs1(pkcs8_key)
|
Convert a PKCS8-encoded RSA public key to PKCS1.
| 3.957922 | 4.205462 | 0.941138 |
r, s = decode_dss_signature(der_signature)
component_length = self._sig_component_length()
return int_to_bytes(r, component_length) + int_to_bytes(s, component_length)
|
def _der_to_raw(self, der_signature)
|
Convert signature from DER encoding to RAW encoding.
| 3.601413 | 3.479367 | 1.035077 |
component_length = self._sig_component_length()
if len(raw_signature) != int(2 * component_length):
raise ValueError("Invalid signature")
r_bytes = raw_signature[:component_length]
s_bytes = raw_signature[component_length:]
r = int_from_bytes(r_bytes, "big")
s = int_from_bytes(s_bytes, "big")
return encode_dss_signature(r, s)
|
def _raw_to_der(self, raw_signature)
|
Convert signature from RAW encoding to DER encoding.
| 2.539786 | 2.355326 | 1.078316 |
for time_claim in ['exp', 'iat', 'nbf']:
# Convert datetime to a intDate value in known time-format claims
if isinstance(claims.get(time_claim), datetime):
claims[time_claim] = timegm(claims[time_claim].utctimetuple())
if access_token:
claims['at_hash'] = calculate_at_hash(access_token,
ALGORITHMS.HASHES[algorithm])
return jws.sign(claims, key, headers=headers, algorithm=algorithm)
|
def encode(claims, key, algorithm=ALGORITHMS.HS256, headers=None, access_token=None)
|
Encodes a claims set and returns a JWT string.
JWTs are JWS signed objects with a few reserved claims.
Args:
claims (dict): A claims set to sign
key (str or dict): The key to use for signing the claim set. Can be
individual JWK or JWK set.
algorithm (str, optional): The algorithm to use for signing the
claims. Defaults to HS256.
headers (dict, optional): A set of headers that will be added to
the default headers. Any headers that are added as additional
headers will override the default headers.
access_token (str, optional): If present, the 'at_hash' claim will
be calculated and added to the claims present in the 'claims'
parameter.
Returns:
str: The string representation of the header, claims, and signature.
Raises:
JWTError: If there is an error encoding the claims.
Examples:
>>> jwt.encode({'a': 'b'}, 'secret', algorithm='HS256')
'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
| 4.701973 | 5.373826 | 0.874977 |
defaults = {
'verify_signature': True,
'verify_aud': True,
'verify_iat': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iss': True,
'verify_sub': True,
'verify_jti': True,
'verify_at_hash': True,
'require_aud': False,
'require_iat': False,
'require_exp': False,
'require_nbf': False,
'require_iss': False,
'require_sub': False,
'require_jti': False,
'require_at_hash': False,
'leeway': 0,
}
if options:
defaults.update(options)
verify_signature = defaults.get('verify_signature', True)
try:
payload = jws.verify(token, key, algorithms, verify=verify_signature)
except JWSError as e:
raise JWTError(e)
# Needed for at_hash verification
algorithm = jws.get_unverified_header(token)['alg']
try:
claims = json.loads(payload.decode('utf-8'))
except ValueError as e:
raise JWTError('Invalid payload string: %s' % e)
if not isinstance(claims, Mapping):
raise JWTError('Invalid payload string: must be a json object')
_validate_claims(claims, audience=audience, issuer=issuer,
subject=subject, algorithm=algorithm,
access_token=access_token,
options=defaults)
return claims
|
def decode(token, key, algorithms=None, options=None, audience=None,
issuer=None, subject=None, access_token=None)
|
Verifies a JWT string's signature and validates reserved claims.
Args:
token (str): A signed JWS to be verified.
key (str or dict): A key to attempt to verify the payload with. Can be
individual JWK or JWK set.
algorithms (str or list): Valid algorithms that should be used to verify the JWS.
audience (str): The intended audience of the token. If the "aud" claim is
included in the claim set, then the audience must be included and must equal
the provided claim.
issuer (str or iterable): Acceptable value(s) for the issuer of the token.
If the "iss" claim is included in the claim set, then the issuer must be
given and the claim in the token must be among the acceptable values.
subject (str): The subject of the token. If the "sub" claim is
included in the claim set, then the subject must be included and must equal
the provided claim.
access_token (str): An access token string. If the "at_hash" claim is included in the
claim set, then the access_token must be included, and it must match
the "at_hash" claim.
options (dict): A dictionary of options for skipping validation steps.
defaults = {
'verify_signature': True,
'verify_aud': True,
'verify_iat': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iss': True,
'verify_sub': True,
'verify_jti': True,
'verify_at_hash': True,
'require_aud': False,
'require_iat': False,
'require_exp': False,
'require_nbf': False,
'require_iss': False,
'require_sub': False,
'require_jti': False,
'require_at_hash': False,
'leeway': 0,
}
Returns:
dict: The dict representation of the claims set, assuming the signature is valid
and all requested data validation passes.
Raises:
JWTError: If the signature is invalid in any way.
ExpiredSignatureError: If the signature has expired.
JWTClaimsError: If any claim is invalid in any way.
Examples:
>>> payload = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
>>> jwt.decode(payload, 'secret', algorithms='HS256')
| 1.848226 | 1.840282 | 1.004317 |
if 'nbf' not in claims:
return
try:
nbf = int(claims['nbf'])
except ValueError:
raise JWTClaimsError('Not Before claim (nbf) must be an integer.')
now = timegm(datetime.utcnow().utctimetuple())
if nbf > (now + leeway):
raise JWTClaimsError('The token is not yet valid (nbf)')
|
def _validate_nbf(claims, leeway=0)
|
Validates that the 'nbf' claim is valid.
The "nbf" (not before) claim identifies the time before which the JWT
MUST NOT be accepted for processing. The processing of the "nbf"
claim requires that the current date/time MUST be after or equal to
the not-before date/time listed in the "nbf" claim. Implementers MAY
provide for some small leeway, usually no more than a few minutes, to
account for clock skew. Its value MUST be a number containing a
NumericDate value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
leeway (int): The number of seconds of skew that is allowed.
| 2.599125 | 2.621686 | 0.991394 |
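A standalone sketch of the leeway check above: a token that only becomes valid 30 seconds from now fails with no leeway but passes with 60 seconds of leeway:
from calendar import timegm
from datetime import datetime, timedelta

now = timegm(datetime.utcnow().utctimetuple())
nbf = timegm((datetime.utcnow() + timedelta(seconds=30)).utctimetuple())
print(nbf > now + 0)    # True  -> _validate_nbf would raise JWTClaimsError
print(nbf > now + 60)   # False -> accepted with leeway=60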
if 'exp' not in claims:
return
try:
exp = int(claims['exp'])
except ValueError:
raise JWTClaimsError('Expiration Time claim (exp) must be an integer.')
now = timegm(datetime.utcnow().utctimetuple())
if exp < (now - leeway):
raise ExpiredSignatureError('Signature has expired.')
|
def _validate_exp(claims, leeway=0)
|
Validates that the 'exp' claim is valid.
The "exp" (expiration time) claim identifies the expiration time on
or after which the JWT MUST NOT be accepted for processing. The
processing of the "exp" claim requires that the current date/time
MUST be before the expiration date/time listed in the "exp" claim.
Implementers MAY provide for some small leeway, usually no more than
a few minutes, to account for clock skew. Its value MUST be a number
containing a NumericDate value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
leeway (int): The number of seconds of skew that is allowed.
| 2.771711 | 2.865899 | 0.967135 |
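The mirror-image sketch for 'exp', again assuming python-jose; the five-minute expiry is arbitrary:

from calendar import timegm
from datetime import datetime, timedelta
from jose import jwt
from jose.exceptions import ExpiredSignatureError

exp = timegm((datetime.utcnow() - timedelta(minutes=5)).utctimetuple())
token = jwt.encode({'a': 'b', 'exp': exp}, 'secret', algorithm='HS256')

try:
    jwt.decode(token, 'secret', algorithms=['HS256'])
except ExpiredSignatureError:
    print('rejected: the token expired five minutes ago')

# Ten minutes of leeway is enough to accept it again.
jwt.decode(token, 'secret', algorithms=['HS256'], options={'leeway': 600})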
if 'aud' not in claims:
# if audience:
# raise JWTError('Audience claim expected, but not in claims')
return
audience_claims = claims['aud']
if isinstance(audience_claims, string_types):
audience_claims = [audience_claims]
if not isinstance(audience_claims, list):
raise JWTClaimsError('Invalid claim format in token')
if any(not isinstance(c, string_types) for c in audience_claims):
raise JWTClaimsError('Invalid claim format in token')
if audience not in audience_claims:
raise JWTClaimsError('Invalid audience')
|
def _validate_aud(claims, audience=None)
|
Validates that the 'aud' claim is valid.
The "aud" (audience) claim identifies the recipients that the JWT is
intended for. Each principal intended to process the JWT MUST
identify itself with a value in the audience claim. If the principal
processing the claim does not identify itself with a value in the
"aud" claim when this claim is present, then the JWT MUST be
rejected. In the general case, the "aud" value is an array of case-
sensitive strings, each containing a StringOrURI value. In the
special case when the JWT has one audience, the "aud" value MAY be a
single case-sensitive string containing a StringOrURI value. The
interpretation of audience values is generally application specific.
Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
audience (str): The audience that is verifying the token.
| 2.895017 | 2.972038 | 0.974085 |
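A short sketch of audience matching via jwt.decode, assuming python-jose; the audience names are made up:

from jose import jwt
from jose.exceptions import JWTClaimsError

token = jwt.encode({'aud': ['service-a', 'service-b']}, 'secret', algorithm='HS256')

# Accepted: the verifier identifies itself with one of the listed audiences.
jwt.decode(token, 'secret', algorithms=['HS256'], audience='service-a')

try:
    jwt.decode(token, 'secret', algorithms=['HS256'], audience='service-c')
except JWTClaimsError as err:
    print(err)  # Invalid audience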
if issuer is not None:
if isinstance(issuer, string_types):
issuer = (issuer,)
if claims.get('iss') not in issuer:
raise JWTClaimsError('Invalid issuer')
|
def _validate_iss(claims, issuer=None)
|
Validates that the 'iss' claim is valid.
The "iss" (issuer) claim identifies the principal that issued the
JWT. The processing of this claim is generally application specific.
The "iss" value is a case-sensitive string containing a StringOrURI
value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
issuer (str or iterable): Acceptable value(s) for the issuer that
signed the token.
| 3.38572 | 3.165109 | 1.069701 |
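A sketch of issuer checking via jwt.decode, assuming python-jose; the issuer URLs are placeholders:

from jose import jwt
from jose.exceptions import JWTClaimsError

token = jwt.encode({'iss': 'https://issuer.example.com'}, 'secret', algorithm='HS256')

# Either a single issuer or an iterable of acceptable issuers may be given.
jwt.decode(token, 'secret', algorithms=['HS256'],
           issuer=('https://issuer.example.com', 'https://alt.example.com'))

try:
    jwt.decode(token, 'secret', algorithms=['HS256'], issuer='https://other.example.com')
except JWTClaimsError as err:
    print(err)  # Invalid issuer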
if 'sub' not in claims:
return
if not isinstance(claims['sub'], string_types):
raise JWTClaimsError('Subject must be a string.')
if subject is not None:
if claims.get('sub') != subject:
raise JWTClaimsError('Invalid subject')
|
def _validate_sub(claims, subject=None)
|
Validates that the 'sub' claim is valid.
The "sub" (subject) claim identifies the principal that is the
subject of the JWT. The claims in a JWT are normally statements
about the subject. The subject value MUST either be scoped to be
locally unique in the context of the issuer or be globally unique.
The processing of this claim is generally application specific. The
"sub" value is a case-sensitive string containing a StringOrURI
value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
subject (str): The subject of the token.
| 2.983815 | 3.020381 | 0.987893 |
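A sketch of subject checking via jwt.decode, assuming python-jose; 'user-42' is a made-up subject:

from jose import jwt
from jose.exceptions import JWTClaimsError

token = jwt.encode({'sub': 'user-42'}, 'secret', algorithm='HS256')
jwt.decode(token, 'secret', algorithms=['HS256'], subject='user-42')  # accepted

try:
    jwt.decode(token, 'secret', algorithms=['HS256'], subject='user-43')
except JWTClaimsError as err:
    print(err)  # Invalid subject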
while b:
a, b = b, (a % b)
return a
|
def _gcd(a, b)
|
Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
| 3.366181 | 5.673184 | 0.593349 |
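A quick check of the sign behaviour described above, restating the helper locally since it is module-private:

def _gcd(a, b):
    while b:
        a, b = b, (a % b)
    return a

print(_gcd(54, 24))  # 6
print(_gcd(-4, 6))   # 2  (same sign as b)
print(_gcd(4, -6))   # -2 (same sign as b)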
# See 8.2.2(i) in Handbook of Applied Cryptography.
ktot = d * e - 1
# The quantity d*e-1 is a multiple of phi(n), even,
# and can be represented as t*2^s.
t = ktot
while t % 2 == 0:
t = t // 2
# Cycle through all multiplicative inverses in Zn.
# The algorithm is non-deterministic, but there is a 50% chance
# any candidate a leads to successful factoring.
# See "Digitalized Signatures and Public Key Functions as Intractable
# as Factorization", M. Rabin, 1979
spotted = False
a = 2
while not spotted and a < _MAX_RECOVERY_ATTEMPTS:
k = t
# Cycle through all values a^{t*2^i}=a^k
while k < ktot:
cand = pow(a, k, n)
# Check if a^k is a non-trivial root of unity (mod n)
if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:
# We have found a number such that (cand-1)(cand+1)=0 (mod n).
# Either of the terms divides n.
p = _gcd(cand + 1, n)
spotted = True
break
k *= 2
# This value was not any good... let's try another!
a += 2
if not spotted:
raise ValueError("Unable to compute factors p and q from exponent d.")
# Found !
q, r = divmod(n, p)
assert r == 0
p, q = sorted((p, q), reverse=True)
return (p, q)
|
def _rsa_recover_prime_factors(n, e, d)
|
Compute factors p and q from the private exponent d. We assume that n has
no more than two factors. This function is adapted from code in PyCrypto.
| 6.367764 | 6.188639 | 1.028944 |
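A numeric illustration of the factoring step used above, with a toy modulus; the non-trivial square root 794 was found by hand for n = 3233, whereas the function derives such a value from the a^k search loop:

from math import gcd

# Toy RSA parameters: n = 61 * 53, e = 17, d = e^-1 mod phi(n) = 2753.
n, e, d = 3233, 17, 2753
phi = (61 - 1) * (53 - 1)
assert (d * e - 1) % phi == 0          # d*e - 1 is a multiple of phi(n)

# 794 is a non-trivial square root of 1 mod n (794 != 1 and 794 != n - 1) ...
cand = 794
assert pow(cand, 2, n) == 1
# ... so gcd(cand + 1, n) exposes one of the prime factors.
p = gcd(cand + 1, n)
q = n // p
print(sorted((p, q), reverse=True))    # [61, 53]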
# Only allow this processing if the prefix matches
# AND the following byte indicates an ASN1 sequence,
# as we would expect with the legacy encoding.
if not pkcs8_key.startswith(LEGACY_INVALID_PKCS8_RSA_HEADER + ASN1_SEQUENCE_ID):
raise ValueError("Invalid private key encoding")
return pkcs8_key[len(LEGACY_INVALID_PKCS8_RSA_HEADER):]
|
def _legacy_private_key_pkcs8_to_pkcs1(pkcs8_key)
|
Legacy RSA private key PKCS8-to-PKCS1 conversion.
.. warning::
This is incorrect parsing and only works because the legacy PKCS1-to-PKCS8
encoding was also incorrect.
| 8.000032 | 7.979383 | 1.002588 |
hash_digest = hash_alg(access_token.encode('utf-8')).digest()
cut_at = int(len(hash_digest) / 2)
truncated = hash_digest[:cut_at]
at_hash = base64url_encode(truncated)
return at_hash.decode('utf-8')
|
def calculate_at_hash(access_token, hash_alg)
|
Helper method for calculating an access token
hash, as described in http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
Its value is the base64url encoding of the left-most half of the hash of the octets
of the ASCII representation of the access_token value, where the hash algorithm
used is the hash algorithm used in the alg Header Parameter of the ID Token's JOSE
Header. For instance, if the alg is RS256, hash the access_token value with SHA-256,
then take the left-most 128 bits and base64url encode them. The at_hash value is a
case sensitive string.
Args:
access_token (str): An access token string.
hash_alg (callable): A callable returning a hash object, e.g. hashlib.sha256
| 2.582064 | 3.072082 | 0.840493 |
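A self-contained restatement of the helper for a quick check; it assumes, as in the original, that the base64url encoding drops the '=' padding:

import base64
import hashlib

def calculate_at_hash(access_token, hash_alg):
    digest = hash_alg(access_token.encode('utf-8')).digest()
    left_half = digest[:len(digest) // 2]      # left-most half of the hash
    return base64.urlsafe_b64encode(left_half).rstrip(b'=').decode('utf-8')

# For an HS256/RS256 ID token, SHA-256 is the matching hash algorithm.
print(calculate_at_hash('example-access-token', hashlib.sha256))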
rem = len(input) % 4
if rem > 0:
input += b'=' * (4 - rem)
return base64.urlsafe_b64decode(input)
|
def base64url_decode(input)
|
Helper method to base64url_decode a string.
Args:
input (bytes): A base64url encoded value to decode; any stripped '=' padding is restored first.
| 2.119771 | 2.912774 | 0.72775 |
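Two quick calls showing the padding repair and a real JWS payload segment; the function is restated locally so the check is self-contained:

import base64

def base64url_decode(data):
    rem = len(data) % 4
    if rem > 0:
        data += b'=' * (4 - rem)               # restore the stripped '=' padding
    return base64.urlsafe_b64decode(data)

print(base64url_decode(b'Zm9vYmE'))            # b'fooba'
print(base64url_decode(b'eyJhIjoiYiJ9'))       # b'{"a":"b"}'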
try:
return hmac.compare_digest(a, b)
except AttributeError:
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
|
def constant_time_string_compare(a, b)
|
Helper for comparing strings in constant time, independent
of the Python version being used.
Args:
a (str): A string to compare
b (str): A string to compare
| 2.093161 | 2.266815 | 0.923393 |
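On any modern interpreter the helper simply defers to hmac.compare_digest; the manual XOR loop is only a fallback for very old Pythons:

import hmac

print(hmac.compare_digest('expected-signature', 'expected-signature'))  # True
print(hmac.compare_digest('expected-signature', 'forged-signature!!'))  # False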
if algorithm not in ALGORITHMS.SUPPORTED:
raise JWSError('Algorithm %s not supported.' % algorithm)
encoded_header = _encode_header(algorithm, additional_headers=headers)
encoded_payload = _encode_payload(payload)
signed_output = _sign_header_and_claims(encoded_header, encoded_payload, algorithm, key)
return signed_output
|
def sign(payload, key, headers=None, algorithm=ALGORITHMS.HS256)
|
Signs a claims set and returns a JWS string.
Args:
payload (str): A string to sign
key (str or dict): The key to use for signing the claim set. Can be
individual JWK or JWK set.
headers (dict, optional): A set of headers that will be added to
the default headers. Any headers that are added as additional
headers will override the default headers.
algorithm (str, optional): The algorithm to use for signing the
claims. Defaults to HS256.
Returns:
str: The string representation of the header, claims, and signature.
Raises:
JWSError: If there is an error signing the token.
Examples:
>>> jws.sign({'a': 'b'}, 'secret', algorithm='HS256')
'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
| 3.357514 | 3.86966 | 0.867651 |
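A sketch of signing with an extra header, assuming python-jose (from jose import jws); the key id is a placeholder:

from jose import jws

token = jws.sign({'a': 'b'}, 'secret',
                 headers={'kid': 'my-key-id'},   # merged over the default header
                 algorithm='HS256')
print(token.count('.'))  # 2 -- header.claims.signature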
header, payload, signing_input, signature = _load(token)
if verify:
_verify_signature(signing_input, header, signature, key, algorithms)
return payload
|
def verify(token, key, algorithms, verify=True)
|
Verifies a JWS string's signature.
Args:
token (str): A signed JWS to be verified.
key (str or dict): A key to attempt to verify the payload with. Can be
individual JWK or JWK set.
algorithms (str or list): Valid algorithms that should be used to verify the JWS.
Returns:
str: The str representation of the payload, assuming the signature is valid.
Raises:
JWSError: If there is an exception verifying a token.
Examples:
>>> token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
>>> jws.verify(token, 'secret', algorithms='HS256')
| 4.537582 | 8.938622 | 0.507638 |
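A companion sketch for verification, assuming python-jose; the wrong-key branch shows the failure path:

from jose import jws
from jose.exceptions import JWSError

token = jws.sign({'a': 'b'}, 'secret', algorithm='HS256')
print(jws.verify(token, 'secret', algorithms='HS256'))  # the raw payload

try:
    jws.verify(token, 'wrong-secret', algorithms='HS256')
except JWSError as err:
    print(err)  # verification fails with the wrong key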
# Allow for pulling the algorithm off of the passed in jwk.
if not algorithm and isinstance(key_data, dict):
algorithm = key_data.get('alg', None)
if not algorithm:
raise JWKError('Unable to find an algorithm for key: %s' % key_data)
key_class = get_key(algorithm)
if not key_class:
raise JWKError('Unable to find an algorithm for key: %s' % key_data)
return key_class(key_data, algorithm)
|
def construct(key_data, algorithm=None)
|
Construct a Key object for the given algorithm with the given
key_data.
| 2.983263 | 3.055036 | 0.976506 |
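A sketch of both construction paths, assuming python-jose's jwk module; the JWK dict values are illustrative ('c2VjcmV0' is base64url for 'secret'):

from jose import jwk

# Explicit algorithm with a bare secret...
hmac_key = jwk.construct('secret', algorithm='HS256')

# ...or a JWK dict, letting construct() pick the algorithm off its 'alg' member.
key = jwk.construct({'kty': 'oct', 'k': 'c2VjcmV0', 'alg': 'HS256'})
print(type(key).__name__)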
pruning = self._find_root_pruning(entry_id)
return self.classes[self._best_label(pruning)]
|
def report_entry_label(self, entry_id)
|
Return the best label of the queried entry.
Parameters
----------
entry_id : int
The index of the sample to query.
Returns
-------
label: object
The best label of the given sample.
| 12.637207 | 9.391349 | 1.345622 |
labels = np.empty(len(self.dataset), dtype=int)
for pruning in self.prunings:
best_label = self._best_label(pruning)
leaves = self._find_leaves(pruning)
labels[leaves] = best_label
return labels
|
def report_all_label(self)
|
Return the best labels of all samples.
Returns
-------
labels: list of object, shape=(m)
The best label of each sample.
| 4.880155 | 4.543487 | 1.074099 |
X, Y = dataset.format_sklearn()
X = np.array(X)
Y = np.array(Y)
self.n_labels_ = np.shape(Y)[1]
self.n_features_ = np.shape(X)[1]
self.clfs_ = []
for i in range(self.n_labels_):
# TODO: should this be handled here, or before calling train()?
if len(np.unique(Y[:, i])) == 1:
clf = DummyClf()
else:
clf = copy.deepcopy(self.base_clf)
self.clfs_.append(clf)
Parallel(n_jobs=self.n_jobs, backend='threading')(
delayed(_fit_model)(self.clfs_[i], X, Y[:, i])
for i in range(self.n_labels_))
#clf.train(Dataset(X, Y[:, i]))
return self
|
def train(self, dataset)
|
Train the model with the given features.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Training feature vectors.
Y : array-like, shape=(n_samples, n_labels)
Target labels.
Attributes
----------
clfs\_ : list of :py:mod:`libact.models` object instances
Classifier instances.
Returns
-------
self : object
Return self.
| 3.413189 | 3.35669 | 1.016832 |
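The per-label loop above is the standard binary-relevance pattern; a rough stand-alone sketch with scikit-learn, where LogisticRegression stands in for base_clf and the tiny arrays are made up:

import copy
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0., 1.], [1., 0.], [1., 1.], [0., 0.]])
Y = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])   # shape (n_samples, n_labels)

base_clf = LogisticRegression()
clfs = []
for i in range(Y.shape[1]):
    clf = copy.deepcopy(base_clf)    # one independent binary classifier per label
    clf.fit(X, Y[:, i])
    clfs.append(clf)

# Predictions are re-assembled column by column.
pred = np.column_stack([clf.predict(X) for clf in clfs])
print(pred.shape)  # (4, 2)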